comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
This can be in-lined as the third parameter to `requestRandomNames` below. | private void assignApplicationRandomId(AssignedCertificate instanceLevelAssignedCertificate, Optional<AssignedCertificate> applicationLevelAssignedCertificate) {
TenantAndApplicationId tenantAndApplicationId = instanceLevelAssignedCertificate.application();
if (applicationLevelAssignedCertificate.isPresent()) {
applicationLevelAssignedCertificate.get().certificate().randomizedId().orElseThrow(() -> new IllegalArgumentException("Application certificate already assigned to " + tenantAndApplicationId.toString() + ", but random id is missing"));
EndpointCertificate withRandomNames = requestRandomNames(tenantAndApplicationId, instanceLevelAssignedCertificate.instance(), applicationLevelAssignedCertificate.get().certificate().randomizedId().get(), Optional.of(instanceLevelAssignedCertificate.certificate()));
AssignedCertificate assignedCertWithRandomNames = instanceLevelAssignedCertificate.with(withRandomNames);
curator.writeAssignedCertificate(assignedCertWithRandomNames);
} else {
String randomId = generateRandomId();
EndpointCertificate applicationLevelEndpointCert = requestRandomNames(tenantAndApplicationId, Optional.empty(), randomId, Optional.empty());
AssignedCertificate applicationLevelCert = new AssignedCertificate(tenantAndApplicationId, Optional.empty(), applicationLevelEndpointCert);
EndpointCertificate instanceLevelEndpointCert = requestRandomNames(tenantAndApplicationId, instanceLevelAssignedCertificate.instance(), randomId, Optional.of(instanceLevelAssignedCertificate.certificate()));
instanceLevelAssignedCertificate = instanceLevelAssignedCertificate.with(instanceLevelEndpointCert);
try (NestedTransaction transaction = new NestedTransaction()) {
curator.writeAssignedCertificate(instanceLevelAssignedCertificate, transaction);
curator.writeAssignedCertificate(applicationLevelCert, transaction);
transaction.commit();
}
}
} | applicationLevelAssignedCertificate.get().certificate().randomizedId().orElseThrow(() -> new IllegalArgumentException("Application certificate already assigned to " + tenantAndApplicationId.toString() + ", but random id is missing")); | private void assignApplicationRandomId(AssignedCertificate instanceLevelAssignedCertificate, Optional<AssignedCertificate> applicationLevelAssignedCertificate) {
TenantAndApplicationId tenantAndApplicationId = instanceLevelAssignedCertificate.application();
if (applicationLevelAssignedCertificate.isPresent()) {
EndpointCertificate withRandomNames = requestRandomNames(
tenantAndApplicationId,
instanceLevelAssignedCertificate.instance(),
applicationLevelAssignedCertificate.get().certificate().randomizedId()
.orElseThrow(() -> new IllegalArgumentException("Application certificate already assigned to " + tenantAndApplicationId.toString() + ", but random id is missing")),
Optional.of(instanceLevelAssignedCertificate.certificate()));
AssignedCertificate assignedCertWithRandomNames = instanceLevelAssignedCertificate.with(withRandomNames);
curator.writeAssignedCertificate(assignedCertWithRandomNames);
} else {
String randomId = generateRandomId();
EndpointCertificate applicationLevelEndpointCert = requestRandomNames(tenantAndApplicationId, Optional.empty(), randomId, Optional.empty());
AssignedCertificate applicationLevelCert = new AssignedCertificate(tenantAndApplicationId, Optional.empty(), applicationLevelEndpointCert);
EndpointCertificate instanceLevelEndpointCert = requestRandomNames(tenantAndApplicationId, instanceLevelAssignedCertificate.instance(), randomId, Optional.of(instanceLevelAssignedCertificate.certificate()));
instanceLevelAssignedCertificate = instanceLevelAssignedCertificate.with(instanceLevelEndpointCert);
try (NestedTransaction transaction = new NestedTransaction()) {
curator.writeAssignedCertificate(instanceLevelAssignedCertificate, transaction);
curator.writeAssignedCertificate(applicationLevelCert, transaction);
transaction.commit();
}
}
} | class EndpointCertificateMaintainer extends ControllerMaintainer {
private static final Logger log = Logger.getLogger(EndpointCertificateMaintainer.class.getName());
private final DeploymentTrigger deploymentTrigger;
private final Clock clock;
private final CuratorDb curator;
private final SecretStore secretStore;
private final EndpointSecretManager endpointSecretManager;
private final EndpointCertificateProvider endpointCertificateProvider;
final Comparator<EligibleJob> oldestFirst = Comparator.comparing(e -> e.deployment.at());
final BooleanFlag assignRandomizedId;
private final StringFlag endpointCertificateAlgo;
private final BooleanFlag useAlternateCertProvider;
@Inject
public EndpointCertificateMaintainer(Controller controller, Duration interval) {
super(controller, interval);
this.deploymentTrigger = controller.applications().deploymentTrigger();
this.clock = controller.clock();
this.secretStore = controller.secretStore();
this.endpointSecretManager = controller.serviceRegistry().secretManager();
this.curator = controller().curator();
this.endpointCertificateProvider = controller.serviceRegistry().endpointCertificateProvider();
this.assignRandomizedId = Flags.ASSIGN_RANDOMIZED_ID.bindTo(controller.flagSource());
this.useAlternateCertProvider = PermanentFlags.USE_ALTERNATIVE_ENDPOINT_CERTIFICATE_PROVIDER.bindTo(controller.flagSource());
this.endpointCertificateAlgo = PermanentFlags.ENDPOINT_CERTIFICATE_ALGORITHM.bindTo(controller.flagSource());
}
@Override
protected double maintain() {
try {
deployRefreshedCertificates();
updateRefreshedCertificates();
deleteUnusedCertificates();
deleteOrReportUnmanagedCertificates();
assignRandomizedIds();
} catch (Exception e) {
log.log(Level.SEVERE, "Exception caught while maintaining endpoint certificates", e);
return 1.0;
}
return 0.0;
}
private void updateRefreshedCertificates() {
curator.readAssignedCertificates().forEach(assignedCertificate -> {
var latestAvailableVersion = latestVersionInSecretStore(assignedCertificate.certificate());
if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > assignedCertificate.certificate().version()) {
var refreshedCertificateMetadata = assignedCertificate.certificate()
.withVersion(latestAvailableVersion.getAsInt())
.withLastRefreshed(clock.instant().getEpochSecond());
try (Mutex lock = lock(assignedCertificate.application())) {
if (unchanged(assignedCertificate, lock)) {
try (NestedTransaction transaction = new NestedTransaction()) {
curator.writeAssignedCertificate(assignedCertificate.with(refreshedCertificateMetadata), transaction);
transaction.commit();
}
}
}
}
});
}
private boolean unchanged(AssignedCertificate assignedCertificate, @SuppressWarnings("unused") Mutex lock) {
return Optional.of(assignedCertificate).equals(curator.readAssignedCertificate(assignedCertificate.application(), assignedCertificate.instance()));
}
record EligibleJob(Deployment deployment, ApplicationId applicationId, JobType job) {}
/**
* If it's been four days since the cert has been refreshed, re-trigger prod deployment jobs (one at a time).
*/
private void deployRefreshedCertificates() {
var now = clock.instant();
var eligibleJobs = new ArrayList<EligibleJob>();
curator.readAssignedCertificates().forEach(assignedCertificate ->
assignedCertificate.certificate().lastRefreshed().ifPresent(lastRefreshTime -> {
Instant refreshTime = Instant.ofEpochSecond(lastRefreshTime);
if (now.isAfter(refreshTime.plus(4, ChronoUnit.DAYS))) {
if (assignedCertificate.instance().isPresent()) {
ApplicationId applicationId = assignedCertificate.application().instance(assignedCertificate.instance().get());
controller().applications().getInstance(applicationId)
.ifPresent(instance -> instance.productionDeployments().forEach((zone, deployment) -> {
if (deployment.at().isBefore(refreshTime)) {
JobType job = JobType.deploymentTo(zone);
eligibleJobs.add(new EligibleJob(deployment, applicationId, job));
}
}));
} else {
controller().applications().getApplication(assignedCertificate.application()).ifPresent(application -> {
application.instances().forEach((ignored, i) -> {
i.productionDeployments().forEach((zone, deployment) -> {
if (deployment.at().isBefore(refreshTime)) {
JobType job = JobType.deploymentTo(zone);
eligibleJobs.add(new EligibleJob(deployment, i.id(), job));
}
});
});
});
}
}
}));
eligibleJobs.stream()
.min(oldestFirst)
.ifPresent(e -> {
deploymentTrigger.reTrigger(e.applicationId, e.job, "re-triggered by EndpointCertificateMaintainer");
log.info("Re-triggering deployment job " + e.job.jobName() + " for instance " +
e.applicationId.serializedForm() + " to roll out refreshed endpoint certificate");
});
}
private OptionalInt latestVersionInSecretStore(EndpointCertificate originalCertificateMetadata) {
try {
var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName()));
var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName()));
return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max();
} catch (SecretNotFoundException s) {
return OptionalInt.empty();
}
}
private void deleteUnusedCertificates() {
var oneMonthAgo = clock.instant().minus(30, ChronoUnit.DAYS);
curator.readAssignedCertificates().forEach(assignedCertificate -> {
EndpointCertificate certificate = assignedCertificate.certificate();
var lastRequested = Instant.ofEpochSecond(certificate.lastRequested());
if (lastRequested.isBefore(oneMonthAgo) && hasNoDeployments(assignedCertificate.application())) {
try (Mutex lock = lock(assignedCertificate.application())) {
if (unchanged(assignedCertificate, lock)) {
log.log(Level.INFO, "Cert for app " + asString(assignedCertificate.application(), assignedCertificate.instance())
+ " has not been requested in a month and app has no deployments, deleting from provider, ZK and secret store");
endpointCertificateProvider.deleteCertificate(certificate.rootRequestId());
curator.removeAssignedCertificate(assignedCertificate.application(), assignedCertificate.instance());
endpointSecretManager.deleteSecret(certificate.certName());
endpointSecretManager.deleteSecret(certificate.keyName());
}
}
}
});
}
private Mutex lock(TenantAndApplicationId application) {
return curator.lock(application);
}
private boolean hasNoDeployments(TenantAndApplicationId application) {
Optional<Application> app = controller().applications().getApplication(application);
if (app.isEmpty()) return true;
for (var instance : app.get().instances().values()) {
if (!instance.deployments().isEmpty()) return false;
}
return true;
}
private void deleteOrReportUnmanagedCertificates() {
List<EndpointCertificateRequest> requests = endpointCertificateProvider.listCertificates();
List<AssignedCertificate> assignedCertificates = curator.readAssignedCertificates();
List<String> leafRequestIds = assignedCertificates.stream().map(AssignedCertificate::certificate).flatMap(m -> m.leafRequestId().stream()).toList();
List<String> rootRequestIds = assignedCertificates.stream().map(AssignedCertificate::certificate).map(EndpointCertificate::rootRequestId).toList();
List<UnassignedCertificate> unassignedCertificates = curator.readUnassignedCertificates();
List<String> certPoolRootIds = unassignedCertificates.stream().map(p -> p.certificate().leafRequestId()).flatMap(Optional::stream).toList();
List<String> certPoolLeafIds = unassignedCertificates.stream().map(p -> p.certificate().rootRequestId()).toList();
var managedIds = new HashSet<String>();
managedIds.addAll(leafRequestIds);
managedIds.addAll(rootRequestIds);
managedIds.addAll(certPoolRootIds);
managedIds.addAll(certPoolLeafIds);
for (var request : requests) {
if (!managedIds.contains(request.requestId())) {
EndpointCertificateDetails unknownCertDetails = endpointCertificateProvider.certificateDetails(request.requestId());
boolean matchFound = false;
for (AssignedCertificate assignedCertificate : assignedCertificates) {
if (assignedCertificate.certificate().certName().equals(unknownCertDetails.certKeyKeyname())) {
matchFound = true;
try (Mutex lock = lock(assignedCertificate.application())) {
if (unchanged(assignedCertificate, lock)) {
log.log(Level.INFO, "Cert for app " + asString(assignedCertificate.application(), assignedCertificate.instance())
+ " has a new leafRequestId " + unknownCertDetails.requestId() + ", updating in ZK");
try (NestedTransaction transaction = new NestedTransaction()) {
EndpointCertificate updated = assignedCertificate.certificate().withLeafRequestId(Optional.of(unknownCertDetails.requestId()));
curator.writeAssignedCertificate(assignedCertificate.with(updated), transaction);
transaction.commit();
}
}
break;
}
}
}
if (!matchFound) {
if (Instant.parse(request.createTime()).isBefore(Instant.now().minus(7, ChronoUnit.DAYS))) {
log.log(Level.INFO, String.format("Deleting unmaintained certificate with request_id %s and SANs %s",
request.requestId(),
request.dnsNames().stream().map(EndpointCertificateRequest.DnsNameStatus::dnsName).collect(Collectors.joining(", "))));
endpointCertificateProvider.deleteCertificate(request.requestId());
}
}
}
}
}
private void assignRandomizedIds() {
List<AssignedCertificate> assignedCertificates = curator.readAssignedCertificates();
/*
only assign randomized id if:
* instance is present
* randomized id is not already assigned
* feature flag is enabled
*/
assignedCertificates.stream()
.filter(c -> c.instance().isPresent())
.filter(c -> c.certificate().randomizedId().isEmpty())
.filter(c -> assignRandomizedId.with(FetchVector.Dimension.APPLICATION_ID, c.application().instance(c.instance().get()).serializedForm()).value())
.forEach(c -> assignRandomizedId(c.application(), c.instance().get()));
}
/*
Assign randomized id according to these rules:
* Instance is not mentioned in the deployment spec for this application
-> assume this is a manual deployment. Assign a randomized id to the certificate, save using instance only
* Instance is mentioned in deployment spec:
-> If there is a random endpoint assigned to tenant:application -> use this also for the "instance" certificate
-> Otherwise assign a random endpoint and write to the application and the instance.
*/
private void assignRandomizedId(TenantAndApplicationId tenantAndApplicationId, InstanceName instanceName) {
Optional<AssignedCertificate> assignedCertificate = curator.readAssignedCertificate(tenantAndApplicationId, Optional.of(instanceName));
if (assignedCertificate.isEmpty()) {
log.log(Level.INFO, "Assigned certificate missing for " + tenantAndApplicationId.instance(instanceName).toFullString() + " when assigning randomized id");
}
if (assignedCertificate.get().certificate().randomizedId().isPresent()) return;
controller().applications().lockApplicationOrThrow(tenantAndApplicationId, application -> {
DeploymentSpec deploymentSpec = application.get().deploymentSpec();
if (deploymentSpec.instance(instanceName).isPresent()) {
Optional<AssignedCertificate> applicationLevelAssignedCertificate = curator.readAssignedCertificate(tenantAndApplicationId, Optional.empty());
assignApplicationRandomId(assignedCertificate.get(), applicationLevelAssignedCertificate);
} else {
assignInstanceRandomId(assignedCertificate.get());
}
});
}
private void assignInstanceRandomId(AssignedCertificate assignedCertificate) {
String randomId = generateRandomId();
EndpointCertificate withRandomNames = requestRandomNames(assignedCertificate.application(), assignedCertificate.instance(), randomId, Optional.of(assignedCertificate.certificate()));
AssignedCertificate assignedCertWithRandomNames = assignedCertificate.with(withRandomNames);
curator.writeAssignedCertificate(assignedCertWithRandomNames);
}
private EndpointCertificate requestRandomNames(TenantAndApplicationId tenantAndApplicationId, Optional<InstanceName> instanceName, String randomId, Optional<EndpointCertificate> previousRequest) {
String dnsSuffix = Endpoint.dnsSuffix(controller().system());
List<String> newSanDnsEntries = List.of(
"*.%s.z%s".formatted(randomId, dnsSuffix),
"*.%s.g%s".formatted(randomId, dnsSuffix),
"*.%s.a%s".formatted(randomId, dnsSuffix));
List<String> existingSanDnsEntries = previousRequest.map(EndpointCertificate::requestedDnsSans).orElse(List.of());
List<String> requestNames = Stream.concat(existingSanDnsEntries.stream(), newSanDnsEntries.stream()).toList();
String key = instanceName.map(tenantAndApplicationId::instance).map(ApplicationId::toFullString).orElseGet(tenantAndApplicationId::toString);
return endpointCertificateProvider.requestCaSignedCertificate(
key,
requestNames,
previousRequest,
endpointCertificateAlgo.value(),
useAlternateCertProvider.value())
.withRandomizedId(randomId);
}
private String generateRandomId() {
List<String> unassignedIds = curator.readUnassignedCertificates().stream().map(UnassignedCertificate::id).toList();
List<String> assignedIds = curator.readAssignedCertificates().stream().map(AssignedCertificate::certificate).map(EndpointCertificate::randomizedId).filter(Optional::isPresent).map(Optional::get).toList();
Set<String> allIds = Stream.concat(unassignedIds.stream(), assignedIds.stream()).collect(Collectors.toSet());
String randomId;
do {
randomId = GeneratedEndpoint.createPart(controller().random(true));
} while (allIds.contains(randomId));
return randomId;
}
private static String asString(TenantAndApplicationId application, Optional<InstanceName> instanceName) {
return application.toString() + instanceName.map(name -> "." + name.value()).orElse("");
}
} | class EndpointCertificateMaintainer extends ControllerMaintainer {
private static final Logger log = Logger.getLogger(EndpointCertificateMaintainer.class.getName());
private final DeploymentTrigger deploymentTrigger;
private final Clock clock;
private final CuratorDb curator;
private final SecretStore secretStore;
private final EndpointSecretManager endpointSecretManager;
private final EndpointCertificateProvider endpointCertificateProvider;
final Comparator<EligibleJob> oldestFirst = Comparator.comparing(e -> e.deployment.at());
final BooleanFlag assignRandomizedId;
private final StringFlag endpointCertificateAlgo;
private final BooleanFlag useAlternateCertProvider;
@Inject
public EndpointCertificateMaintainer(Controller controller, Duration interval) {
super(controller, interval);
this.deploymentTrigger = controller.applications().deploymentTrigger();
this.clock = controller.clock();
this.secretStore = controller.secretStore();
this.endpointSecretManager = controller.serviceRegistry().secretManager();
this.curator = controller().curator();
this.endpointCertificateProvider = controller.serviceRegistry().endpointCertificateProvider();
this.assignRandomizedId = Flags.ASSIGN_RANDOMIZED_ID.bindTo(controller.flagSource());
this.useAlternateCertProvider = PermanentFlags.USE_ALTERNATIVE_ENDPOINT_CERTIFICATE_PROVIDER.bindTo(controller.flagSource());
this.endpointCertificateAlgo = PermanentFlags.ENDPOINT_CERTIFICATE_ALGORITHM.bindTo(controller.flagSource());
}
@Override
protected double maintain() {
try {
deployRefreshedCertificates();
updateRefreshedCertificates();
deleteUnusedCertificates();
deleteOrReportUnmanagedCertificates();
assignRandomizedIds();
} catch (Exception e) {
log.log(Level.SEVERE, "Exception caught while maintaining endpoint certificates", e);
return 1.0;
}
return 0.0;
}
private void updateRefreshedCertificates() {
curator.readAssignedCertificates().forEach(assignedCertificate -> {
var latestAvailableVersion = latestVersionInSecretStore(assignedCertificate.certificate());
if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > assignedCertificate.certificate().version()) {
var refreshedCertificateMetadata = assignedCertificate.certificate()
.withVersion(latestAvailableVersion.getAsInt())
.withLastRefreshed(clock.instant().getEpochSecond());
try (Mutex lock = lock(assignedCertificate.application())) {
if (unchanged(assignedCertificate, lock)) {
try (NestedTransaction transaction = new NestedTransaction()) {
curator.writeAssignedCertificate(assignedCertificate.with(refreshedCertificateMetadata), transaction);
transaction.commit();
}
}
}
}
});
}
private boolean unchanged(AssignedCertificate assignedCertificate, @SuppressWarnings("unused") Mutex lock) {
return Optional.of(assignedCertificate).equals(curator.readAssignedCertificate(assignedCertificate.application(), assignedCertificate.instance()));
}
record EligibleJob(Deployment deployment, ApplicationId applicationId, JobType job) {}
/**
* If it's been four days since the cert has been refreshed, re-trigger prod deployment jobs (one at a time).
*/
private void deployRefreshedCertificates() {
var now = clock.instant();
var eligibleJobs = new ArrayList<EligibleJob>();
curator.readAssignedCertificates().forEach(assignedCertificate ->
assignedCertificate.certificate().lastRefreshed().ifPresent(lastRefreshTime -> {
Instant refreshTime = Instant.ofEpochSecond(lastRefreshTime);
if (now.isAfter(refreshTime.plus(4, ChronoUnit.DAYS))) {
if (assignedCertificate.instance().isPresent()) {
ApplicationId applicationId = assignedCertificate.application().instance(assignedCertificate.instance().get());
controller().applications().getInstance(applicationId)
.ifPresent(instance -> instance.productionDeployments().forEach((zone, deployment) -> {
if (deployment.at().isBefore(refreshTime)) {
JobType job = JobType.deploymentTo(zone);
eligibleJobs.add(new EligibleJob(deployment, applicationId, job));
}
}));
} else {
controller().applications().getApplication(assignedCertificate.application()).ifPresent(application -> {
application.instances().forEach((ignored, i) -> {
i.productionDeployments().forEach((zone, deployment) -> {
if (deployment.at().isBefore(refreshTime)) {
JobType job = JobType.deploymentTo(zone);
eligibleJobs.add(new EligibleJob(deployment, i.id(), job));
}
});
});
});
}
}
}));
eligibleJobs.stream()
.min(oldestFirst)
.ifPresent(e -> {
deploymentTrigger.reTrigger(e.applicationId, e.job, "re-triggered by EndpointCertificateMaintainer");
log.info("Re-triggering deployment job " + e.job.jobName() + " for instance " +
e.applicationId.serializedForm() + " to roll out refreshed endpoint certificate");
});
}
private OptionalInt latestVersionInSecretStore(EndpointCertificate originalCertificateMetadata) {
try {
var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName()));
var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName()));
return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max();
} catch (SecretNotFoundException s) {
return OptionalInt.empty();
}
}
private void deleteUnusedCertificates() {
var oneMonthAgo = clock.instant().minus(30, ChronoUnit.DAYS);
curator.readAssignedCertificates().forEach(assignedCertificate -> {
EndpointCertificate certificate = assignedCertificate.certificate();
var lastRequested = Instant.ofEpochSecond(certificate.lastRequested());
if (lastRequested.isBefore(oneMonthAgo) && hasNoDeployments(assignedCertificate.application())) {
try (Mutex lock = lock(assignedCertificate.application())) {
if (unchanged(assignedCertificate, lock)) {
log.log(Level.INFO, "Cert for app " + asString(assignedCertificate.application(), assignedCertificate.instance())
+ " has not been requested in a month and app has no deployments, deleting from provider, ZK and secret store");
endpointCertificateProvider.deleteCertificate(certificate.rootRequestId());
curator.removeAssignedCertificate(assignedCertificate.application(), assignedCertificate.instance());
endpointSecretManager.deleteSecret(certificate.certName());
endpointSecretManager.deleteSecret(certificate.keyName());
}
}
}
});
}
private Mutex lock(TenantAndApplicationId application) {
return curator.lock(application);
}
private boolean hasNoDeployments(TenantAndApplicationId application) {
Optional<Application> app = controller().applications().getApplication(application);
if (app.isEmpty()) return true;
for (var instance : app.get().instances().values()) {
if (!instance.deployments().isEmpty()) return false;
}
return true;
}
private void deleteOrReportUnmanagedCertificates() {
List<EndpointCertificateRequest> requests = endpointCertificateProvider.listCertificates();
List<AssignedCertificate> assignedCertificates = curator.readAssignedCertificates();
List<String> leafRequestIds = assignedCertificates.stream().map(AssignedCertificate::certificate).flatMap(m -> m.leafRequestId().stream()).toList();
List<String> rootRequestIds = assignedCertificates.stream().map(AssignedCertificate::certificate).map(EndpointCertificate::rootRequestId).toList();
List<UnassignedCertificate> unassignedCertificates = curator.readUnassignedCertificates();
List<String> certPoolRootIds = unassignedCertificates.stream().map(p -> p.certificate().leafRequestId()).flatMap(Optional::stream).toList();
List<String> certPoolLeafIds = unassignedCertificates.stream().map(p -> p.certificate().rootRequestId()).toList();
var managedIds = new HashSet<String>();
managedIds.addAll(leafRequestIds);
managedIds.addAll(rootRequestIds);
managedIds.addAll(certPoolRootIds);
managedIds.addAll(certPoolLeafIds);
for (var request : requests) {
if (!managedIds.contains(request.requestId())) {
EndpointCertificateDetails unknownCertDetails = endpointCertificateProvider.certificateDetails(request.requestId());
boolean matchFound = false;
for (AssignedCertificate assignedCertificate : assignedCertificates) {
if (assignedCertificate.certificate().certName().equals(unknownCertDetails.certKeyKeyname())) {
matchFound = true;
try (Mutex lock = lock(assignedCertificate.application())) {
if (unchanged(assignedCertificate, lock)) {
log.log(Level.INFO, "Cert for app " + asString(assignedCertificate.application(), assignedCertificate.instance())
+ " has a new leafRequestId " + unknownCertDetails.requestId() + ", updating in ZK");
try (NestedTransaction transaction = new NestedTransaction()) {
EndpointCertificate updated = assignedCertificate.certificate().withLeafRequestId(Optional.of(unknownCertDetails.requestId()));
curator.writeAssignedCertificate(assignedCertificate.with(updated), transaction);
transaction.commit();
}
}
break;
}
}
}
if (!matchFound) {
if (Instant.parse(request.createTime()).isBefore(Instant.now().minus(7, ChronoUnit.DAYS))) {
log.log(Level.INFO, String.format("Deleting unmaintained certificate with request_id %s and SANs %s",
request.requestId(),
request.dnsNames().stream().map(EndpointCertificateRequest.DnsNameStatus::dnsName).collect(Collectors.joining(", "))));
endpointCertificateProvider.deleteCertificate(request.requestId());
}
}
}
}
}
private void assignRandomizedIds() {
List<AssignedCertificate> assignedCertificates = curator.readAssignedCertificates();
/*
only assign randomized id if:
* instance is present
* randomized id is not already assigned
* feature flag is enabled
*/
assignedCertificates.stream()
.filter(c -> c.instance().isPresent())
.filter(c -> c.certificate().randomizedId().isEmpty())
.filter(c -> assignRandomizedId.with(FetchVector.Dimension.APPLICATION_ID, c.application().instance(c.instance().get()).serializedForm()).value())
.forEach(c -> assignRandomizedId(c.application(), c.instance().get()));
}
/*
Assign randomized id according to these rules:
* Instance is not mentioned in the deployment spec for this application
-> assume this is a manual deployment. Assign a randomized id to the certificate, save using instance only
* Instance is mentioned in deployment spec:
-> If there is a random endpoint assigned to tenant:application -> use this also for the "instance" certificate
-> Otherwise assign a random endpoint and write to the application and the instance.
*/
private void assignRandomizedId(TenantAndApplicationId tenantAndApplicationId, InstanceName instanceName) {
Optional<AssignedCertificate> assignedCertificate = curator.readAssignedCertificate(tenantAndApplicationId, Optional.of(instanceName));
if (assignedCertificate.isEmpty()) {
log.log(Level.INFO, "Assigned certificate missing for " + tenantAndApplicationId.instance(instanceName).toFullString() + " when assigning randomized id");
}
if (assignedCertificate.get().certificate().randomizedId().isPresent()) return;
controller().applications().lockApplicationOrThrow(tenantAndApplicationId, application -> {
DeploymentSpec deploymentSpec = application.get().deploymentSpec();
if (deploymentSpec.instance(instanceName).isPresent()) {
Optional<AssignedCertificate> applicationLevelAssignedCertificate = curator.readAssignedCertificate(tenantAndApplicationId, Optional.empty());
assignApplicationRandomId(assignedCertificate.get(), applicationLevelAssignedCertificate);
} else {
assignInstanceRandomId(assignedCertificate.get());
}
});
}
private void assignInstanceRandomId(AssignedCertificate assignedCertificate) {
String randomId = generateRandomId();
EndpointCertificate withRandomNames = requestRandomNames(assignedCertificate.application(), assignedCertificate.instance(), randomId, Optional.of(assignedCertificate.certificate()));
AssignedCertificate assignedCertWithRandomNames = assignedCertificate.with(withRandomNames);
curator.writeAssignedCertificate(assignedCertWithRandomNames);
}
private EndpointCertificate requestRandomNames(TenantAndApplicationId tenantAndApplicationId, Optional<InstanceName> instanceName, String randomId, Optional<EndpointCertificate> previousRequest) {
String dnsSuffix = Endpoint.dnsSuffix(controller().system());
List<String> newSanDnsEntries = List.of(
"*.%s.z%s".formatted(randomId, dnsSuffix),
"*.%s.g%s".formatted(randomId, dnsSuffix),
"*.%s.a%s".formatted(randomId, dnsSuffix));
List<String> existingSanDnsEntries = previousRequest.map(EndpointCertificate::requestedDnsSans).orElse(List.of());
List<String> requestNames = Stream.concat(existingSanDnsEntries.stream(), newSanDnsEntries.stream()).toList();
String key = instanceName.map(tenantAndApplicationId::instance).map(ApplicationId::toFullString).orElseGet(tenantAndApplicationId::toString);
return endpointCertificateProvider.requestCaSignedCertificate(
key,
requestNames,
previousRequest,
endpointCertificateAlgo.value(),
useAlternateCertProvider.value())
.withRandomizedId(randomId);
}
private String generateRandomId() {
List<String> unassignedIds = curator.readUnassignedCertificates().stream().map(UnassignedCertificate::id).toList();
List<String> assignedIds = curator.readAssignedCertificates().stream().map(AssignedCertificate::certificate).map(EndpointCertificate::randomizedId).filter(Optional::isPresent).map(Optional::get).toList();
Set<String> allIds = Stream.concat(unassignedIds.stream(), assignedIds.stream()).collect(Collectors.toSet());
String randomId;
do {
randomId = GeneratedEndpoint.createPart(controller().random(true));
} while (allIds.contains(randomId));
return randomId;
}
private static String asString(TenantAndApplicationId application, Optional<InstanceName> instanceName) {
return application.toString() + instanceName.map(name -> "." + name.value()).orElse("");
}
} |
Not 1? | protected double maintain() {
NodeList candidates = nodeRepository().nodes().list().rebuilding(true);
if (candidates.isEmpty()) {
return 0;
}
int failures = 0;
List<Node> rebuilding;
try (var locked = nodeRepository().nodes().lockAndGetAll(candidates.asList(), Optional.of(Duration.ofSeconds(10)))) {
rebuilding = locked.nodes().stream().map(NodeMutex::node).toList();
RebuildResult result = hostProvisioner.replaceRootDisk(rebuilding);
for (Node updated : result.rebuilt())
if (!updated.status().wantToRebuild())
nodeRepository().nodes().write(updated, () -> { });
for (var entry : result.failed().entrySet()) {
++failures;
log.log(Level.WARNING, "Failed to rebuild " + entry.getKey() + ", will retry in " +
interval() + ": " + Exceptions.toMessageString(entry.getValue()));
}
}
return asSuccessFactorDeviation(rebuilding.size(), failures);
} | return 0; | protected double maintain() {
NodeList candidates = nodeRepository().nodes().list().rebuilding(true);
if (candidates.isEmpty()) {
return 0;
}
int failures = 0;
List<Node> rebuilding;
try (var locked = nodeRepository().nodes().lockAndGetAll(candidates.asList(), Optional.of(Duration.ofSeconds(10)))) {
rebuilding = locked.nodes().stream().map(NodeMutex::node).toList();
RebuildResult result = hostProvisioner.replaceRootDisk(rebuilding);
for (Node updated : result.rebuilt())
if (!updated.status().wantToRebuild())
nodeRepository().nodes().write(updated, () -> { });
for (var entry : result.failed().entrySet()) {
++failures;
log.log(Level.WARNING, "Failed to rebuild " + entry.getKey() + ", will retry in " +
interval() + ": " + Exceptions.toMessageString(entry.getValue()));
}
}
return asSuccessFactorDeviation(rebuilding.size(), failures);
} | class DiskReplacer extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(DiskReplacer.class.getName());
private final HostProvisioner hostProvisioner;
DiskReplacer(NodeRepository nodeRepository, Duration interval, Metric metric, HostProvisioner hostProvisioner) {
super(nodeRepository, interval, metric);
this.hostProvisioner = hostProvisioner;
}
@Override
} | class DiskReplacer extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(DiskReplacer.class.getName());
private final HostProvisioner hostProvisioner;
DiskReplacer(NodeRepository nodeRepository, Duration interval, Metric metric, HostProvisioner hostProvisioner) {
super(nodeRepository, interval, metric);
this.hostProvisioner = hostProvisioner;
}
@Override
} |
The definition has changed. 0 now indicates there is no work to do. | protected double maintain() {
NodeList candidates = nodeRepository().nodes().list().rebuilding(true);
if (candidates.isEmpty()) {
return 0;
}
int failures = 0;
List<Node> rebuilding;
try (var locked = nodeRepository().nodes().lockAndGetAll(candidates.asList(), Optional.of(Duration.ofSeconds(10)))) {
rebuilding = locked.nodes().stream().map(NodeMutex::node).toList();
RebuildResult result = hostProvisioner.replaceRootDisk(rebuilding);
for (Node updated : result.rebuilt())
if (!updated.status().wantToRebuild())
nodeRepository().nodes().write(updated, () -> { });
for (var entry : result.failed().entrySet()) {
++failures;
log.log(Level.WARNING, "Failed to rebuild " + entry.getKey() + ", will retry in " +
interval() + ": " + Exceptions.toMessageString(entry.getValue()));
}
}
return asSuccessFactorDeviation(rebuilding.size(), failures);
} | return 0; | protected double maintain() {
NodeList candidates = nodeRepository().nodes().list().rebuilding(true);
if (candidates.isEmpty()) {
return 0;
}
int failures = 0;
List<Node> rebuilding;
try (var locked = nodeRepository().nodes().lockAndGetAll(candidates.asList(), Optional.of(Duration.ofSeconds(10)))) {
rebuilding = locked.nodes().stream().map(NodeMutex::node).toList();
RebuildResult result = hostProvisioner.replaceRootDisk(rebuilding);
for (Node updated : result.rebuilt())
if (!updated.status().wantToRebuild())
nodeRepository().nodes().write(updated, () -> { });
for (var entry : result.failed().entrySet()) {
++failures;
log.log(Level.WARNING, "Failed to rebuild " + entry.getKey() + ", will retry in " +
interval() + ": " + Exceptions.toMessageString(entry.getValue()));
}
}
return asSuccessFactorDeviation(rebuilding.size(), failures);
} | class DiskReplacer extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(DiskReplacer.class.getName());
private final HostProvisioner hostProvisioner;
DiskReplacer(NodeRepository nodeRepository, Duration interval, Metric metric, HostProvisioner hostProvisioner) {
super(nodeRepository, interval, metric);
this.hostProvisioner = hostProvisioner;
}
@Override
} | class DiskReplacer extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(DiskReplacer.class.getName());
private final HostProvisioner hostProvisioner;
DiskReplacer(NodeRepository nodeRepository, Duration interval, Metric metric, HostProvisioner hostProvisioner) {
super(nodeRepository, interval, metric);
this.hostProvisioner = hostProvisioner;
}
@Override
} |
`monitorUpdate.accept()` changes result of `isKnownWorking()`? | private void updateMonitoredNode(T node, Consumer<TrafficNodeMonitor<T>> monitorUpdate, Consumer<T> nodeUpdate) {
TrafficNodeMonitor<T> monitor = nodeMonitors.get(node);
if (closed.get()) monitor = null;
if (nodesToRemove.remove(node)) {
nodeMonitors.remove(node);
monitor = null;
}
if (monitor != null) {
Boolean wasWorking = monitor.isKnownWorking();
monitorUpdate.accept(monitor);
if (wasWorking != monitor.isKnownWorking())
nodeUpdate.accept(node);
}
nodesToUpdate.remove(node);
} | if (wasWorking != monitor.isKnownWorking()) | private void updateMonitoredNode(T node, Consumer<TrafficNodeMonitor<T>> monitorUpdate, Consumer<T> nodeUpdate) {
TrafficNodeMonitor<T> monitor = nodeMonitors.get(node);
if (closed.get()) monitor = null;
if (nodesToRemove.remove(node)) {
nodeMonitors.remove(node);
monitor = null;
}
if (monitor != null) {
Boolean wasWorking = monitor.isKnownWorking();
monitorUpdate.accept(monitor);
if (wasWorking != monitor.isKnownWorking())
nodeUpdate.accept(node);
}
nodesToUpdate.remove(node);
} | class ClusterMonitor<T> {
private static final Logger log = Logger.getLogger(ClusterMonitor.class.getName());
private final MonitorConfiguration configuration = new MonitorConfiguration();
private final NodeManager<T> nodeManager;
private final MonitorThread monitorThread;
private final AtomicBoolean closed = new AtomicBoolean(false);
/** A map from Node to corresponding MonitoredNode */
private final Map<T, TrafficNodeMonitor<T>> nodeMonitors = Collections.synchronizedMap(new LinkedHashMap<>());
private final Set<T> nodesToRemove = new LinkedHashSet<>();
private final Set<T> nodesToUpdate = new LinkedHashSet<>();
private boolean skipNextWait = false;
public ClusterMonitor(NodeManager<T> manager, boolean startPingThread) {
nodeManager = manager;
monitorThread = new MonitorThread("search.clustermonitor." + manager.name());
if (startPingThread) {
monitorThread.start();
}
}
/** Updates the monitored set of nodes, and waits for 1. data on new nodes, and 2. RPC completion of removed nodes. */
public synchronized void reconfigure(Collection<T> nodes) {
if ( ! monitorThread.isAlive()) throw new IllegalStateException("monitor thread must be alive for reconfiguration");
nodesToUpdate.addAll(nodes);
nodesToRemove.addAll(nodeMonitors.keySet());
nodesToRemove.removeAll(nodes);
for (T node : nodes) if ( ! nodeMonitors.containsKey(node)) add(node, true);
synchronized (nodeManager) { skipNextWait = true; nodeManager.notifyAll(); }
try { while ( ! nodesToRemove.isEmpty() || ! nodesToUpdate.isEmpty()) wait(1); }
catch (InterruptedException e) { throw new UncheckedInterruptedException(e, true); }
nodeManager.pingIterationCompleted();
}
public void start() {
if ( ! monitorThread.isAlive()) {
monitorThread.start();
}
}
/** Returns the configuration of this cluster monitor */
public MonitorConfiguration getConfiguration() { return configuration; }
public boolean isClosed() { return closed.get(); }
/**
* Adds a new node for monitoring.
* The object representing the node must
* <ul>
* <li>Have a sensible toString</li>
* <li>Have a sensible identity (equals and hashCode)</li>
* </ul>
*
* @param node the object representing the node
* @param internal whether this node is internal to this cluster
*/
public void add(T node, boolean internal) {
nodeMonitors.put(node, new TrafficNodeMonitor<>(node, configuration, internal));
}
/** Called from ClusterSearcher/NodeManager when a node failed */
public synchronized void failed(T node, ErrorMessage error) {
updateMonitoredNode(node, monitor -> monitor.failed(error), nodeManager::failed);
}
/** Called when a node responded */
public synchronized void responded(T node) {
updateMonitoredNode(node, TrafficNodeMonitor::responded, nodeManager::working);
}
/**
* Ping all nodes which needs pinging to discover state changes
*/
public synchronized void ping(Executor executor) {
for (var monitor : nodeMonitors()) {
if (closed.get()) return;
if (nodesToRemove.remove(monitor.getNode())) {
nodeMonitors.remove(monitor.getNode());
continue;
}
nodeManager.ping(this, monitor.getNode(), executor);
}
nodeManager.pingIterationCompleted();
}
/** Returns a thread-safe snapshot of the NodeMonitors of all added nodes */
public Iterator<BaseNodeMonitor<T>> nodeMonitorIterator() {
return nodeMonitors().iterator();
}
/** Returns a thread-safe snapshot of the NodeMonitors of all added nodes */
public List<BaseNodeMonitor<T>> nodeMonitors() {
return List.copyOf(nodeMonitors.values());
}
/** Must be called when this goes out of use */
public void shutdown() {
closed.set(true);
synchronized (this) {
nodeMonitors.clear();
}
synchronized (nodeManager) {
skipNextWait = true;
nodeManager.notifyAll();
}
try {
if (monitorThread.isAlive()) {
monitorThread.join();
}
} catch (InterruptedException e) {}
}
private class MonitorThread extends Thread {
MonitorThread(String name) {
super(name);
setDaemon(true);
}
public void run() {
log.info("Starting cluster monitor thread " + getName());
ExecutorService pingExecutor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("search.ping"));
while (!closed.get()) {
try {
log.finest("Activating ping");
ping(pingExecutor);
synchronized (nodeManager) {
if ( ! skipNextWait)
nodeManager.wait(configuration.getCheckInterval());
skipNextWait = false;
}
}
catch (Throwable e) {
if (closed.get() && e instanceof InterruptedException) {
break;
} else if ( ! (e instanceof Exception) ) {
log.log(Level.WARNING,"Error in monitor thread, will quit", e);
break;
} else {
log.log(Level.WARNING,"Exception in monitor thread", e);
}
}
}
pingExecutor.shutdown();
try {
if ( ! pingExecutor.awaitTermination(10, TimeUnit.SECONDS)) {
log.warning("Timeout waiting for ping executor to terminate");
}
} catch (InterruptedException e) { }
log.info("Stopped cluster monitor thread " + getName());
}
}
} | class ClusterMonitor<T> {
private static final Logger log = Logger.getLogger(ClusterMonitor.class.getName());
private final MonitorConfiguration configuration = new MonitorConfiguration();
private final NodeManager<T> nodeManager;
private final MonitorThread monitorThread;
private final AtomicBoolean closed = new AtomicBoolean(false);
/** A map from Node to corresponding MonitoredNode */
private final Map<T, TrafficNodeMonitor<T>> nodeMonitors = Collections.synchronizedMap(new LinkedHashMap<>());
private final Set<T> nodesToRemove = new LinkedHashSet<>();
private final Set<T> nodesToUpdate = new LinkedHashSet<>();
private boolean skipNextWait = false;
public ClusterMonitor(NodeManager<T> manager, boolean startPingThread) {
nodeManager = manager;
monitorThread = new MonitorThread("search.clustermonitor." + manager.name());
if (startPingThread) {
monitorThread.start();
}
}
/** Updates the monitored set of nodes, and waits for 1. data on new nodes, and 2. RPC completion of removed nodes. */
public synchronized void reconfigure(Collection<T> nodes) {
if ( ! monitorThread.isAlive()) throw new IllegalStateException("monitor thread must be alive for reconfiguration");
nodesToUpdate.addAll(nodes);
nodesToRemove.addAll(nodeMonitors.keySet());
nodesToRemove.removeAll(nodes);
for (T node : nodes) if ( ! nodeMonitors.containsKey(node)) add(node, true);
synchronized (nodeManager) { skipNextWait = true; nodeManager.notifyAll(); }
try { while ( ! nodesToRemove.isEmpty() || ! nodesToUpdate.isEmpty()) wait(1); }
catch (InterruptedException e) { throw new UncheckedInterruptedException(e, true); }
nodeManager.pingIterationCompleted();
}
public void start() {
if ( ! monitorThread.isAlive()) {
monitorThread.start();
}
}
/** Returns the configuration of this cluster monitor */
public MonitorConfiguration getConfiguration() { return configuration; }
public boolean isClosed() { return closed.get(); }
/**
* Adds a new node for monitoring.
* The object representing the node must
* <ul>
* <li>Have a sensible toString</li>
* <li>Have a sensible identity (equals and hashCode)</li>
* </ul>
*
* @param node the object representing the node
* @param internal whether this node is internal to this cluster
*/
public void add(T node, boolean internal) {
nodeMonitors.put(node, new TrafficNodeMonitor<>(node, configuration, internal));
}
/** Called from ClusterSearcher/NodeManager when a node failed */
public synchronized void failed(T node, ErrorMessage error) {
updateMonitoredNode(node, monitor -> monitor.failed(error), nodeManager::failed);
}
/** Called when a node responded */
public synchronized void responded(T node) {
updateMonitoredNode(node, TrafficNodeMonitor::responded, nodeManager::working);
}
/**
* Ping all nodes which needs pinging to discover state changes
*/
public synchronized void ping(Executor executor) {
for (var monitor : nodeMonitors()) {
if (closed.get()) return;
if (nodesToRemove.remove(monitor.getNode())) {
nodeMonitors.remove(monitor.getNode());
continue;
}
nodeManager.ping(this, monitor.getNode(), executor);
}
nodeManager.pingIterationCompleted();
}
/** Returns a thread-safe snapshot of the NodeMonitors of all added nodes */
public Iterator<BaseNodeMonitor<T>> nodeMonitorIterator() {
return nodeMonitors().iterator();
}
/** Returns a thread-safe snapshot of the NodeMonitors of all added nodes */
public List<BaseNodeMonitor<T>> nodeMonitors() {
return List.copyOf(nodeMonitors.values());
}
/** Must be called when this goes out of use */
public void shutdown() {
closed.set(true);
synchronized (this) {
nodeMonitors.clear();
}
synchronized (nodeManager) {
skipNextWait = true;
nodeManager.notifyAll();
}
try {
if (monitorThread.isAlive()) {
monitorThread.join();
}
} catch (InterruptedException e) {}
}
private class MonitorThread extends Thread {
MonitorThread(String name) {
super(name);
setDaemon(true);
}
public void run() {
log.info("Starting cluster monitor thread " + getName());
ExecutorService pingExecutor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("search.ping"));
while (!closed.get()) {
try {
log.finest("Activating ping");
ping(pingExecutor);
synchronized (nodeManager) {
if ( ! skipNextWait)
nodeManager.wait(configuration.getCheckInterval());
skipNextWait = false;
}
}
catch (Throwable e) {
if (closed.get() && e instanceof InterruptedException) {
break;
} else if ( ! (e instanceof Exception) ) {
log.log(Level.WARNING,"Error in monitor thread, will quit", e);
break;
} else {
log.log(Level.WARNING,"Exception in monitor thread", e);
}
}
}
pingExecutor.shutdown();
try {
if ( ! pingExecutor.awaitTermination(10, TimeUnit.SECONDS)) {
log.warning("Timeout waiting for ping executor to terminate");
}
} catch (InterruptedException e) { }
log.info("Stopped cluster monitor thread " + getName());
}
}
} |
Yes, I kept this code as it was. The cluster monitor is used elsewhere, and I suppose we want to avoid unnecessary updates to state there. | private void updateMonitoredNode(T node, Consumer<TrafficNodeMonitor<T>> monitorUpdate, Consumer<T> nodeUpdate) {
TrafficNodeMonitor<T> monitor = nodeMonitors.get(node);
if (closed.get()) monitor = null;
if (nodesToRemove.remove(node)) {
nodeMonitors.remove(node);
monitor = null;
}
if (monitor != null) {
Boolean wasWorking = monitor.isKnownWorking();
monitorUpdate.accept(monitor);
if (wasWorking != monitor.isKnownWorking())
nodeUpdate.accept(node);
}
nodesToUpdate.remove(node);
} | if (wasWorking != monitor.isKnownWorking()) | private void updateMonitoredNode(T node, Consumer<TrafficNodeMonitor<T>> monitorUpdate, Consumer<T> nodeUpdate) {
TrafficNodeMonitor<T> monitor = nodeMonitors.get(node);
if (closed.get()) monitor = null;
if (nodesToRemove.remove(node)) {
nodeMonitors.remove(node);
monitor = null;
}
if (monitor != null) {
Boolean wasWorking = monitor.isKnownWorking();
monitorUpdate.accept(monitor);
if (wasWorking != monitor.isKnownWorking())
nodeUpdate.accept(node);
}
nodesToUpdate.remove(node);
} | class ClusterMonitor<T> {
private static final Logger log = Logger.getLogger(ClusterMonitor.class.getName());
private final MonitorConfiguration configuration = new MonitorConfiguration();
private final NodeManager<T> nodeManager;
private final MonitorThread monitorThread;
private final AtomicBoolean closed = new AtomicBoolean(false);
/** A map from Node to corresponding MonitoredNode */
private final Map<T, TrafficNodeMonitor<T>> nodeMonitors = Collections.synchronizedMap(new LinkedHashMap<>());
private final Set<T> nodesToRemove = new LinkedHashSet<>();
private final Set<T> nodesToUpdate = new LinkedHashSet<>();
private boolean skipNextWait = false;
public ClusterMonitor(NodeManager<T> manager, boolean startPingThread) {
nodeManager = manager;
monitorThread = new MonitorThread("search.clustermonitor." + manager.name());
if (startPingThread) {
monitorThread.start();
}
}
/** Updates the monitored set of nodes, and waits for 1. data on new nodes, and 2. RPC completion of removed nodes. */
public synchronized void reconfigure(Collection<T> nodes) {
if ( ! monitorThread.isAlive()) throw new IllegalStateException("monitor thread must be alive for reconfiguration");
nodesToUpdate.addAll(nodes);
nodesToRemove.addAll(nodeMonitors.keySet());
nodesToRemove.removeAll(nodes);
for (T node : nodes) if ( ! nodeMonitors.containsKey(node)) add(node, true);
synchronized (nodeManager) { skipNextWait = true; nodeManager.notifyAll(); }
try { while ( ! nodesToRemove.isEmpty() || ! nodesToUpdate.isEmpty()) wait(1); }
catch (InterruptedException e) { throw new UncheckedInterruptedException(e, true); }
nodeManager.pingIterationCompleted();
}
public void start() {
if ( ! monitorThread.isAlive()) {
monitorThread.start();
}
}
/** Returns the configuration of this cluster monitor */
public MonitorConfiguration getConfiguration() { return configuration; }
public boolean isClosed() { return closed.get(); }
/**
* Adds a new node for monitoring.
* The object representing the node must
* <ul>
* <li>Have a sensible toString</li>
* <li>Have a sensible identity (equals and hashCode)</li>
* </ul>
*
* @param node the object representing the node
* @param internal whether this node is internal to this cluster
*/
public void add(T node, boolean internal) {
nodeMonitors.put(node, new TrafficNodeMonitor<>(node, configuration, internal));
}
/** Called from ClusterSearcher/NodeManager when a node failed */
public synchronized void failed(T node, ErrorMessage error) {
updateMonitoredNode(node, monitor -> monitor.failed(error), nodeManager::failed);
}
/** Called when a node responded */
public synchronized void responded(T node) {
updateMonitoredNode(node, TrafficNodeMonitor::responded, nodeManager::working);
}
/**
* Ping all nodes which needs pinging to discover state changes
*/
public synchronized void ping(Executor executor) {
for (var monitor : nodeMonitors()) {
if (closed.get()) return;
if (nodesToRemove.remove(monitor.getNode())) {
nodeMonitors.remove(monitor.getNode());
continue;
}
nodeManager.ping(this, monitor.getNode(), executor);
}
nodeManager.pingIterationCompleted();
}
/** Returns a thread-safe snapshot of the NodeMonitors of all added nodes */
public Iterator<BaseNodeMonitor<T>> nodeMonitorIterator() {
return nodeMonitors().iterator();
}
/** Returns a thread-safe snapshot of the NodeMonitors of all added nodes */
public List<BaseNodeMonitor<T>> nodeMonitors() {
return List.copyOf(nodeMonitors.values());
}
/** Must be called when this goes out of use */
public void shutdown() {
closed.set(true);
synchronized (this) {
nodeMonitors.clear();
}
synchronized (nodeManager) {
skipNextWait = true;
nodeManager.notifyAll();
}
try {
if (monitorThread.isAlive()) {
monitorThread.join();
}
} catch (InterruptedException e) {}
}
private class MonitorThread extends Thread {
MonitorThread(String name) {
super(name);
setDaemon(true);
}
public void run() {
log.info("Starting cluster monitor thread " + getName());
ExecutorService pingExecutor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("search.ping"));
while (!closed.get()) {
try {
log.finest("Activating ping");
ping(pingExecutor);
synchronized (nodeManager) {
if ( ! skipNextWait)
nodeManager.wait(configuration.getCheckInterval());
skipNextWait = false;
}
}
catch (Throwable e) {
if (closed.get() && e instanceof InterruptedException) {
break;
} else if ( ! (e instanceof Exception) ) {
log.log(Level.WARNING,"Error in monitor thread, will quit", e);
break;
} else {
log.log(Level.WARNING,"Exception in monitor thread", e);
}
}
}
pingExecutor.shutdown();
try {
if ( ! pingExecutor.awaitTermination(10, TimeUnit.SECONDS)) {
log.warning("Timeout waiting for ping executor to terminate");
}
} catch (InterruptedException e) { }
log.info("Stopped cluster monitor thread " + getName());
}
}
} | class ClusterMonitor<T> {
private static final Logger log = Logger.getLogger(ClusterMonitor.class.getName());
private final MonitorConfiguration configuration = new MonitorConfiguration();
private final NodeManager<T> nodeManager;
private final MonitorThread monitorThread;
private final AtomicBoolean closed = new AtomicBoolean(false);
/** A map from Node to corresponding MonitoredNode */
private final Map<T, TrafficNodeMonitor<T>> nodeMonitors = Collections.synchronizedMap(new LinkedHashMap<>());
private final Set<T> nodesToRemove = new LinkedHashSet<>();
private final Set<T> nodesToUpdate = new LinkedHashSet<>();
private boolean skipNextWait = false;
public ClusterMonitor(NodeManager<T> manager, boolean startPingThread) {
nodeManager = manager;
monitorThread = new MonitorThread("search.clustermonitor." + manager.name());
if (startPingThread) {
monitorThread.start();
}
}
/** Updates the monitored set of nodes, and waits for 1. data on new nodes, and 2. RPC completion of removed nodes. */
public synchronized void reconfigure(Collection<T> nodes) {
if ( ! monitorThread.isAlive()) throw new IllegalStateException("monitor thread must be alive for reconfiguration");
nodesToUpdate.addAll(nodes);
nodesToRemove.addAll(nodeMonitors.keySet());
nodesToRemove.removeAll(nodes);
for (T node : nodes) if ( ! nodeMonitors.containsKey(node)) add(node, true);
synchronized (nodeManager) { skipNextWait = true; nodeManager.notifyAll(); }
try { while ( ! nodesToRemove.isEmpty() || ! nodesToUpdate.isEmpty()) wait(1); }
catch (InterruptedException e) { throw new UncheckedInterruptedException(e, true); }
nodeManager.pingIterationCompleted();
}
public void start() {
if ( ! monitorThread.isAlive()) {
monitorThread.start();
}
}
/** Returns the configuration of this cluster monitor */
public MonitorConfiguration getConfiguration() { return configuration; }
public boolean isClosed() { return closed.get(); }
/**
* Adds a new node for monitoring.
* The object representing the node must
* <ul>
* <li>Have a sensible toString</li>
* <li>Have a sensible identity (equals and hashCode)</li>
* </ul>
*
* @param node the object representing the node
* @param internal whether this node is internal to this cluster
*/
public void add(T node, boolean internal) {
nodeMonitors.put(node, new TrafficNodeMonitor<>(node, configuration, internal));
}
/** Called from ClusterSearcher/NodeManager when a node failed */
public synchronized void failed(T node, ErrorMessage error) {
updateMonitoredNode(node, monitor -> monitor.failed(error), nodeManager::failed);
}
/** Called when a node responded */
public synchronized void responded(T node) {
updateMonitoredNode(node, TrafficNodeMonitor::responded, nodeManager::working);
}
/**
* Ping all nodes which needs pinging to discover state changes
*/
public synchronized void ping(Executor executor) {
for (var monitor : nodeMonitors()) {
if (closed.get()) return;
if (nodesToRemove.remove(monitor.getNode())) {
nodeMonitors.remove(monitor.getNode());
continue;
}
nodeManager.ping(this, monitor.getNode(), executor);
}
nodeManager.pingIterationCompleted();
}
/** Returns a thread-safe snapshot of the NodeMonitors of all added nodes */
public Iterator<BaseNodeMonitor<T>> nodeMonitorIterator() {
return nodeMonitors().iterator();
}
/** Returns a thread-safe snapshot of the NodeMonitors of all added nodes */
public List<BaseNodeMonitor<T>> nodeMonitors() {
return List.copyOf(nodeMonitors.values());
}
/** Must be called when this goes out of use */
public void shutdown() {
closed.set(true);
synchronized (this) {
nodeMonitors.clear();
}
synchronized (nodeManager) {
skipNextWait = true;
nodeManager.notifyAll();
}
try {
if (monitorThread.isAlive()) {
monitorThread.join();
}
} catch (InterruptedException e) {}
}
private class MonitorThread extends Thread {
MonitorThread(String name) {
super(name);
setDaemon(true);
}
public void run() {
log.info("Starting cluster monitor thread " + getName());
ExecutorService pingExecutor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("search.ping"));
while (!closed.get()) {
try {
log.finest("Activating ping");
ping(pingExecutor);
synchronized (nodeManager) {
if ( ! skipNextWait)
nodeManager.wait(configuration.getCheckInterval());
skipNextWait = false;
}
}
catch (Throwable e) {
if (closed.get() && e instanceof InterruptedException) {
break;
} else if ( ! (e instanceof Exception) ) {
log.log(Level.WARNING,"Error in monitor thread, will quit", e);
break;
} else {
log.log(Level.WARNING,"Exception in monitor thread", e);
}
}
}
pingExecutor.shutdown();
try {
if ( ! pingExecutor.awaitTermination(10, TimeUnit.SECONDS)) {
log.warning("Timeout waiting for ping executor to terminate");
}
} catch (InterruptedException e) { }
log.info("Stopped cluster monitor thread " + getName());
}
}
} |
Remove this? | private EndpointCertificate assignFromPool(Instance instance, ZoneId zone) {
Optional<AssignedCertificate> perInstanceAssignedCertificate = curator.readAssignedCertificate(TenantAndApplicationId.from(instance.id()), Optional.of(instance.name()));
if (perInstanceAssignedCertificate.isPresent() && perInstanceAssignedCertificate.get().certificate().randomizedId().isPresent()) {
return updateLastRequested(perInstanceAssignedCertificate.get()).certificate();
} else if (! zone.environment().isManuallyDeployed()){
TenantAndApplicationId application = TenantAndApplicationId.from(instance.id());
Optional<AssignedCertificate> perApplicationAssignedCertificate = curator.readAssignedCertificate(TenantAndApplicationId.from(instance.id()), Optional.empty());
if (perApplicationAssignedCertificate.isPresent() && perApplicationAssignedCertificate.get().certificate().randomizedId().isPresent()) {
return updateLastRequested(perApplicationAssignedCertificate.get()).certificate();
}
}
Optional<InstanceName> instanceName = zone.environment().isManuallyDeployed() ? Optional.of(instance.name()) : Optional.empty();
TenantAndApplicationId application = TenantAndApplicationId.from(instance.id());
try (Mutex lock = controller.curator().lockCertificatePool()) {
Optional<UnassignedCertificate> candidate = curator.readUnassignedCertificates().stream()
.filter(pc -> pc.state() == State.ready)
.min(Comparator.comparingLong(pc -> pc.certificate().lastRequested()));
if (candidate.isEmpty()) {
throw new IllegalArgumentException("No endpoint certificate available in pool, for deployment of " + instance.id() + " in " + zone);
}
try (NestedTransaction transaction = new NestedTransaction()) {
curator.removeUnassignedCertificate(candidate.get(), transaction);
curator.writeAssignedCertificate(new AssignedCertificate(application, instanceName, candidate.get().certificate()),
transaction);
transaction.commit();
return candidate.get().certificate();
}
}
} | private EndpointCertificate assignFromPool(Instance instance, ZoneId zone) {
Optional<AssignedCertificate> perInstanceAssignedCertificate = curator.readAssignedCertificate(TenantAndApplicationId.from(instance.id()), Optional.of(instance.name()));
if (perInstanceAssignedCertificate.isPresent() && perInstanceAssignedCertificate.get().certificate().randomizedId().isPresent()) {
return updateLastRequested(perInstanceAssignedCertificate.get()).certificate();
} else if (! zone.environment().isManuallyDeployed()){
TenantAndApplicationId application = TenantAndApplicationId.from(instance.id());
Optional<AssignedCertificate> perApplicationAssignedCertificate = curator.readAssignedCertificate(TenantAndApplicationId.from(instance.id()), Optional.empty());
if (perApplicationAssignedCertificate.isPresent() && perApplicationAssignedCertificate.get().certificate().randomizedId().isPresent()) {
return updateLastRequested(perApplicationAssignedCertificate.get()).certificate();
}
}
Optional<InstanceName> instanceName = zone.environment().isManuallyDeployed() ? Optional.of(instance.name()) : Optional.empty();
TenantAndApplicationId application = TenantAndApplicationId.from(instance.id());
try (Mutex lock = controller.curator().lockCertificatePool()) {
Optional<UnassignedCertificate> candidate = curator.readUnassignedCertificates().stream()
.filter(pc -> pc.state() == State.ready)
.min(Comparator.comparingLong(pc -> pc.certificate().lastRequested()));
if (candidate.isEmpty()) {
throw new IllegalArgumentException("No endpoint certificate available in pool, for deployment of " + instance.id() + " in " + zone);
}
try (NestedTransaction transaction = new NestedTransaction()) {
curator.removeUnassignedCertificate(candidate.get(), transaction);
curator.writeAssignedCertificate(new AssignedCertificate(application, instanceName, candidate.get().certificate()),
transaction);
transaction.commit();
return candidate.get().certificate();
}
}
} | class EndpointCertificates {
private static final Logger log = Logger.getLogger(EndpointCertificates.class.getName());
private final Controller controller;
private final CuratorDb curator;
private final Clock clock;
private final EndpointCertificateProvider certificateProvider;
private final EndpointCertificateValidator certificateValidator;
private final BooleanFlag useAlternateCertProvider;
private final StringFlag endpointCertificateAlgo;
private final static Duration GCP_CERTIFICATE_EXPIRY_TIME = Duration.ofDays(100);
/**
 * Creates an EndpointCertificates component.
 *
 * Derives curator, clock and feature flags from the given controller; the provider and
 * validator are injected so tests can substitute them.
 *
 * @param controller          the controller this runs in; source of curator, clock and flags
 * @param certificateProvider issues/renews certificates with the external CA
 * @param certificateValidator checks that a certificate covers the required DNS names
 */
public EndpointCertificates(Controller controller, EndpointCertificateProvider certificateProvider,
EndpointCertificateValidator certificateValidator) {
this.controller = controller;
this.useAlternateCertProvider = PermanentFlags.USE_ALTERNATIVE_ENDPOINT_CERTIFICATE_PROVIDER.bindTo(controller.flagSource());
this.endpointCertificateAlgo = PermanentFlags.ENDPOINT_CERTIFICATE_ALGORITHM.bindTo(controller.flagSource());
this.curator = controller.curator();
this.clock = controller.clock();
this.certificateProvider = certificateProvider;
this.certificateValidator = certificateValidator;
}
/**
 * Returns a suitable certificate for endpoints of the given instance and zone.
 *
 * Delegates lookup/provisioning to getOrProvision(). For GCP zones the certificate is
 * additionally validated and its key/cert secret material is mirrored into the GCP secret
 * store (if not already present), and the returned certificate is rewritten to point at
 * the mangled GCP secret names with version pinned to 1.
 *
 * @param deploymentSpec used to compute the DNS names the certificate must cover
 * @return the certificate to use, or empty if getOrProvision() yields none
 */
public Optional<EndpointCertificate> get(Instance instance, ZoneId zone, DeploymentSpec deploymentSpec) {
Instant start = clock.instant();
Optional<EndpointCertificate> cert = getOrProvision(instance, zone, deploymentSpec);
Duration duration = Duration.between(start, clock.instant());
// Provisioning against the external provider can be slow; surface anything over 30s.
if (duration.toSeconds() > 30)
log.log(Level.INFO, Text.format("Getting endpoint certificate for %s took %d seconds!", instance.id().serializedForm(), duration.toSeconds()));
if (controller.zoneRegistry().zones().all().in(CloudName.GCP).ids().contains(zone)) {
if (cert.isPresent()) {
certificateValidator.validate(cert.get(), instance.id().serializedForm(), zone, controller.routing().certificateDnsNames(new DeploymentId(instance.id(), zone), deploymentSpec));
GcpSecretStore gcpSecretStore = controller.serviceRegistry().gcpSecretStore();
// Mangle names: '.' replaced by '_' and the provider's version appended — presumably
// because GCP secret ids disallow '.'; TODO confirm against GCP naming rules.
String mangledCertName = "endpointCert_" + cert.get().certName().replace('.', '_') + "-v" + cert.get().version();
String mangledKeyName = "endpointCert_" + cert.get().keyName().replace('.', '_') + "-v" + cert.get().version();
// Copy the certificate into the GCP secret store only if no version exists yet.
if (gcpSecretStore.getLatestSecretVersion(mangledCertName) == null) {
gcpSecretStore.setSecret(mangledCertName,
Optional.of(GCP_CERTIFICATE_EXPIRY_TIME),
"endpoint-cert-accessor");
gcpSecretStore.addSecretVersion(mangledCertName,
controller.secretStore().getSecret(cert.get().certName(), cert.get().version()));
}
// Same for the private key.
if (gcpSecretStore.getLatestSecretVersion(mangledKeyName) == null) {
gcpSecretStore.setSecret(mangledKeyName,
Optional.of(GCP_CERTIFICATE_EXPIRY_TIME),
"endpoint-cert-accessor");
gcpSecretStore.addSecretVersion(mangledKeyName,
controller.secretStore().getSecret(cert.get().keyName(), cert.get().version()));
}
// Point the returned certificate at the GCP copies; version 1 because addSecretVersion
// above always creates the first (and only) version of each mangled secret.
return Optional.of(cert.get().withVersion(1).withKeyName(mangledKeyName).withCertName(mangledCertName));
}
}
return cert;
}
/** Stamps the assignment's certificate with the current time as last-requested, persists it, and returns it. */
AssignedCertificate updateLastRequested(AssignedCertificate assignedCertificate) {
    long nowEpochSeconds = clock.instant().getEpochSecond();
    EndpointCertificate touched = assignedCertificate.certificate().withLastRequested(nowEpochSeconds);
    AssignedCertificate persisted = assignedCertificate.with(touched);
    curator.writeAssignedCertificate(persisted);
    return persisted;
}
private Optional<EndpointCertificate> getOrProvision(Instance instance, ZoneId zone, DeploymentSpec deploymentSpec) {
if (controller.routing().randomizedEndpointsEnabled(instance.id())) {
return Optional.of(assignFromPool(instance, zone));
}
Optional<AssignedCertificate> assignedCertificate = curator.readAssignedCertificate(TenantAndApplicationId.from(instance.id()), Optional.of(instance.id().instance()));
DeploymentId deployment = new DeploymentId(instance.id(), zone);
if (assignedCertificate.isEmpty()) {
var provisionedCertificate = provisionEndpointCertificate(deployment, Optional.empty(), deploymentSpec);
curator.writeAssignedCertificate(new AssignedCertificate(TenantAndApplicationId.from(instance.id()), Optional.of(instance.id().instance()), provisionedCertificate));
return Optional.of(provisionedCertificate);
} else {
AssignedCertificate updated = assignedCertificate.get().with(assignedCertificate.get().certificate().withLastRequested(clock.instant().getEpochSecond()));
curator.writeAssignedCertificate(updated);
}
Optional<EndpointCertificate> currentCertificate = assignedCertificate.map(AssignedCertificate::certificate);
var requiredSansForZone = currentCertificate.get().randomizedId().isEmpty() ?
controller.routing().certificateDnsNames(deployment, deploymentSpec) :
List.<String>of();
if (!currentCertificate.get().requestedDnsSans().containsAll(requiredSansForZone)) {
var reprovisionedCertificate =
provisionEndpointCertificate(deployment, currentCertificate, deploymentSpec)
.withRootRequestId(currentCertificate.get().rootRequestId());
curator.writeAssignedCertificate(assignedCertificate.get().with(reprovisionedCertificate));
certificateValidator.validate(reprovisionedCertificate, instance.id().serializedForm(), zone, requiredSansForZone);
return Optional.of(reprovisionedCertificate);
}
certificateValidator.validate(currentCertificate.get(), instance.id().serializedForm(), zone, requiredSansForZone);
return currentCertificate;
}
private EndpointCertificate provisionEndpointCertificate(DeploymentId deployment,
Optional<EndpointCertificate> currentCert,
DeploymentSpec deploymentSpec) {
List<ZoneId> zonesInSystem = controller.zoneRegistry().zones().controllerUpgraded().ids();
Set<ZoneId> requiredZones = new LinkedHashSet<>();
requiredZones.add(deployment.zoneId());
if (!deployment.zoneId().environment().isManuallyDeployed()) {
Optional<DeploymentInstanceSpec> instanceSpec = deploymentSpec.instance(deployment.applicationId().instance());
zonesInSystem.stream()
.filter(zone -> zone.environment().isTest() ||
(instanceSpec.isPresent() &&
instanceSpec.get().deploysTo(zone.environment(), zone.region())))
.forEach(requiredZones::add);
}
/* TODO(andreer/mpolden): To allow a seamless transition of existing deployments to using generated endpoints,
we need to something like this:
1) All current certificates must be re-provisioned to contain the same wildcard names
as CertificatePoolMaintainer, and a randomized ID
2) Generated endpoints must be exposed *before* switching deployment to a
pre-provisioned certificate
3) Tenants must shift their traffic to generated endpoints
4) We can switch to the pre-provisioned certificate. This will invalidate
non-generated endpoints
*/
Set<String> requiredNames = requiredZones.stream()
.flatMap(zone -> controller.routing().certificateDnsNames(new DeploymentId(deployment.applicationId(), zone),
deploymentSpec)
.stream())
.collect(Collectors.toCollection(LinkedHashSet::new));
List<String> currentNames = currentCert.map(EndpointCertificate::requestedDnsSans)
.orElseGet(List::of);
zonesInSystem.stream()
.map(zone -> controller.routing().certificateDnsNames(new DeploymentId(deployment.applicationId(), zone), deploymentSpec))
.filter(currentNames::containsAll)
.forEach(requiredNames::addAll);
log.log(Level.INFO, String.format("Requesting new endpoint certificate from Cameo for application %s", deployment.applicationId().serializedForm()));
String algo = this.endpointCertificateAlgo.with(FetchVector.Dimension.APPLICATION_ID, deployment.applicationId().serializedForm()).value();
boolean useAlternativeProvider = useAlternateCertProvider.with(FetchVector.Dimension.APPLICATION_ID, deployment.applicationId().serializedForm()).value();
String keyPrefix = deployment.applicationId().toFullString();
var t0 = Instant.now();
EndpointCertificate endpointCertificate = certificateProvider.requestCaSignedCertificate(keyPrefix, List.copyOf(requiredNames), currentCert, algo, useAlternativeProvider);
var t1 = Instant.now();
log.log(Level.INFO, String.format("Endpoint certificate request for application %s returned after %s", deployment.applicationId().serializedForm(), Duration.between(t0, t1)));
return endpointCertificate;
}
} | class EndpointCertificates {
private static final Logger log = Logger.getLogger(EndpointCertificates.class.getName());
private final Controller controller;
private final CuratorDb curator;
private final Clock clock;
private final EndpointCertificateProvider certificateProvider;
private final EndpointCertificateValidator certificateValidator;
private final BooleanFlag useAlternateCertProvider;
private final StringFlag endpointCertificateAlgo;
private final static Duration GCP_CERTIFICATE_EXPIRY_TIME = Duration.ofDays(100);
public EndpointCertificates(Controller controller, EndpointCertificateProvider certificateProvider,
EndpointCertificateValidator certificateValidator) {
this.controller = controller;
this.useAlternateCertProvider = PermanentFlags.USE_ALTERNATIVE_ENDPOINT_CERTIFICATE_PROVIDER.bindTo(controller.flagSource());
this.endpointCertificateAlgo = PermanentFlags.ENDPOINT_CERTIFICATE_ALGORITHM.bindTo(controller.flagSource());
this.curator = controller.curator();
this.clock = controller.clock();
this.certificateProvider = certificateProvider;
this.certificateValidator = certificateValidator;
}
/** Returns a suitable certificate for endpoints of given instance and zone */
public Optional<EndpointCertificate> get(Instance instance, ZoneId zone, DeploymentSpec deploymentSpec) {
Instant start = clock.instant();
Optional<EndpointCertificate> cert = getOrProvision(instance, zone, deploymentSpec);
Duration duration = Duration.between(start, clock.instant());
if (duration.toSeconds() > 30)
log.log(Level.INFO, Text.format("Getting endpoint certificate for %s took %d seconds!", instance.id().serializedForm(), duration.toSeconds()));
if (controller.zoneRegistry().zones().all().in(CloudName.GCP).ids().contains(zone)) {
if (cert.isPresent()) {
certificateValidator.validate(cert.get(), instance.id().serializedForm(), zone, controller.routing().certificateDnsNames(new DeploymentId(instance.id(), zone), deploymentSpec));
GcpSecretStore gcpSecretStore = controller.serviceRegistry().gcpSecretStore();
String mangledCertName = "endpointCert_" + cert.get().certName().replace('.', '_') + "-v" + cert.get().version();
String mangledKeyName = "endpointCert_" + cert.get().keyName().replace('.', '_') + "-v" + cert.get().version();
if (gcpSecretStore.getLatestSecretVersion(mangledCertName) == null) {
gcpSecretStore.setSecret(mangledCertName,
Optional.of(GCP_CERTIFICATE_EXPIRY_TIME),
"endpoint-cert-accessor");
gcpSecretStore.addSecretVersion(mangledCertName,
controller.secretStore().getSecret(cert.get().certName(), cert.get().version()));
}
if (gcpSecretStore.getLatestSecretVersion(mangledKeyName) == null) {
gcpSecretStore.setSecret(mangledKeyName,
Optional.of(GCP_CERTIFICATE_EXPIRY_TIME),
"endpoint-cert-accessor");
gcpSecretStore.addSecretVersion(mangledKeyName,
controller.secretStore().getSecret(cert.get().keyName(), cert.get().version()));
}
return Optional.of(cert.get().withVersion(1).withKeyName(mangledKeyName).withCertName(mangledCertName));
}
}
return cert;
}
AssignedCertificate updateLastRequested(AssignedCertificate assignedCertificate) {
AssignedCertificate updated = assignedCertificate.with(assignedCertificate.certificate().withLastRequested(clock.instant().getEpochSecond()));
curator.writeAssignedCertificate(updated);
return updated;
}
private Optional<EndpointCertificate> getOrProvision(Instance instance, ZoneId zone, DeploymentSpec deploymentSpec) {
if (controller.routing().randomizedEndpointsEnabled(instance.id())) {
return Optional.of(assignFromPool(instance, zone));
}
Optional<AssignedCertificate> assignedCertificate = curator.readAssignedCertificate(TenantAndApplicationId.from(instance.id()), Optional.of(instance.id().instance()));
DeploymentId deployment = new DeploymentId(instance.id(), zone);
if (assignedCertificate.isEmpty()) {
var provisionedCertificate = provisionEndpointCertificate(deployment, Optional.empty(), deploymentSpec);
curator.writeAssignedCertificate(new AssignedCertificate(TenantAndApplicationId.from(instance.id()), Optional.of(instance.id().instance()), provisionedCertificate));
return Optional.of(provisionedCertificate);
} else {
AssignedCertificate updated = assignedCertificate.get().with(assignedCertificate.get().certificate().withLastRequested(clock.instant().getEpochSecond()));
curator.writeAssignedCertificate(updated);
}
Optional<EndpointCertificate> currentCertificate = assignedCertificate.map(AssignedCertificate::certificate);
var requiredSansForZone = currentCertificate.get().randomizedId().isEmpty() ?
controller.routing().certificateDnsNames(deployment, deploymentSpec) :
List.<String>of();
if (!currentCertificate.get().requestedDnsSans().containsAll(requiredSansForZone)) {
var reprovisionedCertificate =
provisionEndpointCertificate(deployment, currentCertificate, deploymentSpec)
.withRootRequestId(currentCertificate.get().rootRequestId());
curator.writeAssignedCertificate(assignedCertificate.get().with(reprovisionedCertificate));
certificateValidator.validate(reprovisionedCertificate, instance.id().serializedForm(), zone, requiredSansForZone);
return Optional.of(reprovisionedCertificate);
}
certificateValidator.validate(currentCertificate.get(), instance.id().serializedForm(), zone, requiredSansForZone);
return currentCertificate;
}
private EndpointCertificate provisionEndpointCertificate(DeploymentId deployment,
Optional<EndpointCertificate> currentCert,
DeploymentSpec deploymentSpec) {
List<ZoneId> zonesInSystem = controller.zoneRegistry().zones().controllerUpgraded().ids();
Set<ZoneId> requiredZones = new LinkedHashSet<>();
requiredZones.add(deployment.zoneId());
if (!deployment.zoneId().environment().isManuallyDeployed()) {
Optional<DeploymentInstanceSpec> instanceSpec = deploymentSpec.instance(deployment.applicationId().instance());
zonesInSystem.stream()
.filter(zone -> zone.environment().isTest() ||
(instanceSpec.isPresent() &&
instanceSpec.get().deploysTo(zone.environment(), zone.region())))
.forEach(requiredZones::add);
}
/* TODO(andreer/mpolden): To allow a seamless transition of existing deployments to using generated endpoints,
we need to something like this:
1) All current certificates must be re-provisioned to contain the same wildcard names
as CertificatePoolMaintainer, and a randomized ID
2) Generated endpoints must be exposed *before* switching deployment to a
pre-provisioned certificate
3) Tenants must shift their traffic to generated endpoints
4) We can switch to the pre-provisioned certificate. This will invalidate
non-generated endpoints
*/
Set<String> requiredNames = requiredZones.stream()
.flatMap(zone -> controller.routing().certificateDnsNames(new DeploymentId(deployment.applicationId(), zone),
deploymentSpec)
.stream())
.collect(Collectors.toCollection(LinkedHashSet::new));
List<String> currentNames = currentCert.map(EndpointCertificate::requestedDnsSans)
.orElseGet(List::of);
zonesInSystem.stream()
.map(zone -> controller.routing().certificateDnsNames(new DeploymentId(deployment.applicationId(), zone), deploymentSpec))
.filter(currentNames::containsAll)
.forEach(requiredNames::addAll);
log.log(Level.INFO, String.format("Requesting new endpoint certificate from Cameo for application %s", deployment.applicationId().serializedForm()));
String algo = this.endpointCertificateAlgo.with(FetchVector.Dimension.APPLICATION_ID, deployment.applicationId().serializedForm()).value();
boolean useAlternativeProvider = useAlternateCertProvider.with(FetchVector.Dimension.APPLICATION_ID, deployment.applicationId().serializedForm()).value();
String keyPrefix = deployment.applicationId().toFullString();
var t0 = Instant.now();
EndpointCertificate endpointCertificate = certificateProvider.requestCaSignedCertificate(keyPrefix, List.copyOf(requiredNames), currentCert, algo, useAlternativeProvider);
var t1 = Instant.now();
log.log(Level.INFO, String.format("Endpoint certificate request for application %s returned after %s", deployment.applicationId().serializedForm(), Duration.between(t0, t1)));
return endpointCertificate;
}
} | |
This `if` is unnecessary as it is covered by the below `if (system == SystemName.PublicCd)` etc | private Set<Version> modelVersionsInUse() {
var system = controller().system();
var versions = versionsForSystem(system);
if (controller().system().isCd()) {
if (system == SystemName.PublicCd)
versions.addAll(versionsForSystem(SystemName.Public));
else if (system == SystemName.cd)
versions.addAll(versionsForSystem(SystemName.main));
}
log.log(INFO, "model versions in use : " + versions);
return versions;
} | if (controller().system().isCd()) { | private Set<Version> modelVersionsInUse() {
var system = controller().system();
var versions = versionsForSystem(system);
if (system == SystemName.PublicCd)
versions.addAll(versionsForSystem(SystemName.Public));
else if (system == SystemName.cd)
versions.addAll(versionsForSystem(SystemName.main));
log.log(FINE, "model versions in use : " + versions);
return versions;
} | class ArtifactExpirer extends ControllerMaintainer {
private static final Logger log = Logger.getLogger(ArtifactExpirer.class.getName());
private static final Duration MIN_AGE = Duration.ofDays(14);
private final Path configModelPath;
public ArtifactExpirer(Controller controller, Duration interval) {
this(controller, interval, Paths.get(Defaults.getDefaults().underVespaHome("conf/configserver-app/")));
}
public ArtifactExpirer(Controller controller, Duration interval, Path configModelPath) {
super(controller, interval);
this.configModelPath = configModelPath;
}
@Override
protected double maintain() {
VersionStatus versionStatus = controller().readVersionStatus();
return controller().clouds().stream()
.flatMapToDouble(cloud ->
controller().serviceRegistry().artifactRegistry(cloud).stream()
.mapToDouble(artifactRegistry -> maintain(versionStatus, cloud, artifactRegistry)))
.average()
.orElse(1);
}
private double maintain(VersionStatus versionStatus, CloudName cloudName, ArtifactRegistry artifactRegistry) {
try {
Instant now = controller().clock().instant();
List<Artifact> artifactsToExpire = artifactRegistry.list().stream()
.filter(artifact -> isExpired(artifact, now, versionStatus, modelVersionsInUse()))
.toList();
if (!artifactsToExpire.isEmpty()) {
log.log(INFO, "Expiring " + artifactsToExpire.size() + " artifacts in " + cloudName + ": " + artifactsToExpire);
artifactRegistry.deleteAll(artifactsToExpire);
}
return 0;
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to expire artifacts in " + cloudName + ". Will retry in " + interval(), e);
return 1;
}
}
/** Returns whether given artifact is expired */
private boolean isExpired(Artifact artifact, Instant now, VersionStatus versionStatus, Set<Version> versionsInUse) {
List<VespaVersion> versions = versionStatus.versions();
versionsInUse.addAll(versions.stream().map(VespaVersion::versionNumber).collect(Collectors.toSet()));
if (versionsInUse.contains(artifact.version())) return false;
if (versionStatus.isActive(artifact.version())) return false;
if (artifact.createdAt().isAfter(now.minus(MIN_AGE))) return false;
Version maxVersion = versions.stream().map(VespaVersion::versionNumber).max(Comparator.naturalOrder()).get();
if (artifact.version().isAfter(maxVersion)) return false;
return true;
}
/** Model versions in use in this system, and, if this is a CD system, in the main/public system */
private Set<Version> versionsForSystem(SystemName systemName) {
var versions = readConfigModelVersionsForSystem(systemName.name());
log.log(INFO, "versions for system " + systemName.name() + ": " + versions);
return versions;
}
private Set<Version> readConfigModelVersionsForSystem(String systemName) {
List<String> lines = uncheck(() -> Files.readAllLines(configModelPath.resolve("config-models-" + systemName + ".xml")));
return lines.stream()
.filter(line -> line.contains("VespaModelFactory."))
.map(line -> line.substring(line.indexOf("id='VespaModelFactory") + 22, line.indexOf("' class")))
.map(Version::fromString)
.collect(Collectors.toSet());
}
} | class ArtifactExpirer extends ControllerMaintainer {
private static final Logger log = Logger.getLogger(ArtifactExpirer.class.getName());
private static final Duration MIN_AGE = Duration.ofDays(14);
private final Path configModelPath;
public ArtifactExpirer(Controller controller, Duration interval) {
this(controller, interval, Paths.get(Defaults.getDefaults().underVespaHome("conf/configserver-app/")));
}
public ArtifactExpirer(Controller controller, Duration interval, Path configModelPath) {
super(controller, interval);
this.configModelPath = configModelPath;
}
@Override
protected double maintain() {
VersionStatus versionStatus = controller().readVersionStatus();
return controller().clouds().stream()
.flatMapToDouble(cloud ->
controller().serviceRegistry().artifactRegistry(cloud).stream()
.mapToDouble(artifactRegistry -> maintain(versionStatus, cloud, artifactRegistry)))
.average()
.orElse(1);
}
private double maintain(VersionStatus versionStatus, CloudName cloudName, ArtifactRegistry artifactRegistry) {
try {
Instant now = controller().clock().instant();
List<Artifact> artifactsToExpire = artifactRegistry.list().stream()
.filter(artifact -> isExpired(artifact, now, versionStatus, modelVersionsInUse()))
.toList();
if (!artifactsToExpire.isEmpty()) {
log.log(INFO, "Expiring " + artifactsToExpire.size() + " artifacts in " + cloudName + ": " + artifactsToExpire);
artifactRegistry.deleteAll(artifactsToExpire);
}
return 0;
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to expire artifacts in " + cloudName + ". Will retry in " + interval(), e);
return 1;
}
}
/** Returns whether given artifact is expired */
private boolean isExpired(Artifact artifact, Instant now, VersionStatus versionStatus, Set<Version> versionsInUse) {
List<VespaVersion> versions = versionStatus.versions();
versionsInUse.addAll(versions.stream().map(VespaVersion::versionNumber).collect(Collectors.toSet()));
if (versionsInUse.contains(artifact.version())) return false;
if (versionStatus.isActive(artifact.version())) return false;
if (artifact.createdAt().isAfter(now.minus(MIN_AGE))) return false;
Version maxVersion = versions.stream().map(VespaVersion::versionNumber).max(Comparator.naturalOrder()).get();
if (artifact.version().isAfter(maxVersion)) return false;
return true;
}
/** Model versions in use in this system, and, if this is a CD system, in the main/public system */
private Set<Version> versionsForSystem(SystemName systemName) {
var versions = readConfigModelVersionsForSystem(systemName.name());
log.log(FINE, "versions for system " + systemName.name() + ": " + versions);
return versions;
}
private Set<Version> readConfigModelVersionsForSystem(String systemName) {
List<String> lines = uncheck(() -> Files.readAllLines(configModelPath.resolve("config-models-" + systemName + ".xml")));
var stringToMatch = "id='VespaModelFactory.";
return lines.stream()
.filter(line -> line.contains(stringToMatch))
.map(line -> {
var start = line.indexOf(stringToMatch) + stringToMatch.length();
int end = line.indexOf("'", start);
return line.substring(start, end);
})
.map(Version::fromString)
.collect(Collectors.toSet());
}
} |
`+ 22` comes from `"id='VespaModelFactory".size()` ? Extract `"id='VespaModelFactory"` as a constant? With `startIndex = line.indexOf("id='VespaModelFactory") + 22`, the end index of the version would be `lineIndex.indexOf("'", startIndex)`, which avoids relying on the presence of the following `class` tag. | private Set<Version> readConfigModelVersionsForSystem(String systemName) {
List<String> lines = uncheck(() -> Files.readAllLines(configModelPath.resolve("config-models-" + systemName + ".xml")));
return lines.stream()
.filter(line -> line.contains("VespaModelFactory."))
.map(line -> line.substring(line.indexOf("id='VespaModelFactory") + 22, line.indexOf("' class")))
.map(Version::fromString)
.collect(Collectors.toSet());
} | .map(line -> line.substring(line.indexOf("id='VespaModelFactory") + 22, line.indexOf("' class"))) | private Set<Version> readConfigModelVersionsForSystem(String systemName) {
List<String> lines = uncheck(() -> Files.readAllLines(configModelPath.resolve("config-models-" + systemName + ".xml")));
var stringToMatch = "id='VespaModelFactory.";
return lines.stream()
.filter(line -> line.contains(stringToMatch))
.map(line -> {
var start = line.indexOf(stringToMatch) + stringToMatch.length();
int end = line.indexOf("'", start);
return line.substring(start, end);
})
.map(Version::fromString)
.collect(Collectors.toSet());
} | class ArtifactExpirer extends ControllerMaintainer {
private static final Logger log = Logger.getLogger(ArtifactExpirer.class.getName());
private static final Duration MIN_AGE = Duration.ofDays(14);
private final Path configModelPath;
public ArtifactExpirer(Controller controller, Duration interval) {
this(controller, interval, Paths.get(Defaults.getDefaults().underVespaHome("conf/configserver-app/")));
}
public ArtifactExpirer(Controller controller, Duration interval, Path configModelPath) {
super(controller, interval);
this.configModelPath = configModelPath;
}
@Override
protected double maintain() {
VersionStatus versionStatus = controller().readVersionStatus();
return controller().clouds().stream()
.flatMapToDouble(cloud ->
controller().serviceRegistry().artifactRegistry(cloud).stream()
.mapToDouble(artifactRegistry -> maintain(versionStatus, cloud, artifactRegistry)))
.average()
.orElse(1);
}
private double maintain(VersionStatus versionStatus, CloudName cloudName, ArtifactRegistry artifactRegistry) {
try {
Instant now = controller().clock().instant();
List<Artifact> artifactsToExpire = artifactRegistry.list().stream()
.filter(artifact -> isExpired(artifact, now, versionStatus, modelVersionsInUse()))
.toList();
if (!artifactsToExpire.isEmpty()) {
log.log(INFO, "Expiring " + artifactsToExpire.size() + " artifacts in " + cloudName + ": " + artifactsToExpire);
artifactRegistry.deleteAll(artifactsToExpire);
}
return 0;
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to expire artifacts in " + cloudName + ". Will retry in " + interval(), e);
return 1;
}
}
/** Returns whether given artifact is expired */
private boolean isExpired(Artifact artifact, Instant now, VersionStatus versionStatus, Set<Version> versionsInUse) {
List<VespaVersion> versions = versionStatus.versions();
versionsInUse.addAll(versions.stream().map(VespaVersion::versionNumber).collect(Collectors.toSet()));
if (versionsInUse.contains(artifact.version())) return false;
if (versionStatus.isActive(artifact.version())) return false;
if (artifact.createdAt().isAfter(now.minus(MIN_AGE))) return false;
Version maxVersion = versions.stream().map(VespaVersion::versionNumber).max(Comparator.naturalOrder()).get();
if (artifact.version().isAfter(maxVersion)) return false;
return true;
}
/** Model versions in use in this system, and, if this is a CD system, in the main/public system */
private Set<Version> modelVersionsInUse() {
var system = controller().system();
var versions = versionsForSystem(system);
if (controller().system().isCd()) {
if (system == SystemName.PublicCd)
versions.addAll(versionsForSystem(SystemName.Public));
else if (system == SystemName.cd)
versions.addAll(versionsForSystem(SystemName.main));
}
log.log(INFO, "model versions in use : " + versions);
return versions;
}
private Set<Version> versionsForSystem(SystemName systemName) {
var versions = readConfigModelVersionsForSystem(systemName.name());
log.log(INFO, "versions for system " + systemName.name() + ": " + versions);
return versions;
}
} | class ArtifactExpirer extends ControllerMaintainer {
private static final Logger log = Logger.getLogger(ArtifactExpirer.class.getName());
private static final Duration MIN_AGE = Duration.ofDays(14);
private final Path configModelPath;
public ArtifactExpirer(Controller controller, Duration interval) {
this(controller, interval, Paths.get(Defaults.getDefaults().underVespaHome("conf/configserver-app/")));
}
public ArtifactExpirer(Controller controller, Duration interval, Path configModelPath) {
super(controller, interval);
this.configModelPath = configModelPath;
}
@Override
protected double maintain() {
VersionStatus versionStatus = controller().readVersionStatus();
return controller().clouds().stream()
.flatMapToDouble(cloud ->
controller().serviceRegistry().artifactRegistry(cloud).stream()
.mapToDouble(artifactRegistry -> maintain(versionStatus, cloud, artifactRegistry)))
.average()
.orElse(1);
}
private double maintain(VersionStatus versionStatus, CloudName cloudName, ArtifactRegistry artifactRegistry) {
try {
Instant now = controller().clock().instant();
List<Artifact> artifactsToExpire = artifactRegistry.list().stream()
.filter(artifact -> isExpired(artifact, now, versionStatus, modelVersionsInUse()))
.toList();
if (!artifactsToExpire.isEmpty()) {
log.log(INFO, "Expiring " + artifactsToExpire.size() + " artifacts in " + cloudName + ": " + artifactsToExpire);
artifactRegistry.deleteAll(artifactsToExpire);
}
return 0;
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to expire artifacts in " + cloudName + ". Will retry in " + interval(), e);
return 1;
}
}
/** Returns whether given artifact is expired */
private boolean isExpired(Artifact artifact, Instant now, VersionStatus versionStatus, Set<Version> versionsInUse) {
List<VespaVersion> versions = versionStatus.versions();
versionsInUse.addAll(versions.stream().map(VespaVersion::versionNumber).collect(Collectors.toSet()));
if (versionsInUse.contains(artifact.version())) return false;
if (versionStatus.isActive(artifact.version())) return false;
if (artifact.createdAt().isAfter(now.minus(MIN_AGE))) return false;
Version maxVersion = versions.stream().map(VespaVersion::versionNumber).max(Comparator.naturalOrder()).get();
if (artifact.version().isAfter(maxVersion)) return false;
return true;
}
/** Model versions in use in this system, and, if this is a CD system, in the main/public system */
private Set<Version> modelVersionsInUse() {
var system = controller().system();
var versions = versionsForSystem(system);
if (system == SystemName.PublicCd)
versions.addAll(versionsForSystem(SystemName.Public));
else if (system == SystemName.cd)
versions.addAll(versionsForSystem(SystemName.main));
log.log(FINE, "model versions in use : " + versions);
return versions;
}
private Set<Version> versionsForSystem(SystemName systemName) {
var versions = readConfigModelVersionsForSystem(systemName.name());
log.log(FINE, "versions for system " + systemName.name() + ": " + versions);
return versions;
}
} |
So this is where they are sorted... Remove sorting? | private void toSlime(List<String> addresses, Cursor array, boolean dummyDueToErasure) {
addresses.stream().map(IP::parse).sorted(IP.NATURAL_ORDER).map(IP::asString).forEach(array::addString);
} | addresses.stream().map(IP::parse).sorted(IP.NATURAL_ORDER).map(IP::asString).forEach(array::addString); | private void toSlime(List<String> addresses, Cursor array, boolean dummyDueToErasure) {
addresses.stream().map(IP::parse).sorted(IP.NATURAL_ORDER).map(IP::asString).forEach(array::addString);
} | class NodeSerializer {
/** The configured node flavors */
private final NodeFlavors flavors;
private static final String stateKey = "state";
private static final String hostnameKey = "hostname";
private static final String ipAddressesKey = "ipAddresses";
private static final String ipAddressPoolKey = "additionalIpAddresses";
private static final String containersKey = "containers";
private static final String containerHostnameKey = "hostname";
private static final String idKey = "openStackId";
private static final String extraIdKey = "extraId";
private static final String parentHostnameKey = "parentHostname";
private static final String historyKey = "history";
private static final String logKey = "log";
private static final String instanceKey = "instance";
private static final String rebootGenerationKey = "rebootGeneration";
private static final String currentRebootGenerationKey = "currentRebootGeneration";
private static final String vespaVersionKey = "vespaVersion";
private static final String currentContainerImageKey = "currentDockerImage";
private static final String failCountKey = "failCount";
private static final String nodeTypeKey = "type";
private static final String wantToRetireKey = "wantToRetire";
private static final String wantToDeprovisionKey = "wantToDeprovision";
private static final String wantToRebuildKey = "wantToRebuild";
private static final String preferToRetireKey = "preferToRetire";
private static final String wantToFailKey = "wantToFailKey";
private static final String wantToUpgradeFlavorKey = "wantToUpgradeFlavor";
private static final String osVersionKey = "osVersion";
private static final String wantedOsVersionKey = "wantedOsVersion";
private static final String firmwareCheckKey = "firmwareCheck";
private static final String reportsKey = "reports";
private static final String modelNameKey = "modelName";
private static final String reservedToKey = "reservedTo";
private static final String exclusiveToApplicationIdKey = "exclusiveTo";
private static final String hostTTLKey = "hostTTL";
private static final String hostEmptyAtKey = "hostEmptyAt";
private static final String exclusiveToClusterTypeKey = "exclusiveToClusterType";
private static final String switchHostnameKey = "switchHostname";
private static final String trustedCertificatesKey = "trustedCertificates";
private static final String cloudAccountKey = "cloudAccount";
private static final String wireguardPubKeyKey = "wireguardPubkey";
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String diskKey = "disk";
private static final String tenantIdKey = "tenantId";
private static final String applicationIdKey = "applicationId";
private static final String instanceIdKey = "instanceId";
private static final String serviceIdKey = "serviceId";
private static final String requestedResourcesKey = "requestedResources";
private static final String restartGenerationKey = "restartGeneration";
private static final String currentRestartGenerationKey = "currentRestartGeneration";
private static final String removableKey = "removable";
private static final String reusableKey = "reusable";
private static final String wantedVespaVersionKey = "wantedVespaVersion";
private static final String wantedContainerImageRepoKey = "wantedDockerImageRepo";
private static final String historyEventTypeKey = "type";
private static final String atKey = "at";
private static final String agentKey = "agent";
private static final String networkPortsKey = "networkPorts";
private static final String fingerprintKey = "fingerprint";
private static final String expiresKey = "expires";
public NodeSerializer(NodeFlavors flavors) {
this.flavors = flavors;
}
public byte[] toJson(Node node) {
try {
Slime slime = new Slime();
toSlime(node, slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
catch (IOException e) {
throw new RuntimeException("Serialization of " + node + " to json failed", e);
}
}
/**
 * Writes all fields of a node to the given object cursor.
 * Optional fields are written only when present; absent fields are simply omitted
 * from the serialized form.
 */
private void toSlime(Node node, Cursor object) {
object.setString(hostnameKey, node.hostname());
object.setString(stateKey, toString(node.state()));
toSlime(node.ipConfig().primary(), object.setArray(ipAddressesKey), true);
toSlime(node.ipConfig().pool().ips(), object.setArray(ipAddressPoolKey), true);
toSlime(node.ipConfig().pool().hostnames(), object);
object.setString(idKey, node.id());
node.extraId().ifPresent(id -> object.setString(extraIdKey, id));
node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname));
toSlime(node.flavor(), object);
object.setLong(rebootGenerationKey, node.status().reboot().wanted());
object.setLong(currentRebootGenerationKey, node.status().reboot().current());
node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString()));
node.status().containerImage().ifPresent(image -> object.setString(currentContainerImageKey, image.asString()));
object.setLong(failCountKey, node.status().failCount());
object.setBool(wantToRetireKey, node.status().wantToRetire());
object.setBool(preferToRetireKey, node.status().preferToRetire());
object.setBool(wantToDeprovisionKey, node.status().wantToDeprovision());
object.setBool(wantToFailKey, node.status().wantToFail());
object.setBool(wantToRebuildKey, node.status().wantToRebuild());
object.setBool(wantToUpgradeFlavorKey, node.status().wantToUpgradeFlavor());
node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey)));
toSlime(node.history().events(), object.setArray(historyKey));
toSlime(node.history().log(), object.setArray(logKey));
object.setString(nodeTypeKey, toString(node.type()));
node.status().osVersion().current().ifPresent(version -> object.setString(osVersionKey, version.toString()));
// Note the asymmetry: wanted OS version uses toFullString(), current uses toString().
node.status().osVersion().wanted().ifPresent(version -> object.setString(wantedOsVersionKey, version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong(firmwareCheckKey, instant.toEpochMilli()));
node.switchHostname().ifPresent(switchHostname -> object.setString(switchHostnameKey, switchHostname));
node.reports().toSlime(object, reportsKey);
node.modelName().ifPresent(modelName -> object.setString(modelNameKey, modelName));
node.reservedTo().ifPresent(tenant -> object.setString(reservedToKey, tenant.value()));
node.exclusiveToApplicationId().ifPresent(applicationId -> object.setString(exclusiveToApplicationIdKey, applicationId.serializedForm()));
// Durations/instants are stored as epoch-agnostic millisecond longs
node.hostTTL().ifPresent(hostTTL -> object.setLong(hostTTLKey, hostTTL.toMillis()));
node.hostEmptyAt().ifPresent(emptyAt -> object.setLong(hostEmptyAtKey, emptyAt.toEpochMilli()));
node.exclusiveToClusterType().ifPresent(clusterType -> object.setString(exclusiveToClusterTypeKey, clusterType.name()));
trustedCertificatesToSlime(node.trustedCertificates(), object.setArray(trustedCertificatesKey));
// Cloud account is only written when it is actually specified
if (!node.cloudAccount().isUnspecified()) {
object.setString(cloudAccountKey, node.cloudAccount().value());
}
node.wireguardPubKey().ifPresent(pubKey -> object.setString(wireguardPubKeyKey, pubKey.value()));
}
/**
 * Writes a flavor either as a named configured flavor (with an optional disk-size
 * override under "resources"), or as a plain resources object.
 */
private void toSlime(Flavor flavor, Cursor object) {
    if ( ! flavor.isConfigured()) {
        NodeResourcesSerializer.toSlime(flavor.resources(), object.setObject(resourcesKey));
        return;
    }
    object.setString(flavorKey, flavor.name());
    flavor.flavorOverrides().ifPresent(overrides -> {
        Cursor resources = object.setObject(resourcesKey);
        overrides.diskGb().ifPresent(diskGb -> resources.setDouble(diskKey, diskGb));
    });
}
/** Writes an allocation: owner id, cluster membership, generations and flags. */
private void toSlime(Allocation allocation, Cursor object) {
NodeResourcesSerializer.toSlime(allocation.requestedResources(), object.setObject(requestedResourcesKey));
// Owner application id is stored as three separate fields, not a serialized form
object.setString(tenantIdKey, allocation.owner().tenant().value());
object.setString(applicationIdKey, allocation.owner().application().value());
object.setString(instanceIdKey, allocation.owner().instance().value());
object.setString(serviceIdKey, allocation.membership().stringValue());
object.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
object.setBool(removableKey, allocation.removable());
object.setBool(reusableKey, allocation.reusable());
object.setString(wantedVespaVersionKey, allocation.membership().cluster().vespaVersion().toString());
// Image repo is stored untagged; the version is carried separately above
allocation.membership().cluster().dockerImageRepo().ifPresent(repo -> object.setString(wantedContainerImageRepoKey, repo.untagged()));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray(networkPortsKey)));
}
/** Writes each history event as one object in the given array. */
private void toSlime(Collection<History.Event> events, Cursor array) {
    events.forEach(event -> toSlime(event, array.addObject()));
}
/** Writes a single history event: type name, time (epoch millis) and agent name. */
private void toSlime(History.Event event, Cursor object) {
object.setString(historyEventTypeKey, toString(event.type()));
object.setLong(atKey, event.at().toEpochMilli());
object.setString(agentKey, toString(event.agent()));
}
/**
 * Writes hostnames as a "containers" array of single-field objects.
 * Nothing is written — not even an empty array — when the list is empty.
 */
private void toSlime(List<HostName> hostnames, Cursor object) {
    if (hostnames.isEmpty()) return;
    Cursor containers = object.setArray(containersKey);
    for (HostName hostname : hostnames)
        containers.addObject().setString(containerHostnameKey, hostname.value());
}
/** Writes each trust store item as an object holding its fingerprint and expiry (epoch millis). */
private void trustedCertificatesToSlime(List<TrustStoreItem> trustStoreItems, Cursor array) {
    for (TrustStoreItem item : trustStoreItems) {
        Cursor entry = array.addObject();
        entry.setString(fingerprintKey, item.fingerprint());
        entry.setLong(expiresKey, item.expiry().toEpochMilli());
    }
}
/** Deserializes a node from the JSON bytes produced by {@link #toJson}. */
public Node fromJson(byte[] data) {
return nodeFromSlime(SlimeUtils.jsonToSlime(data).get());
}
/**
 * Reads a node from its serialized form. Missing optional fields become empty
 * Optionals / defaults (e.g. missing cloud account becomes CloudAccount.empty).
 * The argument order here is positional and must match the Node constructor.
 */
private Node nodeFromSlime(Inspector object) {
// Read the flavor first: allocation deserialization falls back to its resources
Flavor flavor = flavorFromSlime(object);
return new Node(object.field(idKey).asString(),
SlimeUtils.optionalString(object.field(extraIdKey)),
IP.Config.of(ipAddressesFromSlime(object, ipAddressesKey),
ipAddressesFromSlime(object, ipAddressPoolKey),
hostnamesFromSlime(object)),
object.field(hostnameKey).asString(),
SlimeUtils.optionalString(object.field(parentHostnameKey)),
flavor,
statusFromSlime(object),
nodeStateFromString(object.field(stateKey).asString()),
allocationFromSlime(flavor.resources(), object.field(instanceKey)),
historyFromSlime(object),
nodeTypeFromString(object.field(nodeTypeKey).asString()),
Reports.fromSlime(object.field(reportsKey)),
SlimeUtils.optionalString(object.field(modelNameKey)),
SlimeUtils.optionalString(object.field(reservedToKey)).map(TenantName::from),
SlimeUtils.optionalString(object.field(exclusiveToApplicationIdKey)).map(ApplicationId::fromSerializedForm),
SlimeUtils.optionalDuration(object.field(hostTTLKey)),
SlimeUtils.optionalInstant(object.field(hostEmptyAtKey)),
SlimeUtils.optionalString(object.field(exclusiveToClusterTypeKey)).map(ClusterSpec.Type::from),
SlimeUtils.optionalString(object.field(switchHostnameKey)),
trustedCertificatesFromSlime(object),
SlimeUtils.optionalString(object.field(cloudAccountKey)).map(CloudAccount::from).orElse(CloudAccount.empty),
SlimeUtils.optionalString(object.field(wireguardPubKeyKey)).map(WireguardKey::from));
}
/** Reads the node status: generations, versions, failure count and the want-to-* flags. */
private Status statusFromSlime(Inspector object) {
return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey),
versionFromSlime(object.field(vespaVersionKey)),
containerImageFromSlime(object.field(currentContainerImageKey)),
// Stored as a long; narrowed here since Status takes an int fail count
(int) object.field(failCountKey).asLong(),
object.field(wantToRetireKey).asBool(),
object.field(wantToDeprovisionKey).asBool(),
object.field(wantToRebuildKey).asBool(),
object.field(preferToRetireKey).asBool(),
object.field(wantToFailKey).asBool(),
object.field(wantToUpgradeFlavorKey).asBool(),
new OsVersion(versionFromSlime(object.field(osVersionKey)),
versionFromSlime(object.field(wantedOsVersionKey))),
SlimeUtils.optionalInstant(object.field(firmwareCheckKey)));
}
/**
 * Reads the flavor: a configured flavor looked up by name (optionally overridden
 * with a serialized disk size), or otherwise a flavor built from raw resources.
 *
 * @throws IllegalArgumentException from getFlavorOrThrow if the named flavor is unknown
 */
private Flavor flavorFromSlime(Inspector object) {
    Inspector resources = object.field(resourcesKey);
    Inspector flavorName = object.field(flavorKey);
    if ( ! flavorName.valid())
        return new Flavor(NodeResourcesSerializer.resourcesFromSlime(resources));
    Flavor configured = flavors.getFlavorOrThrow(flavorName.asString());
    return resources.valid()
           ? configured.with(FlavorOverrides.ofDisk(resources.field(diskKey).asDouble()))
           : configured;
}
/**
 * Reads an allocation, or empty if the node has none (no "instance" object).
 * When "requestedResources" is absent — presumably older serialized forms — the
 * node's assigned resources are used instead; TODO confirm against writer history.
 */
private Optional<Allocation> allocationFromSlime(NodeResources assignedResources, Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(new Allocation(applicationIdFromSlime(object),
clusterMembershipFromSlime(object),
NodeResourcesSerializer.optionalResourcesFromSlime(object.field(requestedResourcesKey))
.orElse(assignedResources),
generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey),
object.field(removableKey).asBool(),
object.field(reusableKey).asBool(),
NetworkPortsSerializer.fromSlime(object.field(networkPortsKey))));
}
/** Reads the owner application id from its three separately stored fields. */
private ApplicationId applicationIdFromSlime(Inspector object) {
    TenantName tenant = TenantName.from(object.field(tenantIdKey).asString());
    ApplicationName application = ApplicationName.from(object.field(applicationIdKey).asString());
    InstanceName instance = InstanceName.from(object.field(instanceIdKey).asString());
    return ApplicationId.from(tenant, application, instance);
}
/** Reads the node history from its two event arrays: current events and the log. */
private History historyFromSlime(Inspector object) {
return new History(eventsFromSlime(object.field(historyKey)),
eventsFromSlime(object.field(logKey)));
}
/**
 * Reads a list of history events from the given array, or an empty list if the
 * array field is missing. Entries which parse to null are skipped.
 */
private List<History.Event> eventsFromSlime(Inspector array) {
    if ( ! array.valid()) return List.of();
    List<History.Event> events = new ArrayList<>();
    array.traverse((ArrayTraverser) (index, entry) -> {
        History.Event parsed = eventFromSlime(entry);
        if (parsed != null) events.add(parsed);
    });
    return events;
}
/**
 * Reads a single history event from its type, time and agent fields.
 * NOTE(review): the null branch below is currently unreachable — eventTypeFromString
 * throws on unknown names rather than returning null; kept in case ignoring is restored.
 */
private History.Event eventFromSlime(Inspector object) {
History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString());
if (type == null) return null;
Instant at = Instant.ofEpochMilli(object.field(atKey).asLong());
Agent agent = eventAgentFromSlime(object.field(agentKey));
return new History.Event(type, agent, at);
}
/** Reads a wanted/current generation pair from the two given fields. */
private Generation generationFromSlime(Inspector object, String wantedField, String currentField) {
    return new Generation(object.field(wantedField).asLong(),
                          object.field(currentField).asLong());
}
/**
 * Reads the cluster membership of an allocated node.
 * The wanted Vespa version is treated as required: the unguarded Optional.get()
 * throws NoSuchElementException if the field is missing from the serialized form.
 */
private ClusterMembership clusterMembershipFromSlime(Inspector object) {
return ClusterMembership.from(object.field(serviceIdKey).asString(),
versionFromSlime(object.field(wantedVespaVersionKey)).get(),
containerImageFromSlime(object.field(wantedContainerImageRepoKey)));
}
/** Returns the version held by the given field, or empty if the field is missing. */
private Optional<Version> versionFromSlime(Inspector object) {
    if ( ! object.valid()) return Optional.empty();
    return Optional.of(Version.fromString(object.asString()));
}
/** Returns the container image held by the given field, or empty if the field is missing. */
private Optional<DockerImage> containerImageFromSlime(Inspector object) {
return SlimeUtils.optionalString(object).map(DockerImage::fromString);
}
/** Reads the string entries of the array stored under the given key (empty list if absent). */
private List<String> ipAddressesFromSlime(Inspector object, String key) {
    List<String> addresses = new ArrayList<>();
    object.field(key).traverse((ArrayTraverser) (index, entry) -> addresses.add(entry.asString()));
    return addresses;
}
/** Reads the hostnames stored in the "containers" array (empty list if absent). */
private List<HostName> hostnamesFromSlime(Inspector object) {
return SlimeUtils.entriesStream(object.field(containersKey))
.map(elem -> HostName.of(elem.field(containerHostnameKey).asString()))
.toList();
}
/** Reads the trusted certificates: fingerprint plus expiry stored as epoch millis. */
private List<TrustStoreItem> trustedCertificatesFromSlime(Inspector object) {
return SlimeUtils.entriesStream(object.field(trustedCertificatesKey))
.map(elem -> new TrustStoreItem(elem.field(fingerprintKey).asString(),
Instant.ofEpochMilli(elem.field(expiresKey).asLong())))
.toList();
}
/**
 * Returns the event type for the given serialized name.
 * NOTE(review): the former contract "or null if this event type should be ignored"
 * no longer holds — unknown names throw IllegalArgumentException here, although
 * callers (eventFromSlime) still null-check the result.
 */
private History.Event.Type eventTypeFromString(String eventTypeString) {
return switch (eventTypeString) {
case "provisioned" -> History.Event.Type.provisioned;
case "deprovisioned" -> History.Event.Type.deprovisioned;
case "readied" -> History.Event.Type.readied;
case "reserved" -> History.Event.Type.reserved;
case "activated" -> History.Event.Type.activated;
case "wantToRetire" -> History.Event.Type.wantToRetire;
case "wantToFail" -> History.Event.Type.wantToFail;
case "retired" -> History.Event.Type.retired;
case "deactivated" -> History.Event.Type.deactivated;
case "parked" -> History.Event.Type.parked;
case "failed" -> History.Event.Type.failed;
case "deallocated" -> History.Event.Type.deallocated;
case "down" -> History.Event.Type.down;
case "up" -> History.Event.Type.up;
case "resized" -> History.Event.Type.resized;
case "rebooted" -> History.Event.Type.rebooted;
case "osUpgraded" -> History.Event.Type.osUpgraded;
case "firmwareVerified" -> History.Event.Type.firmwareVerified;
case "breakfixed" -> History.Event.Type.breakfixed;
case "preferToRetire" -> History.Event.Type.preferToRetire;
default -> throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'");
};
}
/** Serialized name of a history event type; explicit mapping keeps the stored format stable. */
private String toString(History.Event.Type nodeEventType) {
return switch (nodeEventType) {
case provisioned -> "provisioned";
case deprovisioned -> "deprovisioned";
case readied -> "readied";
case reserved -> "reserved";
case activated -> "activated";
case wantToRetire -> "wantToRetire";
case wantToFail -> "wantToFail";
case retired -> "retired";
case deactivated -> "deactivated";
case parked -> "parked";
case failed -> "failed";
case deallocated -> "deallocated";
case down -> "down";
case up -> "up";
case resized -> "resized";
case rebooted -> "rebooted";
case osUpgraded -> "osUpgraded";
case firmwareVerified -> "firmwareVerified";
case breakfixed -> "breakfixed";
case preferToRetire -> "preferToRetire";
};
}
/**
 * Returns the agent for the given serialized name.
 * Legacy names are still accepted: "DynamicProvisioningMaintainer" maps to
 * HostCapacityMaintainer and "RetiringUpgrader" to RetiringOsUpgrader.
 */
private Agent eventAgentFromSlime(Inspector eventAgentField) {
return switch (eventAgentField.asString()) {
case "operator" -> Agent.operator;
case "application" -> Agent.application;
case "system" -> Agent.system;
case "nodeAdmin" -> Agent.nodeAdmin;
case "DirtyExpirer" -> Agent.DirtyExpirer;
case "DynamicProvisioningMaintainer", "HostCapacityMaintainer" -> Agent.HostCapacityMaintainer;
case "HostResumeProvisioner" -> Agent.HostResumeProvisioner;
case "FailedExpirer" -> Agent.FailedExpirer;
case "InactiveExpirer" -> Agent.InactiveExpirer;
case "NodeFailer" -> Agent.NodeFailer;
case "NodeHealthTracker" -> Agent.NodeHealthTracker;
case "ProvisionedExpirer" -> Agent.ProvisionedExpirer;
case "Rebalancer" -> Agent.Rebalancer;
case "ReservationExpirer" -> Agent.ReservationExpirer;
case "RetiringUpgrader" -> Agent.RetiringOsUpgrader;
case "RebuildingOsUpgrader" -> Agent.RebuildingOsUpgrader;
case "SpareCapacityMaintainer" -> Agent.SpareCapacityMaintainer;
case "SwitchRebalancer" -> Agent.SwitchRebalancer;
case "HostEncrypter" -> Agent.HostEncrypter;
case "ParkedExpirer" -> Agent.ParkedExpirer;
case "HostFlavorUpgrader" -> Agent.HostFlavorUpgrader;
default -> throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
};
}
/**
 * Serialized name of an agent. HostCapacityMaintainer and RetiringOsUpgrader are
 * written under their former names ("DynamicProvisioningMaintainer", "RetiringUpgrader"),
 * presumably so existing stored data and readers stay compatible — see the matching
 * aliases in eventAgentFromSlime.
 */
private String toString(Agent agent) {
return switch (agent) {
case operator -> "operator";
case application -> "application";
case system -> "system";
case nodeAdmin -> "nodeAdmin";
case DirtyExpirer -> "DirtyExpirer";
case HostCapacityMaintainer -> "DynamicProvisioningMaintainer";
case HostResumeProvisioner -> "HostResumeProvisioner";
case FailedExpirer -> "FailedExpirer";
case InactiveExpirer -> "InactiveExpirer";
case NodeFailer -> "NodeFailer";
case NodeHealthTracker -> "NodeHealthTracker";
case ProvisionedExpirer -> "ProvisionedExpirer";
case Rebalancer -> "Rebalancer";
case ReservationExpirer -> "ReservationExpirer";
case RetiringOsUpgrader -> "RetiringUpgrader";
case RebuildingOsUpgrader -> "RebuildingOsUpgrader";
case SpareCapacityMaintainer -> "SpareCapacityMaintainer";
case SwitchRebalancer -> "SwitchRebalancer";
case HostEncrypter -> "HostEncrypter";
case ParkedExpirer -> "ParkedExpirer";
case HostFlavorUpgrader -> "HostFlavorUpgrader";
};
}
/** Returns the node type for the given serialized name; throws on unknown names. */
static NodeType nodeTypeFromString(String typeString) {
return switch (typeString) {
case "tenant" -> NodeType.tenant;
case "host" -> NodeType.host;
case "proxy" -> NodeType.proxy;
case "proxyhost" -> NodeType.proxyhost;
case "config" -> NodeType.config;
case "confighost" -> NodeType.confighost;
case "controller" -> NodeType.controller;
case "controllerhost" -> NodeType.controllerhost;
default -> throw new IllegalArgumentException("Unknown node type '" + typeString + "'");
};
}
/** Serialized name of a node type; explicit mapping decouples the stored format from enum names. */
static String toString(NodeType type) {
return switch (type) {
case tenant -> "tenant";
case host -> "host";
case proxy -> "proxy";
case proxyhost -> "proxyhost";
case config -> "config";
case confighost -> "confighost";
case controller -> "controller";
case controllerhost -> "controllerhost";
};
}
/** Returns the node state for the given serialized name; throws on unknown names. */
static Node.State nodeStateFromString(String state) {
return switch (state) {
case "active" -> Node.State.active;
case "dirty" -> Node.State.dirty;
case "failed" -> Node.State.failed;
case "inactive" -> Node.State.inactive;
case "parked" -> Node.State.parked;
case "provisioned" -> Node.State.provisioned;
case "ready" -> Node.State.ready;
case "reserved" -> Node.State.reserved;
case "deprovisioned" -> Node.State.deprovisioned;
case "breakfixed" -> Node.State.breakfixed;
default -> throw new IllegalArgumentException("Unknown node state '" + state + "'");
};
}
/** Serialized name of a node state; explicit mapping keeps the stored format stable. */
static String toString(Node.State state) {
return switch (state) {
case active -> "active";
case dirty -> "dirty";
case failed -> "failed";
case inactive -> "inactive";
case parked -> "parked";
case provisioned -> "provisioned";
case ready -> "ready";
case reserved -> "reserved";
case deprovisioned -> "deprovisioned";
case breakfixed -> "breakfixed";
};
}
} | class NodeSerializer {
// NOTE(review): everything from here down duplicates the class content above —
// this looks like a paste/extraction artifact; confirm and remove one copy.
/** The configured node flavors */
private final NodeFlavors flavors;
// Field names of the serialized JSON form; literals are part of the stored format
// and must not be renamed (legacy spellings below are deliberate).
private static final String stateKey = "state";
private static final String hostnameKey = "hostname";
private static final String ipAddressesKey = "ipAddresses";
private static final String ipAddressPoolKey = "additionalIpAddresses";
private static final String containersKey = "containers";
private static final String containerHostnameKey = "hostname";
// Legacy key name; holds the node id
private static final String idKey = "openStackId";
private static final String extraIdKey = "extraId";
private static final String parentHostnameKey = "parentHostname";
private static final String historyKey = "history";
private static final String logKey = "log";
private static final String instanceKey = "instance";
private static final String rebootGenerationKey = "rebootGeneration";
private static final String currentRebootGenerationKey = "currentRebootGeneration";
private static final String vespaVersionKey = "vespaVersion";
private static final String currentContainerImageKey = "currentDockerImage";
private static final String failCountKey = "failCount";
private static final String nodeTypeKey = "type";
private static final String wantToRetireKey = "wantToRetire";
private static final String wantToDeprovisionKey = "wantToDeprovision";
private static final String wantToRebuildKey = "wantToRebuild";
private static final String preferToRetireKey = "preferToRetire";
// NOTE: the stored value literally ends in "Key" — apparently historical; do not
// "fix" it, as that would break reading previously serialized nodes.
private static final String wantToFailKey = "wantToFailKey";
private static final String wantToUpgradeFlavorKey = "wantToUpgradeFlavor";
private static final String osVersionKey = "osVersion";
private static final String wantedOsVersionKey = "wantedOsVersion";
private static final String firmwareCheckKey = "firmwareCheck";
private static final String reportsKey = "reports";
private static final String modelNameKey = "modelName";
private static final String reservedToKey = "reservedTo";
private static final String exclusiveToApplicationIdKey = "exclusiveTo";
private static final String hostTTLKey = "hostTTL";
private static final String hostEmptyAtKey = "hostEmptyAt";
private static final String exclusiveToClusterTypeKey = "exclusiveToClusterType";
private static final String switchHostnameKey = "switchHostname";
private static final String trustedCertificatesKey = "trustedCertificates";
private static final String cloudAccountKey = "cloudAccount";
private static final String wireguardPubKeyKey = "wireguardPubkey";
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String diskKey = "disk";
private static final String tenantIdKey = "tenantId";
private static final String applicationIdKey = "applicationId";
private static final String instanceIdKey = "instanceId";
private static final String serviceIdKey = "serviceId";
private static final String requestedResourcesKey = "requestedResources";
private static final String restartGenerationKey = "restartGeneration";
private static final String currentRestartGenerationKey = "currentRestartGeneration";
private static final String removableKey = "removable";
private static final String reusableKey = "reusable";
private static final String wantedVespaVersionKey = "wantedVespaVersion";
private static final String wantedContainerImageRepoKey = "wantedDockerImageRepo";
private static final String historyEventTypeKey = "type";
private static final String atKey = "at";
private static final String agentKey = "agent";
private static final String networkPortsKey = "networkPorts";
private static final String fingerprintKey = "fingerprint";
private static final String expiresKey = "expires";
// NOTE(review): duplicate of the serialization methods earlier in the file
// (paste/extraction artifact) — confirm and remove one copy.
/** Creates a serializer which resolves configured flavor names through the given {@link NodeFlavors}. */
public NodeSerializer(NodeFlavors flavors) {
this.flavors = flavors;
}
/** Serializes the given node to its JSON byte form. */
public byte[] toJson(Node node) {
try {
Slime slime = new Slime();
toSlime(node, slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
catch (IOException e) {
throw new RuntimeException("Serialization of " + node + " to json failed", e);
}
}
/** Writes all node fields; optional fields are omitted when absent. */
private void toSlime(Node node, Cursor object) {
object.setString(hostnameKey, node.hostname());
object.setString(stateKey, toString(node.state()));
toSlime(node.ipConfig().primary(), object.setArray(ipAddressesKey), true);
toSlime(node.ipConfig().pool().ips(), object.setArray(ipAddressPoolKey), true);
toSlime(node.ipConfig().pool().hostnames(), object);
object.setString(idKey, node.id());
node.extraId().ifPresent(id -> object.setString(extraIdKey, id));
node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname));
toSlime(node.flavor(), object);
object.setLong(rebootGenerationKey, node.status().reboot().wanted());
object.setLong(currentRebootGenerationKey, node.status().reboot().current());
node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString()));
node.status().containerImage().ifPresent(image -> object.setString(currentContainerImageKey, image.asString()));
object.setLong(failCountKey, node.status().failCount());
object.setBool(wantToRetireKey, node.status().wantToRetire());
object.setBool(preferToRetireKey, node.status().preferToRetire());
object.setBool(wantToDeprovisionKey, node.status().wantToDeprovision());
object.setBool(wantToFailKey, node.status().wantToFail());
object.setBool(wantToRebuildKey, node.status().wantToRebuild());
object.setBool(wantToUpgradeFlavorKey, node.status().wantToUpgradeFlavor());
node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey)));
toSlime(node.history().events(), object.setArray(historyKey));
toSlime(node.history().log(), object.setArray(logKey));
object.setString(nodeTypeKey, toString(node.type()));
node.status().osVersion().current().ifPresent(version -> object.setString(osVersionKey, version.toString()));
node.status().osVersion().wanted().ifPresent(version -> object.setString(wantedOsVersionKey, version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong(firmwareCheckKey, instant.toEpochMilli()));
node.switchHostname().ifPresent(switchHostname -> object.setString(switchHostnameKey, switchHostname));
node.reports().toSlime(object, reportsKey);
node.modelName().ifPresent(modelName -> object.setString(modelNameKey, modelName));
node.reservedTo().ifPresent(tenant -> object.setString(reservedToKey, tenant.value()));
node.exclusiveToApplicationId().ifPresent(applicationId -> object.setString(exclusiveToApplicationIdKey, applicationId.serializedForm()));
node.hostTTL().ifPresent(hostTTL -> object.setLong(hostTTLKey, hostTTL.toMillis()));
node.hostEmptyAt().ifPresent(emptyAt -> object.setLong(hostEmptyAtKey, emptyAt.toEpochMilli()));
node.exclusiveToClusterType().ifPresent(clusterType -> object.setString(exclusiveToClusterTypeKey, clusterType.name()));
trustedCertificatesToSlime(node.trustedCertificates(), object.setArray(trustedCertificatesKey));
if (!node.cloudAccount().isUnspecified()) {
object.setString(cloudAccountKey, node.cloudAccount().value());
}
node.wireguardPubKey().ifPresent(pubKey -> object.setString(wireguardPubKeyKey, pubKey.value()));
}
/** Writes a named configured flavor (plus any disk override) or raw resources. */
private void toSlime(Flavor flavor, Cursor object) {
if (flavor.isConfigured()) {
object.setString(flavorKey, flavor.name());
if (flavor.flavorOverrides().isPresent()) {
Cursor resourcesObject = object.setObject(resourcesKey);
flavor.flavorOverrides().get().diskGb().ifPresent(diskGb -> resourcesObject.setDouble(diskKey, diskGb));
}
}
else {
NodeResourcesSerializer.toSlime(flavor.resources(), object.setObject(resourcesKey));
}
}
/** Writes an allocation: owner id, cluster membership, generations and flags. */
private void toSlime(Allocation allocation, Cursor object) {
NodeResourcesSerializer.toSlime(allocation.requestedResources(), object.setObject(requestedResourcesKey));
object.setString(tenantIdKey, allocation.owner().tenant().value());
object.setString(applicationIdKey, allocation.owner().application().value());
object.setString(instanceIdKey, allocation.owner().instance().value());
object.setString(serviceIdKey, allocation.membership().stringValue());
object.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
object.setBool(removableKey, allocation.removable());
object.setBool(reusableKey, allocation.reusable());
object.setString(wantedVespaVersionKey, allocation.membership().cluster().vespaVersion().toString());
allocation.membership().cluster().dockerImageRepo().ifPresent(repo -> object.setString(wantedContainerImageRepoKey, repo.untagged()));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray(networkPortsKey)));
}
/** Writes each history event as one object in the given array. */
private void toSlime(Collection<History.Event> events, Cursor array) {
for (History.Event event : events)
toSlime(event, array.addObject());
}
/** Writes a single history event: type name, time (epoch millis) and agent name. */
private void toSlime(History.Event event, Cursor object) {
object.setString(historyEventTypeKey, toString(event.type()));
object.setLong(atKey, event.at().toEpochMilli());
object.setString(agentKey, toString(event.agent()));
}
/** Writes hostnames, if any, as a "containers" array of single-field objects. */
private void toSlime(List<HostName> hostnames, Cursor object) {
if (hostnames.isEmpty()) return;
Cursor containersArray = object.setArray(containersKey);
hostnames.forEach(hostname -> {
containersArray.addObject().setString(containerHostnameKey, hostname.value());
});
}
/** Writes each trust store item as an object with fingerprint and expiry (epoch millis). */
private void trustedCertificatesToSlime(List<TrustStoreItem> trustStoreItems, Cursor array) {
trustStoreItems.forEach(cert -> {
Cursor object = array.addObject();
object.setString(fingerprintKey, cert.fingerprint());
object.setLong(expiresKey, cert.expiry().toEpochMilli());
});
}
// NOTE(review): duplicate of the deserialization methods earlier in the file
// (paste/extraction artifact) — confirm and remove one copy.
/** Deserializes a node from the JSON bytes produced by {@link #toJson}. */
public Node fromJson(byte[] data) {
return nodeFromSlime(SlimeUtils.jsonToSlime(data).get());
}
/** Reads a node; missing optional fields become empty Optionals / defaults. */
private Node nodeFromSlime(Inspector object) {
Flavor flavor = flavorFromSlime(object);
return new Node(object.field(idKey).asString(),
SlimeUtils.optionalString(object.field(extraIdKey)),
IP.Config.of(ipAddressesFromSlime(object, ipAddressesKey),
ipAddressesFromSlime(object, ipAddressPoolKey),
hostnamesFromSlime(object)),
object.field(hostnameKey).asString(),
SlimeUtils.optionalString(object.field(parentHostnameKey)),
flavor,
statusFromSlime(object),
nodeStateFromString(object.field(stateKey).asString()),
allocationFromSlime(flavor.resources(), object.field(instanceKey)),
historyFromSlime(object),
nodeTypeFromString(object.field(nodeTypeKey).asString()),
Reports.fromSlime(object.field(reportsKey)),
SlimeUtils.optionalString(object.field(modelNameKey)),
SlimeUtils.optionalString(object.field(reservedToKey)).map(TenantName::from),
SlimeUtils.optionalString(object.field(exclusiveToApplicationIdKey)).map(ApplicationId::fromSerializedForm),
SlimeUtils.optionalDuration(object.field(hostTTLKey)),
SlimeUtils.optionalInstant(object.field(hostEmptyAtKey)),
SlimeUtils.optionalString(object.field(exclusiveToClusterTypeKey)).map(ClusterSpec.Type::from),
SlimeUtils.optionalString(object.field(switchHostnameKey)),
trustedCertificatesFromSlime(object),
SlimeUtils.optionalString(object.field(cloudAccountKey)).map(CloudAccount::from).orElse(CloudAccount.empty),
SlimeUtils.optionalString(object.field(wireguardPubKeyKey)).map(WireguardKey::from));
}
/** Reads the node status: generations, versions, fail count and want-to-* flags. */
private Status statusFromSlime(Inspector object) {
return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey),
versionFromSlime(object.field(vespaVersionKey)),
containerImageFromSlime(object.field(currentContainerImageKey)),
(int) object.field(failCountKey).asLong(),
object.field(wantToRetireKey).asBool(),
object.field(wantToDeprovisionKey).asBool(),
object.field(wantToRebuildKey).asBool(),
object.field(preferToRetireKey).asBool(),
object.field(wantToFailKey).asBool(),
object.field(wantToUpgradeFlavorKey).asBool(),
new OsVersion(versionFromSlime(object.field(osVersionKey)),
versionFromSlime(object.field(wantedOsVersionKey))),
SlimeUtils.optionalInstant(object.field(firmwareCheckKey)));
}
/** Reads a configured flavor (with optional disk override) or raw resources. */
private Flavor flavorFromSlime(Inspector object) {
Inspector resources = object.field(resourcesKey);
if (object.field(flavorKey).valid()) {
Flavor flavor = flavors.getFlavorOrThrow(object.field(flavorKey).asString());
if (!resources.valid()) return flavor;
return flavor.with(FlavorOverrides.ofDisk(resources.field(diskKey).asDouble()));
}
else {
return new Flavor(NodeResourcesSerializer.resourcesFromSlime(resources));
}
}
/** Reads an allocation, or empty if the node has none; falls back to assigned resources. */
private Optional<Allocation> allocationFromSlime(NodeResources assignedResources, Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(new Allocation(applicationIdFromSlime(object),
clusterMembershipFromSlime(object),
NodeResourcesSerializer.optionalResourcesFromSlime(object.field(requestedResourcesKey))
.orElse(assignedResources),
generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey),
object.field(removableKey).asBool(),
object.field(reusableKey).asBool(),
NetworkPortsSerializer.fromSlime(object.field(networkPortsKey))));
}
/** Reads the owner application id from its three separately stored fields. */
private ApplicationId applicationIdFromSlime(Inspector object) {
return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()),
ApplicationName.from(object.field(applicationIdKey).asString()),
InstanceName.from(object.field(instanceIdKey).asString()));
}
/** Reads the node history from its two event arrays: current events and the log. */
private History historyFromSlime(Inspector object) {
return new History(eventsFromSlime(object.field(historyKey)),
eventsFromSlime(object.field(logKey)));
}
/** Reads a list of events; entries which parse to null are skipped. */
private List<History.Event> eventsFromSlime(Inspector array) {
if (!array.valid()) return List.of();
List<History.Event> events = new ArrayList<>();
array.traverse((ArrayTraverser) (int i, Inspector item) -> {
History.Event event = eventFromSlime(item);
if (event != null)
events.add(event);
});
return events;
}
/** Reads a single history event from its type, time and agent fields. */
private History.Event eventFromSlime(Inspector object) {
History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString());
if (type == null) return null;
Instant at = Instant.ofEpochMilli(object.field(atKey).asLong());
Agent agent = eventAgentFromSlime(object.field(agentKey));
return new History.Event(type, agent, at);
}
/** Reads a wanted/current generation pair from the two given fields. */
private Generation generationFromSlime(Inspector object, String wantedField, String currentField) {
Inspector current = object.field(currentField);
return new Generation(object.field(wantedField).asLong(), current.asLong());
}
/** Reads the cluster membership; the wanted Vespa version field is required (get()). */
private ClusterMembership clusterMembershipFromSlime(Inspector object) {
return ClusterMembership.from(object.field(serviceIdKey).asString(),
versionFromSlime(object.field(wantedVespaVersionKey)).get(),
containerImageFromSlime(object.field(wantedContainerImageRepoKey)));
}
/** Returns the version held by the given field, or empty if the field is missing. */
private Optional<Version> versionFromSlime(Inspector object) {
return object.valid() ? Optional.of(Version.fromString(object.asString())) : Optional.empty();
}
/** Returns the container image held by the given field, or empty if missing. */
private Optional<DockerImage> containerImageFromSlime(Inspector object) {
return SlimeUtils.optionalString(object).map(DockerImage::fromString);
}
/** Reads the string entries of the array stored under the given key. */
private List<String> ipAddressesFromSlime(Inspector object, String key) {
var ipAddresses = new ArrayList<String>();
object.field(key).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString()));
return ipAddresses;
}
/** Reads the hostnames stored in the "containers" array (empty list if absent). */
private List<HostName> hostnamesFromSlime(Inspector object) {
return SlimeUtils.entriesStream(object.field(containersKey))
.map(elem -> HostName.of(elem.field(containerHostnameKey).asString()))
.toList();
}
/** Reads the trusted certificates: fingerprint plus expiry stored as epoch millis. */
private List<TrustStoreItem> trustedCertificatesFromSlime(Inspector object) {
return SlimeUtils.entriesStream(object.field(trustedCertificatesKey))
.map(elem -> new TrustStoreItem(elem.field(fingerprintKey).asString(),
Instant.ofEpochMilli(elem.field(expiresKey).asLong())))
.toList();
}
/** Returns the event type, or null if this event type should be ignored */
private History.Event.Type eventTypeFromString(String eventTypeString) {
return switch (eventTypeString) {
case "provisioned" -> History.Event.Type.provisioned;
case "deprovisioned" -> History.Event.Type.deprovisioned;
case "readied" -> History.Event.Type.readied;
case "reserved" -> History.Event.Type.reserved;
case "activated" -> History.Event.Type.activated;
case "wantToRetire" -> History.Event.Type.wantToRetire;
case "wantToFail" -> History.Event.Type.wantToFail;
case "retired" -> History.Event.Type.retired;
case "deactivated" -> History.Event.Type.deactivated;
case "parked" -> History.Event.Type.parked;
case "failed" -> History.Event.Type.failed;
case "deallocated" -> History.Event.Type.deallocated;
case "down" -> History.Event.Type.down;
case "up" -> History.Event.Type.up;
case "resized" -> History.Event.Type.resized;
case "rebooted" -> History.Event.Type.rebooted;
case "osUpgraded" -> History.Event.Type.osUpgraded;
case "firmwareVerified" -> History.Event.Type.firmwareVerified;
case "breakfixed" -> History.Event.Type.breakfixed;
case "preferToRetire" -> History.Event.Type.preferToRetire;
default -> throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'");
};
}
private String toString(History.Event.Type nodeEventType) {
return switch (nodeEventType) {
case provisioned -> "provisioned";
case deprovisioned -> "deprovisioned";
case readied -> "readied";
case reserved -> "reserved";
case activated -> "activated";
case wantToRetire -> "wantToRetire";
case wantToFail -> "wantToFail";
case retired -> "retired";
case deactivated -> "deactivated";
case parked -> "parked";
case failed -> "failed";
case deallocated -> "deallocated";
case down -> "down";
case up -> "up";
case resized -> "resized";
case rebooted -> "rebooted";
case osUpgraded -> "osUpgraded";
case firmwareVerified -> "firmwareVerified";
case breakfixed -> "breakfixed";
case preferToRetire -> "preferToRetire";
};
}
private Agent eventAgentFromSlime(Inspector eventAgentField) {
return switch (eventAgentField.asString()) {
case "operator" -> Agent.operator;
case "application" -> Agent.application;
case "system" -> Agent.system;
case "nodeAdmin" -> Agent.nodeAdmin;
case "DirtyExpirer" -> Agent.DirtyExpirer;
case "DynamicProvisioningMaintainer", "HostCapacityMaintainer" -> Agent.HostCapacityMaintainer;
case "HostResumeProvisioner" -> Agent.HostResumeProvisioner;
case "FailedExpirer" -> Agent.FailedExpirer;
case "InactiveExpirer" -> Agent.InactiveExpirer;
case "NodeFailer" -> Agent.NodeFailer;
case "NodeHealthTracker" -> Agent.NodeHealthTracker;
case "ProvisionedExpirer" -> Agent.ProvisionedExpirer;
case "Rebalancer" -> Agent.Rebalancer;
case "ReservationExpirer" -> Agent.ReservationExpirer;
case "RetiringUpgrader" -> Agent.RetiringOsUpgrader;
case "RebuildingOsUpgrader" -> Agent.RebuildingOsUpgrader;
case "SpareCapacityMaintainer" -> Agent.SpareCapacityMaintainer;
case "SwitchRebalancer" -> Agent.SwitchRebalancer;
case "HostEncrypter" -> Agent.HostEncrypter;
case "ParkedExpirer" -> Agent.ParkedExpirer;
case "HostFlavorUpgrader" -> Agent.HostFlavorUpgrader;
default -> throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
};
}
private String toString(Agent agent) {
return switch (agent) {
case operator -> "operator";
case application -> "application";
case system -> "system";
case nodeAdmin -> "nodeAdmin";
case DirtyExpirer -> "DirtyExpirer";
case HostCapacityMaintainer -> "DynamicProvisioningMaintainer";
case HostResumeProvisioner -> "HostResumeProvisioner";
case FailedExpirer -> "FailedExpirer";
case InactiveExpirer -> "InactiveExpirer";
case NodeFailer -> "NodeFailer";
case NodeHealthTracker -> "NodeHealthTracker";
case ProvisionedExpirer -> "ProvisionedExpirer";
case Rebalancer -> "Rebalancer";
case ReservationExpirer -> "ReservationExpirer";
case RetiringOsUpgrader -> "RetiringUpgrader";
case RebuildingOsUpgrader -> "RebuildingOsUpgrader";
case SpareCapacityMaintainer -> "SpareCapacityMaintainer";
case SwitchRebalancer -> "SwitchRebalancer";
case HostEncrypter -> "HostEncrypter";
case ParkedExpirer -> "ParkedExpirer";
case HostFlavorUpgrader -> "HostFlavorUpgrader";
};
}
static NodeType nodeTypeFromString(String typeString) {
return switch (typeString) {
case "tenant" -> NodeType.tenant;
case "host" -> NodeType.host;
case "proxy" -> NodeType.proxy;
case "proxyhost" -> NodeType.proxyhost;
case "config" -> NodeType.config;
case "confighost" -> NodeType.confighost;
case "controller" -> NodeType.controller;
case "controllerhost" -> NodeType.controllerhost;
default -> throw new IllegalArgumentException("Unknown node type '" + typeString + "'");
};
}
static String toString(NodeType type) {
return switch (type) {
case tenant -> "tenant";
case host -> "host";
case proxy -> "proxy";
case proxyhost -> "proxyhost";
case config -> "config";
case confighost -> "confighost";
case controller -> "controller";
case controllerhost -> "controllerhost";
};
}
static Node.State nodeStateFromString(String state) {
return switch (state) {
case "active" -> Node.State.active;
case "dirty" -> Node.State.dirty;
case "failed" -> Node.State.failed;
case "inactive" -> Node.State.inactive;
case "parked" -> Node.State.parked;
case "provisioned" -> Node.State.provisioned;
case "ready" -> Node.State.ready;
case "reserved" -> Node.State.reserved;
case "deprovisioned" -> Node.State.deprovisioned;
case "breakfixed" -> Node.State.breakfixed;
default -> throw new IllegalArgumentException("Unknown node state '" + state + "'");
};
}
static String toString(Node.State state) {
return switch (state) {
case active -> "active";
case dirty -> "dirty";
case failed -> "failed";
case inactive -> "inactive";
case parked -> "parked";
case provisioned -> "provisioned";
case ready -> "ready";
case reserved -> "reserved";
case deprovisioned -> "deprovisioned";
case breakfixed -> "breakfixed";
};
}
} |
I didn't even see this... Great, let me remove this... but in a later PR. | private void toSlime(List<String> addresses, Cursor array, boolean dummyDueToErasure) {
addresses.stream().map(IP::parse).sorted(IP.NATURAL_ORDER).map(IP::asString).forEach(array::addString);
} | addresses.stream().map(IP::parse).sorted(IP.NATURAL_ORDER).map(IP::asString).forEach(array::addString); | private void toSlime(List<String> addresses, Cursor array, boolean dummyDueToErasure) {
addresses.stream().map(IP::parse).sorted(IP.NATURAL_ORDER).map(IP::asString).forEach(array::addString);
} | class NodeSerializer {
/** The configured node flavors */
private final NodeFlavors flavors;
private static final String stateKey = "state";
private static final String hostnameKey = "hostname";
private static final String ipAddressesKey = "ipAddresses";
private static final String ipAddressPoolKey = "additionalIpAddresses";
private static final String containersKey = "containers";
private static final String containerHostnameKey = "hostname";
private static final String idKey = "openStackId";
private static final String extraIdKey = "extraId";
private static final String parentHostnameKey = "parentHostname";
private static final String historyKey = "history";
private static final String logKey = "log";
private static final String instanceKey = "instance";
private static final String rebootGenerationKey = "rebootGeneration";
private static final String currentRebootGenerationKey = "currentRebootGeneration";
private static final String vespaVersionKey = "vespaVersion";
private static final String currentContainerImageKey = "currentDockerImage";
private static final String failCountKey = "failCount";
private static final String nodeTypeKey = "type";
private static final String wantToRetireKey = "wantToRetire";
private static final String wantToDeprovisionKey = "wantToDeprovision";
private static final String wantToRebuildKey = "wantToRebuild";
private static final String preferToRetireKey = "preferToRetire";
private static final String wantToFailKey = "wantToFailKey";
private static final String wantToUpgradeFlavorKey = "wantToUpgradeFlavor";
private static final String osVersionKey = "osVersion";
private static final String wantedOsVersionKey = "wantedOsVersion";
private static final String firmwareCheckKey = "firmwareCheck";
private static final String reportsKey = "reports";
private static final String modelNameKey = "modelName";
private static final String reservedToKey = "reservedTo";
private static final String exclusiveToApplicationIdKey = "exclusiveTo";
private static final String hostTTLKey = "hostTTL";
private static final String hostEmptyAtKey = "hostEmptyAt";
private static final String exclusiveToClusterTypeKey = "exclusiveToClusterType";
private static final String switchHostnameKey = "switchHostname";
private static final String trustedCertificatesKey = "trustedCertificates";
private static final String cloudAccountKey = "cloudAccount";
private static final String wireguardPubKeyKey = "wireguardPubkey";
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String diskKey = "disk";
private static final String tenantIdKey = "tenantId";
private static final String applicationIdKey = "applicationId";
private static final String instanceIdKey = "instanceId";
private static final String serviceIdKey = "serviceId";
private static final String requestedResourcesKey = "requestedResources";
private static final String restartGenerationKey = "restartGeneration";
private static final String currentRestartGenerationKey = "currentRestartGeneration";
private static final String removableKey = "removable";
private static final String reusableKey = "reusable";
private static final String wantedVespaVersionKey = "wantedVespaVersion";
private static final String wantedContainerImageRepoKey = "wantedDockerImageRepo";
private static final String historyEventTypeKey = "type";
private static final String atKey = "at";
private static final String agentKey = "agent";
private static final String networkPortsKey = "networkPorts";
private static final String fingerprintKey = "fingerprint";
private static final String expiresKey = "expires";
public NodeSerializer(NodeFlavors flavors) {
this.flavors = flavors;
}
public byte[] toJson(Node node) {
try {
Slime slime = new Slime();
toSlime(node, slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
catch (IOException e) {
throw new RuntimeException("Serialization of " + node + " to json failed", e);
}
}
private void toSlime(Node node, Cursor object) {
object.setString(hostnameKey, node.hostname());
object.setString(stateKey, toString(node.state()));
toSlime(node.ipConfig().primary(), object.setArray(ipAddressesKey), true);
toSlime(node.ipConfig().pool().ips(), object.setArray(ipAddressPoolKey), true);
toSlime(node.ipConfig().pool().hostnames(), object);
object.setString(idKey, node.id());
node.extraId().ifPresent(id -> object.setString(extraIdKey, id));
node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname));
toSlime(node.flavor(), object);
object.setLong(rebootGenerationKey, node.status().reboot().wanted());
object.setLong(currentRebootGenerationKey, node.status().reboot().current());
node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString()));
node.status().containerImage().ifPresent(image -> object.setString(currentContainerImageKey, image.asString()));
object.setLong(failCountKey, node.status().failCount());
object.setBool(wantToRetireKey, node.status().wantToRetire());
object.setBool(preferToRetireKey, node.status().preferToRetire());
object.setBool(wantToDeprovisionKey, node.status().wantToDeprovision());
object.setBool(wantToFailKey, node.status().wantToFail());
object.setBool(wantToRebuildKey, node.status().wantToRebuild());
object.setBool(wantToUpgradeFlavorKey, node.status().wantToUpgradeFlavor());
node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey)));
toSlime(node.history().events(), object.setArray(historyKey));
toSlime(node.history().log(), object.setArray(logKey));
object.setString(nodeTypeKey, toString(node.type()));
node.status().osVersion().current().ifPresent(version -> object.setString(osVersionKey, version.toString()));
node.status().osVersion().wanted().ifPresent(version -> object.setString(wantedOsVersionKey, version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong(firmwareCheckKey, instant.toEpochMilli()));
node.switchHostname().ifPresent(switchHostname -> object.setString(switchHostnameKey, switchHostname));
node.reports().toSlime(object, reportsKey);
node.modelName().ifPresent(modelName -> object.setString(modelNameKey, modelName));
node.reservedTo().ifPresent(tenant -> object.setString(reservedToKey, tenant.value()));
node.exclusiveToApplicationId().ifPresent(applicationId -> object.setString(exclusiveToApplicationIdKey, applicationId.serializedForm()));
node.hostTTL().ifPresent(hostTTL -> object.setLong(hostTTLKey, hostTTL.toMillis()));
node.hostEmptyAt().ifPresent(emptyAt -> object.setLong(hostEmptyAtKey, emptyAt.toEpochMilli()));
node.exclusiveToClusterType().ifPresent(clusterType -> object.setString(exclusiveToClusterTypeKey, clusterType.name()));
trustedCertificatesToSlime(node.trustedCertificates(), object.setArray(trustedCertificatesKey));
if (!node.cloudAccount().isUnspecified()) {
object.setString(cloudAccountKey, node.cloudAccount().value());
}
node.wireguardPubKey().ifPresent(pubKey -> object.setString(wireguardPubKeyKey, pubKey.value()));
}
private void toSlime(Flavor flavor, Cursor object) {
if (flavor.isConfigured()) {
object.setString(flavorKey, flavor.name());
if (flavor.flavorOverrides().isPresent()) {
Cursor resourcesObject = object.setObject(resourcesKey);
flavor.flavorOverrides().get().diskGb().ifPresent(diskGb -> resourcesObject.setDouble(diskKey, diskGb));
}
}
else {
NodeResourcesSerializer.toSlime(flavor.resources(), object.setObject(resourcesKey));
}
}
private void toSlime(Allocation allocation, Cursor object) {
NodeResourcesSerializer.toSlime(allocation.requestedResources(), object.setObject(requestedResourcesKey));
object.setString(tenantIdKey, allocation.owner().tenant().value());
object.setString(applicationIdKey, allocation.owner().application().value());
object.setString(instanceIdKey, allocation.owner().instance().value());
object.setString(serviceIdKey, allocation.membership().stringValue());
object.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
object.setBool(removableKey, allocation.removable());
object.setBool(reusableKey, allocation.reusable());
object.setString(wantedVespaVersionKey, allocation.membership().cluster().vespaVersion().toString());
allocation.membership().cluster().dockerImageRepo().ifPresent(repo -> object.setString(wantedContainerImageRepoKey, repo.untagged()));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray(networkPortsKey)));
}
private void toSlime(Collection<History.Event> events, Cursor array) {
for (History.Event event : events)
toSlime(event, array.addObject());
}
private void toSlime(History.Event event, Cursor object) {
object.setString(historyEventTypeKey, toString(event.type()));
object.setLong(atKey, event.at().toEpochMilli());
object.setString(agentKey, toString(event.agent()));
}
private void toSlime(List<HostName> hostnames, Cursor object) {
if (hostnames.isEmpty()) return;
Cursor containersArray = object.setArray(containersKey);
hostnames.forEach(hostname -> {
containersArray.addObject().setString(containerHostnameKey, hostname.value());
});
}
private void trustedCertificatesToSlime(List<TrustStoreItem> trustStoreItems, Cursor array) {
trustStoreItems.forEach(cert -> {
Cursor object = array.addObject();
object.setString(fingerprintKey, cert.fingerprint());
object.setLong(expiresKey, cert.expiry().toEpochMilli());
});
}
public Node fromJson(byte[] data) {
return nodeFromSlime(SlimeUtils.jsonToSlime(data).get());
}
private Node nodeFromSlime(Inspector object) {
Flavor flavor = flavorFromSlime(object);
return new Node(object.field(idKey).asString(),
SlimeUtils.optionalString(object.field(extraIdKey)),
IP.Config.of(ipAddressesFromSlime(object, ipAddressesKey),
ipAddressesFromSlime(object, ipAddressPoolKey),
hostnamesFromSlime(object)),
object.field(hostnameKey).asString(),
SlimeUtils.optionalString(object.field(parentHostnameKey)),
flavor,
statusFromSlime(object),
nodeStateFromString(object.field(stateKey).asString()),
allocationFromSlime(flavor.resources(), object.field(instanceKey)),
historyFromSlime(object),
nodeTypeFromString(object.field(nodeTypeKey).asString()),
Reports.fromSlime(object.field(reportsKey)),
SlimeUtils.optionalString(object.field(modelNameKey)),
SlimeUtils.optionalString(object.field(reservedToKey)).map(TenantName::from),
SlimeUtils.optionalString(object.field(exclusiveToApplicationIdKey)).map(ApplicationId::fromSerializedForm),
SlimeUtils.optionalDuration(object.field(hostTTLKey)),
SlimeUtils.optionalInstant(object.field(hostEmptyAtKey)),
SlimeUtils.optionalString(object.field(exclusiveToClusterTypeKey)).map(ClusterSpec.Type::from),
SlimeUtils.optionalString(object.field(switchHostnameKey)),
trustedCertificatesFromSlime(object),
SlimeUtils.optionalString(object.field(cloudAccountKey)).map(CloudAccount::from).orElse(CloudAccount.empty),
SlimeUtils.optionalString(object.field(wireguardPubKeyKey)).map(WireguardKey::from));
}
private Status statusFromSlime(Inspector object) {
return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey),
versionFromSlime(object.field(vespaVersionKey)),
containerImageFromSlime(object.field(currentContainerImageKey)),
(int) object.field(failCountKey).asLong(),
object.field(wantToRetireKey).asBool(),
object.field(wantToDeprovisionKey).asBool(),
object.field(wantToRebuildKey).asBool(),
object.field(preferToRetireKey).asBool(),
object.field(wantToFailKey).asBool(),
object.field(wantToUpgradeFlavorKey).asBool(),
new OsVersion(versionFromSlime(object.field(osVersionKey)),
versionFromSlime(object.field(wantedOsVersionKey))),
SlimeUtils.optionalInstant(object.field(firmwareCheckKey)));
}
private Flavor flavorFromSlime(Inspector object) {
Inspector resources = object.field(resourcesKey);
if (object.field(flavorKey).valid()) {
Flavor flavor = flavors.getFlavorOrThrow(object.field(flavorKey).asString());
if (!resources.valid()) return flavor;
return flavor.with(FlavorOverrides.ofDisk(resources.field(diskKey).asDouble()));
}
else {
return new Flavor(NodeResourcesSerializer.resourcesFromSlime(resources));
}
}
private Optional<Allocation> allocationFromSlime(NodeResources assignedResources, Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(new Allocation(applicationIdFromSlime(object),
clusterMembershipFromSlime(object),
NodeResourcesSerializer.optionalResourcesFromSlime(object.field(requestedResourcesKey))
.orElse(assignedResources),
generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey),
object.field(removableKey).asBool(),
object.field(reusableKey).asBool(),
NetworkPortsSerializer.fromSlime(object.field(networkPortsKey))));
}
private ApplicationId applicationIdFromSlime(Inspector object) {
return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()),
ApplicationName.from(object.field(applicationIdKey).asString()),
InstanceName.from(object.field(instanceIdKey).asString()));
}
private History historyFromSlime(Inspector object) {
return new History(eventsFromSlime(object.field(historyKey)),
eventsFromSlime(object.field(logKey)));
}
private List<History.Event> eventsFromSlime(Inspector array) {
if (!array.valid()) return List.of();
List<History.Event> events = new ArrayList<>();
array.traverse((ArrayTraverser) (int i, Inspector item) -> {
History.Event event = eventFromSlime(item);
if (event != null)
events.add(event);
});
return events;
}
private History.Event eventFromSlime(Inspector object) {
History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString());
if (type == null) return null;
Instant at = Instant.ofEpochMilli(object.field(atKey).asLong());
Agent agent = eventAgentFromSlime(object.field(agentKey));
return new History.Event(type, agent, at);
}
private Generation generationFromSlime(Inspector object, String wantedField, String currentField) {
Inspector current = object.field(currentField);
return new Generation(object.field(wantedField).asLong(), current.asLong());
}
private ClusterMembership clusterMembershipFromSlime(Inspector object) {
return ClusterMembership.from(object.field(serviceIdKey).asString(),
versionFromSlime(object.field(wantedVespaVersionKey)).get(),
containerImageFromSlime(object.field(wantedContainerImageRepoKey)));
}
private Optional<Version> versionFromSlime(Inspector object) {
return object.valid() ? Optional.of(Version.fromString(object.asString())) : Optional.empty();
}
private Optional<DockerImage> containerImageFromSlime(Inspector object) {
return SlimeUtils.optionalString(object).map(DockerImage::fromString);
}
private List<String> ipAddressesFromSlime(Inspector object, String key) {
var ipAddresses = new ArrayList<String>();
object.field(key).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString()));
return ipAddresses;
}
private List<HostName> hostnamesFromSlime(Inspector object) {
return SlimeUtils.entriesStream(object.field(containersKey))
.map(elem -> HostName.of(elem.field(containerHostnameKey).asString()))
.toList();
}
private List<TrustStoreItem> trustedCertificatesFromSlime(Inspector object) {
return SlimeUtils.entriesStream(object.field(trustedCertificatesKey))
.map(elem -> new TrustStoreItem(elem.field(fingerprintKey).asString(),
Instant.ofEpochMilli(elem.field(expiresKey).asLong())))
.toList();
}
/** Returns the event type, or null if this event type should be ignored */
private History.Event.Type eventTypeFromString(String eventTypeString) {
return switch (eventTypeString) {
case "provisioned" -> History.Event.Type.provisioned;
case "deprovisioned" -> History.Event.Type.deprovisioned;
case "readied" -> History.Event.Type.readied;
case "reserved" -> History.Event.Type.reserved;
case "activated" -> History.Event.Type.activated;
case "wantToRetire" -> History.Event.Type.wantToRetire;
case "wantToFail" -> History.Event.Type.wantToFail;
case "retired" -> History.Event.Type.retired;
case "deactivated" -> History.Event.Type.deactivated;
case "parked" -> History.Event.Type.parked;
case "failed" -> History.Event.Type.failed;
case "deallocated" -> History.Event.Type.deallocated;
case "down" -> History.Event.Type.down;
case "up" -> History.Event.Type.up;
case "resized" -> History.Event.Type.resized;
case "rebooted" -> History.Event.Type.rebooted;
case "osUpgraded" -> History.Event.Type.osUpgraded;
case "firmwareVerified" -> History.Event.Type.firmwareVerified;
case "breakfixed" -> History.Event.Type.breakfixed;
case "preferToRetire" -> History.Event.Type.preferToRetire;
default -> throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'");
};
}
private String toString(History.Event.Type nodeEventType) {
return switch (nodeEventType) {
case provisioned -> "provisioned";
case deprovisioned -> "deprovisioned";
case readied -> "readied";
case reserved -> "reserved";
case activated -> "activated";
case wantToRetire -> "wantToRetire";
case wantToFail -> "wantToFail";
case retired -> "retired";
case deactivated -> "deactivated";
case parked -> "parked";
case failed -> "failed";
case deallocated -> "deallocated";
case down -> "down";
case up -> "up";
case resized -> "resized";
case rebooted -> "rebooted";
case osUpgraded -> "osUpgraded";
case firmwareVerified -> "firmwareVerified";
case breakfixed -> "breakfixed";
case preferToRetire -> "preferToRetire";
};
}
private Agent eventAgentFromSlime(Inspector eventAgentField) {
return switch (eventAgentField.asString()) {
case "operator" -> Agent.operator;
case "application" -> Agent.application;
case "system" -> Agent.system;
case "nodeAdmin" -> Agent.nodeAdmin;
case "DirtyExpirer" -> Agent.DirtyExpirer;
case "DynamicProvisioningMaintainer", "HostCapacityMaintainer" -> Agent.HostCapacityMaintainer;
case "HostResumeProvisioner" -> Agent.HostResumeProvisioner;
case "FailedExpirer" -> Agent.FailedExpirer;
case "InactiveExpirer" -> Agent.InactiveExpirer;
case "NodeFailer" -> Agent.NodeFailer;
case "NodeHealthTracker" -> Agent.NodeHealthTracker;
case "ProvisionedExpirer" -> Agent.ProvisionedExpirer;
case "Rebalancer" -> Agent.Rebalancer;
case "ReservationExpirer" -> Agent.ReservationExpirer;
case "RetiringUpgrader" -> Agent.RetiringOsUpgrader;
case "RebuildingOsUpgrader" -> Agent.RebuildingOsUpgrader;
case "SpareCapacityMaintainer" -> Agent.SpareCapacityMaintainer;
case "SwitchRebalancer" -> Agent.SwitchRebalancer;
case "HostEncrypter" -> Agent.HostEncrypter;
case "ParkedExpirer" -> Agent.ParkedExpirer;
case "HostFlavorUpgrader" -> Agent.HostFlavorUpgrader;
default -> throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
};
}
private String toString(Agent agent) {
return switch (agent) {
case operator -> "operator";
case application -> "application";
case system -> "system";
case nodeAdmin -> "nodeAdmin";
case DirtyExpirer -> "DirtyExpirer";
case HostCapacityMaintainer -> "DynamicProvisioningMaintainer";
case HostResumeProvisioner -> "HostResumeProvisioner";
case FailedExpirer -> "FailedExpirer";
case InactiveExpirer -> "InactiveExpirer";
case NodeFailer -> "NodeFailer";
case NodeHealthTracker -> "NodeHealthTracker";
case ProvisionedExpirer -> "ProvisionedExpirer";
case Rebalancer -> "Rebalancer";
case ReservationExpirer -> "ReservationExpirer";
case RetiringOsUpgrader -> "RetiringUpgrader";
case RebuildingOsUpgrader -> "RebuildingOsUpgrader";
case SpareCapacityMaintainer -> "SpareCapacityMaintainer";
case SwitchRebalancer -> "SwitchRebalancer";
case HostEncrypter -> "HostEncrypter";
case ParkedExpirer -> "ParkedExpirer";
case HostFlavorUpgrader -> "HostFlavorUpgrader";
};
}
static NodeType nodeTypeFromString(String typeString) {
return switch (typeString) {
case "tenant" -> NodeType.tenant;
case "host" -> NodeType.host;
case "proxy" -> NodeType.proxy;
case "proxyhost" -> NodeType.proxyhost;
case "config" -> NodeType.config;
case "confighost" -> NodeType.confighost;
case "controller" -> NodeType.controller;
case "controllerhost" -> NodeType.controllerhost;
default -> throw new IllegalArgumentException("Unknown node type '" + typeString + "'");
};
}
static String toString(NodeType type) {
return switch (type) {
case tenant -> "tenant";
case host -> "host";
case proxy -> "proxy";
case proxyhost -> "proxyhost";
case config -> "config";
case confighost -> "confighost";
case controller -> "controller";
case controllerhost -> "controllerhost";
};
}
static Node.State nodeStateFromString(String state) {
return switch (state) {
case "active" -> Node.State.active;
case "dirty" -> Node.State.dirty;
case "failed" -> Node.State.failed;
case "inactive" -> Node.State.inactive;
case "parked" -> Node.State.parked;
case "provisioned" -> Node.State.provisioned;
case "ready" -> Node.State.ready;
case "reserved" -> Node.State.reserved;
case "deprovisioned" -> Node.State.deprovisioned;
case "breakfixed" -> Node.State.breakfixed;
default -> throw new IllegalArgumentException("Unknown node state '" + state + "'");
};
}
static String toString(Node.State state) {
return switch (state) {
case active -> "active";
case dirty -> "dirty";
case failed -> "failed";
case inactive -> "inactive";
case parked -> "parked";
case provisioned -> "provisioned";
case ready -> "ready";
case reserved -> "reserved";
case deprovisioned -> "deprovisioned";
case breakfixed -> "breakfixed";
};
}
} | class NodeSerializer {
/** The configured node flavors */
private final NodeFlavors flavors;
private static final String stateKey = "state";
private static final String hostnameKey = "hostname";
private static final String ipAddressesKey = "ipAddresses";
private static final String ipAddressPoolKey = "additionalIpAddresses";
private static final String containersKey = "containers";
private static final String containerHostnameKey = "hostname";
private static final String idKey = "openStackId";
private static final String extraIdKey = "extraId";
private static final String parentHostnameKey = "parentHostname";
private static final String historyKey = "history";
private static final String logKey = "log";
private static final String instanceKey = "instance";
private static final String rebootGenerationKey = "rebootGeneration";
private static final String currentRebootGenerationKey = "currentRebootGeneration";
private static final String vespaVersionKey = "vespaVersion";
private static final String currentContainerImageKey = "currentDockerImage";
private static final String failCountKey = "failCount";
private static final String nodeTypeKey = "type";
// --- Status flags ---
// NOTE: every value below is part of the persisted JSON format; changing a value
// is a wire/storage format change, not a rename.
private static final String wantToRetireKey = "wantToRetire";
private static final String wantToDeprovisionKey = "wantToDeprovision";
private static final String wantToRebuildKey = "wantToRebuild";
private static final String preferToRetireKey = "preferToRetire";
// Intentionally "wantToFailKey" (with the stray "Key" suffix): looks like a historical
// typo, but it must be kept as-is to stay compatible with already-written data.
private static final String wantToFailKey = "wantToFailKey";
private static final String wantToUpgradeFlavorKey = "wantToUpgradeFlavor";
// --- OS / firmware ---
private static final String osVersionKey = "osVersion";
private static final String wantedOsVersionKey = "wantedOsVersion";
private static final String firmwareCheckKey = "firmwareCheck";
// --- Misc node properties ---
private static final String reportsKey = "reports";
private static final String modelNameKey = "modelName";
private static final String reservedToKey = "reservedTo";
private static final String exclusiveToApplicationIdKey = "exclusiveTo";
private static final String hostTTLKey = "hostTTL";
private static final String hostEmptyAtKey = "hostEmptyAt";
private static final String exclusiveToClusterTypeKey = "exclusiveToClusterType";
private static final String switchHostnameKey = "switchHostname";
private static final String trustedCertificatesKey = "trustedCertificates";
private static final String cloudAccountKey = "cloudAccount";
private static final String wireguardPubKeyKey = "wireguardPubkey";
// --- Flavor / resources ---
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String diskKey = "disk";
// --- Allocation (the "instance" sub-object) ---
private static final String tenantIdKey = "tenantId";
private static final String applicationIdKey = "applicationId";
private static final String instanceIdKey = "instanceId";
private static final String serviceIdKey = "serviceId";
private static final String requestedResourcesKey = "requestedResources";
private static final String restartGenerationKey = "restartGeneration";
private static final String currentRestartGenerationKey = "currentRestartGeneration";
private static final String removableKey = "removable";
private static final String reusableKey = "reusable";
private static final String wantedVespaVersionKey = "wantedVespaVersion";
// Serialized name kept as "wantedDockerImageRepo" (legacy) for compatibility.
private static final String wantedContainerImageRepoKey = "wantedDockerImageRepo";
// --- History events ---
private static final String historyEventTypeKey = "type";
private static final String atKey = "at";
private static final String agentKey = "agent";
private static final String networkPortsKey = "networkPorts";
// --- Trusted certificates ---
private static final String fingerprintKey = "fingerprint";
private static final String expiresKey = "expires";
/** Creates a serializer which resolves configured flavor names through the given {@link NodeFlavors}. */
public NodeSerializer(NodeFlavors flavors) {
this.flavors = flavors;
}
/**
 * Serializes the given node to its JSON byte representation.
 *
 * @throws RuntimeException wrapping the IOException if JSON encoding fails
 */
public byte[] toJson(Node node) {
Slime root = new Slime();
toSlime(node, root.setObject());
try {
return SlimeUtils.toJsonBytes(root);
} catch (IOException e) {
throw new RuntimeException("Serialization of " + node + " to json failed", e);
}
}
/**
 * Serializes all persisted fields of the given node into the given Slime object.
 * Optional fields are only written when present; keys are the *Key constants above.
 */
private void toSlime(Node node, Cursor object) {
object.setString(hostnameKey, node.hostname());
object.setString(stateKey, toString(node.state()));
// IP lists go through the 3-arg overload (its trailing boolean is an erasure-disambiguation dummy)
toSlime(node.ipConfig().primary(), object.setArray(ipAddressesKey), true);
toSlime(node.ipConfig().pool().ips(), object.setArray(ipAddressPoolKey), true);
toSlime(node.ipConfig().pool().hostnames(), object);
object.setString(idKey, node.id());
node.extraId().ifPresent(id -> object.setString(extraIdKey, id));
node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname));
toSlime(node.flavor(), object);
object.setLong(rebootGenerationKey, node.status().reboot().wanted());
object.setLong(currentRebootGenerationKey, node.status().reboot().current());
node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString()));
node.status().containerImage().ifPresent(image -> object.setString(currentContainerImageKey, image.asString()));
object.setLong(failCountKey, node.status().failCount());
object.setBool(wantToRetireKey, node.status().wantToRetire());
object.setBool(preferToRetireKey, node.status().preferToRetire());
object.setBool(wantToDeprovisionKey, node.status().wantToDeprovision());
object.setBool(wantToFailKey, node.status().wantToFail());
object.setBool(wantToRebuildKey, node.status().wantToRebuild());
object.setBool(wantToUpgradeFlavorKey, node.status().wantToUpgradeFlavor());
// Allocation is written as a nested "instance" object only for allocated nodes
node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey)));
toSlime(node.history().events(), object.setArray(historyKey));
toSlime(node.history().log(), object.setArray(logKey));
object.setString(nodeTypeKey, toString(node.type()));
node.status().osVersion().current().ifPresent(version -> object.setString(osVersionKey, version.toString()));
// NOTE(review): wanted OS version uses toFullString() while current uses toString() —
// presumably intentional (full precision for the target); confirm before unifying.
node.status().osVersion().wanted().ifPresent(version -> object.setString(wantedOsVersionKey, version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong(firmwareCheckKey, instant.toEpochMilli()));
node.switchHostname().ifPresent(switchHostname -> object.setString(switchHostnameKey, switchHostname));
node.reports().toSlime(object, reportsKey);
node.modelName().ifPresent(modelName -> object.setString(modelNameKey, modelName));
node.reservedTo().ifPresent(tenant -> object.setString(reservedToKey, tenant.value()));
node.exclusiveToApplicationId().ifPresent(applicationId -> object.setString(exclusiveToApplicationIdKey, applicationId.serializedForm()));
node.hostTTL().ifPresent(hostTTL -> object.setLong(hostTTLKey, hostTTL.toMillis()));
node.hostEmptyAt().ifPresent(emptyAt -> object.setLong(hostEmptyAtKey, emptyAt.toEpochMilli()));
node.exclusiveToClusterType().ifPresent(clusterType -> object.setString(exclusiveToClusterTypeKey, clusterType.name()));
trustedCertificatesToSlime(node.trustedCertificates(), object.setArray(trustedCertificatesKey));
// The cloud account field is omitted entirely when unspecified
if (!node.cloudAccount().isUnspecified()) {
object.setString(cloudAccountKey, node.cloudAccount().value());
}
node.wireguardPubKey().ifPresent(pubKey -> object.setString(wireguardPubKeyKey, pubKey.value()));
}
/**
 * Serializes a flavor: configured flavors are written by name (plus an optional disk
 * override), non-configured flavors by their full resources.
 */
private void toSlime(Flavor flavor, Cursor object) {
if ( ! flavor.isConfigured()) {
NodeResourcesSerializer.toSlime(flavor.resources(), object.setObject(resourcesKey));
return;
}
object.setString(flavorKey, flavor.name());
flavor.flavorOverrides().ifPresent(overrides -> {
Cursor resources = object.setObject(resourcesKey);
overrides.diskGb().ifPresent(diskGb -> resources.setDouble(diskKey, diskGb));
});
}
/** Serializes an allocation into the "instance" sub-object. */
private void toSlime(Allocation allocation, Cursor object) {
NodeResourcesSerializer.toSlime(allocation.requestedResources(), object.setObject(requestedResourcesKey));
object.setString(tenantIdKey, allocation.owner().tenant().value());
object.setString(applicationIdKey, allocation.owner().application().value());
object.setString(instanceIdKey, allocation.owner().instance().value());
// Cluster membership is serialized as a single string; see ClusterMembership.stringValue()
object.setString(serviceIdKey, allocation.membership().stringValue());
object.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
object.setBool(removableKey, allocation.removable());
object.setBool(reusableKey, allocation.reusable());
object.setString(wantedVespaVersionKey, allocation.membership().cluster().vespaVersion().toString());
allocation.membership().cluster().dockerImageRepo().ifPresent(repo -> object.setString(wantedContainerImageRepoKey, repo.untagged()));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray(networkPortsKey)));
}
/** Appends one object per history event to the given array, in iteration order. */
private void toSlime(Collection<History.Event> events, Cursor array) {
events.forEach(event -> toSlime(event, array.addObject()));
}
/** Serializes a single history event as {type, at (epoch millis), agent}. */
private void toSlime(History.Event event, Cursor object) {
object.setString(historyEventTypeKey, toString(event.type()));
object.setLong(atKey, event.at().toEpochMilli());
object.setString(agentKey, toString(event.agent()));
}
/** Writes the hostname pool as a "containers" array; writes nothing when the list is empty. */
private void toSlime(List<HostName> hostnames, Cursor object) {
if (hostnames.isEmpty()) return;
Cursor containers = object.setArray(containersKey);
for (HostName hostname : hostnames)
containers.addObject().setString(containerHostnameKey, hostname.value());
}
/** Serializes each trusted certificate as {fingerprint, expires (epoch millis)}. */
private void trustedCertificatesToSlime(List<TrustStoreItem> trustStoreItems, Cursor array) {
for (TrustStoreItem cert : trustStoreItems) {
Cursor entry = array.addObject();
entry.setString(fingerprintKey, cert.fingerprint());
entry.setLong(expiresKey, cert.expiry().toEpochMilli());
}
}
/** Deserializes a node from its JSON byte representation, the inverse of {@link #toJson}. */
public Node fromJson(byte[] data) {
return nodeFromSlime(SlimeUtils.jsonToSlime(data).get());
}
/**
 * Builds a Node from its serialized form. The argument order below must match the
 * Node constructor; missing optional fields fall back to empty/defaults.
 */
private Node nodeFromSlime(Inspector object) {
// Flavor is read first because the allocation's requested resources default to it
Flavor flavor = flavorFromSlime(object);
return new Node(object.field(idKey).asString(),
SlimeUtils.optionalString(object.field(extraIdKey)),
IP.Config.of(ipAddressesFromSlime(object, ipAddressesKey),
ipAddressesFromSlime(object, ipAddressPoolKey),
hostnamesFromSlime(object)),
object.field(hostnameKey).asString(),
SlimeUtils.optionalString(object.field(parentHostnameKey)),
flavor,
statusFromSlime(object),
nodeStateFromString(object.field(stateKey).asString()),
allocationFromSlime(flavor.resources(), object.field(instanceKey)),
historyFromSlime(object),
nodeTypeFromString(object.field(nodeTypeKey).asString()),
Reports.fromSlime(object.field(reportsKey)),
SlimeUtils.optionalString(object.field(modelNameKey)),
SlimeUtils.optionalString(object.field(reservedToKey)).map(TenantName::from),
SlimeUtils.optionalString(object.field(exclusiveToApplicationIdKey)).map(ApplicationId::fromSerializedForm),
SlimeUtils.optionalDuration(object.field(hostTTLKey)),
SlimeUtils.optionalInstant(object.field(hostEmptyAtKey)),
SlimeUtils.optionalString(object.field(exclusiveToClusterTypeKey)).map(ClusterSpec.Type::from),
SlimeUtils.optionalString(object.field(switchHostnameKey)),
trustedCertificatesFromSlime(object),
// Absent cloud account deserializes to the empty account (matches omission in toSlime)
SlimeUtils.optionalString(object.field(cloudAccountKey)).map(CloudAccount::from).orElse(CloudAccount.empty),
SlimeUtils.optionalString(object.field(wireguardPubKeyKey)).map(WireguardKey::from));
}
/** Reads the node status; argument order must match the Status constructor. */
private Status statusFromSlime(Inspector object) {
return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey),
versionFromSlime(object.field(vespaVersionKey)),
containerImageFromSlime(object.field(currentContainerImageKey)),
(int) object.field(failCountKey).asLong(),
object.field(wantToRetireKey).asBool(),
object.field(wantToDeprovisionKey).asBool(),
object.field(wantToRebuildKey).asBool(),
object.field(preferToRetireKey).asBool(),
// wantToFail is stored under the literal key "wantToFailKey" (historical quirk)
object.field(wantToFailKey).asBool(),
object.field(wantToUpgradeFlavorKey).asBool(),
new OsVersion(versionFromSlime(object.field(osVersionKey)),
versionFromSlime(object.field(wantedOsVersionKey))),
SlimeUtils.optionalInstant(object.field(firmwareCheckKey)));
}
/**
 * Reads a flavor: a configured flavor is looked up by name (optionally with a disk
 * override); otherwise the flavor is built from explicit resources.
 */
private Flavor flavorFromSlime(Inspector object) {
Inspector flavorName = object.field(flavorKey);
Inspector resources = object.field(resourcesKey);
if ( ! flavorName.valid())
return new Flavor(NodeResourcesSerializer.resourcesFromSlime(resources));
Flavor configured = flavors.getFlavorOrThrow(flavorName.asString());
if ( ! resources.valid()) return configured;
return configured.with(FlavorOverrides.ofDisk(resources.field(diskKey).asDouble()));
}
/**
 * Reads the allocation from the "instance" sub-object, or empty if the node is unallocated.
 * Requested resources default to the node's assigned (flavor) resources when absent.
 */
private Optional<Allocation> allocationFromSlime(NodeResources assignedResources, Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(new Allocation(applicationIdFromSlime(object),
clusterMembershipFromSlime(object),
NodeResourcesSerializer.optionalResourcesFromSlime(object.field(requestedResourcesKey))
.orElse(assignedResources),
generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey),
object.field(removableKey).asBool(),
object.field(reusableKey).asBool(),
NetworkPortsSerializer.fromSlime(object.field(networkPortsKey))));
}
/** Reads the owning application id from its tenant/application/instance fields. */
private ApplicationId applicationIdFromSlime(Inspector object) {
TenantName tenant = TenantName.from(object.field(tenantIdKey).asString());
ApplicationName application = ApplicationName.from(object.field(applicationIdKey).asString());
InstanceName instance = InstanceName.from(object.field(instanceIdKey).asString());
return ApplicationId.from(tenant, application, instance);
}
/** Reads the node history: the current events and the historical log. */
private History historyFromSlime(Inspector object) {
return new History(eventsFromSlime(object.field(historyKey)),
eventsFromSlime(object.field(logKey)));
}
/** Reads the events of a history array, dropping entries eventFromSlime maps to null. */
private List<History.Event> eventsFromSlime(Inspector array) {
if ( ! array.valid()) return List.of();
List<History.Event> events = new ArrayList<>();
array.traverse((ArrayTraverser) (index, entry) -> {
History.Event event = eventFromSlime(entry);
if (event == null) return; // ignored event type
events.add(event);
});
return events;
}
/** Reads a single history event, or returns null if its type should be ignored. */
private History.Event eventFromSlime(Inspector object) {
History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString());
if (type == null) return null;
Instant at = Instant.ofEpochMilli(object.field(atKey).asLong());
Agent agent = eventAgentFromSlime(object.field(agentKey));
return new History.Event(type, agent, at);
}
/** Reads a wanted/current generation pair from the two given fields. */
private Generation generationFromSlime(Inspector object, String wantedField, String currentField) {
long wanted = object.field(wantedField).asLong();
long current = object.field(currentField).asLong();
return new Generation(wanted, current);
}
/**
 * Reads the cluster membership of an allocation. wantedVespaVersion is required:
 * the Optional.get() below throws if the field is missing from stored data.
 */
private ClusterMembership clusterMembershipFromSlime(Inspector object) {
return ClusterMembership.from(object.field(serviceIdKey).asString(),
versionFromSlime(object.field(wantedVespaVersionKey)).get(),
containerImageFromSlime(object.field(wantedContainerImageRepoKey)));
}
/** Returns the version stored in the given field, or empty if the field is absent. */
private Optional<Version> versionFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(Version.fromString(object.asString()));
}
/** Returns the container image stored in the given field, or empty if absent. */
private Optional<DockerImage> containerImageFromSlime(Inspector object) {
return SlimeUtils.optionalString(object).map(DockerImage::fromString);
}
/** Reads the string entries of the array stored under the given key. */
private List<String> ipAddressesFromSlime(Inspector object, String key) {
List<String> addresses = new ArrayList<>();
object.field(key).traverse((ArrayTraverser) (index, entry) -> addresses.add(entry.asString()));
return addresses;
}
/** Reads the hostname pool from the "containers" array; empty when the field is absent. */
private List<HostName> hostnamesFromSlime(Inspector object) {
return SlimeUtils.entriesStream(object.field(containersKey))
.map(elem -> HostName.of(elem.field(containerHostnameKey).asString()))
.toList();
}
/** Reads the trusted certificates ({fingerprint, expires}); empty when the field is absent. */
private List<TrustStoreItem> trustedCertificatesFromSlime(Inspector object) {
return SlimeUtils.entriesStream(object.field(trustedCertificatesKey))
.map(elem -> new TrustStoreItem(elem.field(fingerprintKey).asString(),
Instant.ofEpochMilli(elem.field(expiresKey).asLong())))
.toList();
}
/**
 * Returns the event type for the given serialized name.
 * Callers (eventFromSlime) tolerate a null return, meaning "ignore this event", but no
 * case currently returns null: unknown names throw IllegalArgumentException instead.
 */
private History.Event.Type eventTypeFromString(String eventTypeString) {
return switch (eventTypeString) {
case "provisioned" -> History.Event.Type.provisioned;
case "deprovisioned" -> History.Event.Type.deprovisioned;
case "readied" -> History.Event.Type.readied;
case "reserved" -> History.Event.Type.reserved;
case "activated" -> History.Event.Type.activated;
case "wantToRetire" -> History.Event.Type.wantToRetire;
case "wantToFail" -> History.Event.Type.wantToFail;
case "retired" -> History.Event.Type.retired;
case "deactivated" -> History.Event.Type.deactivated;
case "parked" -> History.Event.Type.parked;
case "failed" -> History.Event.Type.failed;
case "deallocated" -> History.Event.Type.deallocated;
case "down" -> History.Event.Type.down;
case "up" -> History.Event.Type.up;
case "resized" -> History.Event.Type.resized;
case "rebooted" -> History.Event.Type.rebooted;
case "osUpgraded" -> History.Event.Type.osUpgraded;
case "firmwareVerified" -> History.Event.Type.firmwareVerified;
case "breakfixed" -> History.Event.Type.breakfixed;
case "preferToRetire" -> History.Event.Type.preferToRetire;
default -> throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'");
};
}
/**
 * Serialized name of a history event type. An explicit, exhaustive switch (rather than
 * name()) keeps the stored format from changing silently if an enum constant is renamed.
 */
private String toString(History.Event.Type nodeEventType) {
return switch (nodeEventType) {
case provisioned -> "provisioned";
case deprovisioned -> "deprovisioned";
case readied -> "readied";
case reserved -> "reserved";
case activated -> "activated";
case wantToRetire -> "wantToRetire";
case wantToFail -> "wantToFail";
case retired -> "retired";
case deactivated -> "deactivated";
case parked -> "parked";
case failed -> "failed";
case deallocated -> "deallocated";
case down -> "down";
case up -> "up";
case resized -> "resized";
case rebooted -> "rebooted";
case osUpgraded -> "osUpgraded";
case firmwareVerified -> "firmwareVerified";
case breakfixed -> "breakfixed";
case preferToRetire -> "preferToRetire";
};
}
/**
 * Deserializes an event agent. Some cases map legacy serialized names to current enum
 * constants: "DynamicProvisioningMaintainer" -> HostCapacityMaintainer and
 * "RetiringUpgrader" -> RetiringOsUpgrader.
 */
private Agent eventAgentFromSlime(Inspector eventAgentField) {
return switch (eventAgentField.asString()) {
case "operator" -> Agent.operator;
case "application" -> Agent.application;
case "system" -> Agent.system;
case "nodeAdmin" -> Agent.nodeAdmin;
case "DirtyExpirer" -> Agent.DirtyExpirer;
case "DynamicProvisioningMaintainer", "HostCapacityMaintainer" -> Agent.HostCapacityMaintainer;
case "HostResumeProvisioner" -> Agent.HostResumeProvisioner;
case "FailedExpirer" -> Agent.FailedExpirer;
case "InactiveExpirer" -> Agent.InactiveExpirer;
case "NodeFailer" -> Agent.NodeFailer;
case "NodeHealthTracker" -> Agent.NodeHealthTracker;
case "ProvisionedExpirer" -> Agent.ProvisionedExpirer;
case "Rebalancer" -> Agent.Rebalancer;
case "ReservationExpirer" -> Agent.ReservationExpirer;
case "RetiringUpgrader" -> Agent.RetiringOsUpgrader;
case "RebuildingOsUpgrader" -> Agent.RebuildingOsUpgrader;
case "SpareCapacityMaintainer" -> Agent.SpareCapacityMaintainer;
case "SwitchRebalancer" -> Agent.SwitchRebalancer;
case "HostEncrypter" -> Agent.HostEncrypter;
case "ParkedExpirer" -> Agent.ParkedExpirer;
case "HostFlavorUpgrader" -> Agent.HostFlavorUpgrader;
default -> throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
};
}
/**
 * Serialized name of an agent. HostCapacityMaintainer and RetiringOsUpgrader are still
 * written under their legacy names ("DynamicProvisioningMaintainer", "RetiringUpgrader")
 * — presumably for compatibility with already-stored data; verify before renaming.
 */
private String toString(Agent agent) {
return switch (agent) {
case operator -> "operator";
case application -> "application";
case system -> "system";
case nodeAdmin -> "nodeAdmin";
case DirtyExpirer -> "DirtyExpirer";
case HostCapacityMaintainer -> "DynamicProvisioningMaintainer";
case HostResumeProvisioner -> "HostResumeProvisioner";
case FailedExpirer -> "FailedExpirer";
case InactiveExpirer -> "InactiveExpirer";
case NodeFailer -> "NodeFailer";
case NodeHealthTracker -> "NodeHealthTracker";
case ProvisionedExpirer -> "ProvisionedExpirer";
case Rebalancer -> "Rebalancer";
case ReservationExpirer -> "ReservationExpirer";
case RetiringOsUpgrader -> "RetiringUpgrader";
case RebuildingOsUpgrader -> "RebuildingOsUpgrader";
case SpareCapacityMaintainer -> "SpareCapacityMaintainer";
case SwitchRebalancer -> "SwitchRebalancer";
case HostEncrypter -> "HostEncrypter";
case ParkedExpirer -> "ParkedExpirer";
case HostFlavorUpgrader -> "HostFlavorUpgrader";
};
}
/** Deserializes a node type; throws IllegalArgumentException on unknown names. */
static NodeType nodeTypeFromString(String typeString) {
return switch (typeString) {
case "tenant" -> NodeType.tenant;
case "host" -> NodeType.host;
case "proxy" -> NodeType.proxy;
case "proxyhost" -> NodeType.proxyhost;
case "config" -> NodeType.config;
case "confighost" -> NodeType.confighost;
case "controller" -> NodeType.controller;
case "controllerhost" -> NodeType.controllerhost;
default -> throw new IllegalArgumentException("Unknown node type '" + typeString + "'");
};
}
/** Serialized name of a node type; explicit switch keeps the stored format stable. */
static String toString(NodeType type) {
return switch (type) {
case tenant -> "tenant";
case host -> "host";
case proxy -> "proxy";
case proxyhost -> "proxyhost";
case config -> "config";
case confighost -> "confighost";
case controller -> "controller";
case controllerhost -> "controllerhost";
};
}
/** Deserializes a node state; throws IllegalArgumentException on unknown names. */
static Node.State nodeStateFromString(String state) {
return switch (state) {
case "active" -> Node.State.active;
case "dirty" -> Node.State.dirty;
case "failed" -> Node.State.failed;
case "inactive" -> Node.State.inactive;
case "parked" -> Node.State.parked;
case "provisioned" -> Node.State.provisioned;
case "ready" -> Node.State.ready;
case "reserved" -> Node.State.reserved;
case "deprovisioned" -> Node.State.deprovisioned;
case "breakfixed" -> Node.State.breakfixed;
default -> throw new IllegalArgumentException("Unknown node state '" + state + "'");
};
}
/** Serialized name of a node state; explicit switch keeps the stored format stable. */
static String toString(Node.State state) {
return switch (state) {
case active -> "active";
case dirty -> "dirty";
case failed -> "failed";
case inactive -> "inactive";
case parked -> "parked";
case provisioned -> "provisioned";
case ready -> "ready";
case reserved -> "reserved";
case deprovisioned -> "deprovisioned";
case breakfixed -> "breakfixed";
};
}
} |
I have confirmed the order of IPv6 addresses in AWS is out-of-order: On an example host the first additional hostname 'a' has the 9th IPv6 address in additionalIpAddresses. | private void toSlime(List<String> addresses, Cursor array, boolean dummyDueToErasure) {
addresses.stream().map(IP::parse).sorted(IP.NATURAL_ORDER).map(IP::asString).forEach(array::addString);
} | addresses.stream().map(IP::parse).sorted(IP.NATURAL_ORDER).map(IP::asString).forEach(array::addString); | private void toSlime(List<String> addresses, Cursor array, boolean dummyDueToErasure) {
addresses.stream().map(IP::parse).sorted(IP.NATURAL_ORDER).map(IP::asString).forEach(array::addString);
} | class NodeSerializer {
/** The configured node flavors */
private final NodeFlavors flavors;
private static final String stateKey = "state";
private static final String hostnameKey = "hostname";
private static final String ipAddressesKey = "ipAddresses";
private static final String ipAddressPoolKey = "additionalIpAddresses";
private static final String containersKey = "containers";
private static final String containerHostnameKey = "hostname";
private static final String idKey = "openStackId";
private static final String extraIdKey = "extraId";
private static final String parentHostnameKey = "parentHostname";
private static final String historyKey = "history";
private static final String logKey = "log";
private static final String instanceKey = "instance";
private static final String rebootGenerationKey = "rebootGeneration";
private static final String currentRebootGenerationKey = "currentRebootGeneration";
private static final String vespaVersionKey = "vespaVersion";
private static final String currentContainerImageKey = "currentDockerImage";
private static final String failCountKey = "failCount";
private static final String nodeTypeKey = "type";
private static final String wantToRetireKey = "wantToRetire";
private static final String wantToDeprovisionKey = "wantToDeprovision";
private static final String wantToRebuildKey = "wantToRebuild";
private static final String preferToRetireKey = "preferToRetire";
// Intentionally "wantToFailKey" (with the stray "Key" suffix): looks like a historical
// typo, but it must be kept as-is to stay compatible with already-written data.
private static final String wantToFailKey = "wantToFailKey";
private static final String wantToUpgradeFlavorKey = "wantToUpgradeFlavor";
private static final String osVersionKey = "osVersion";
private static final String wantedOsVersionKey = "wantedOsVersion";
private static final String firmwareCheckKey = "firmwareCheck";
private static final String reportsKey = "reports";
private static final String modelNameKey = "modelName";
private static final String reservedToKey = "reservedTo";
private static final String exclusiveToApplicationIdKey = "exclusiveTo";
private static final String hostTTLKey = "hostTTL";
private static final String hostEmptyAtKey = "hostEmptyAt";
private static final String exclusiveToClusterTypeKey = "exclusiveToClusterType";
private static final String switchHostnameKey = "switchHostname";
private static final String trustedCertificatesKey = "trustedCertificates";
private static final String cloudAccountKey = "cloudAccount";
private static final String wireguardPubKeyKey = "wireguardPubkey";
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String diskKey = "disk";
private static final String tenantIdKey = "tenantId";
private static final String applicationIdKey = "applicationId";
private static final String instanceIdKey = "instanceId";
private static final String serviceIdKey = "serviceId";
private static final String requestedResourcesKey = "requestedResources";
private static final String restartGenerationKey = "restartGeneration";
private static final String currentRestartGenerationKey = "currentRestartGeneration";
private static final String removableKey = "removable";
private static final String reusableKey = "reusable";
private static final String wantedVespaVersionKey = "wantedVespaVersion";
private static final String wantedContainerImageRepoKey = "wantedDockerImageRepo";
private static final String historyEventTypeKey = "type";
private static final String atKey = "at";
private static final String agentKey = "agent";
private static final String networkPortsKey = "networkPorts";
private static final String fingerprintKey = "fingerprint";
private static final String expiresKey = "expires";
public NodeSerializer(NodeFlavors flavors) {
this.flavors = flavors;
}
/**
 * Serializes the given node to its JSON byte representation.
 *
 * @throws RuntimeException wrapping the IOException if JSON encoding fails
 */
public byte[] toJson(Node node) {
Slime root = new Slime();
toSlime(node, root.setObject());
try {
return SlimeUtils.toJsonBytes(root);
} catch (IOException e) {
throw new RuntimeException("Serialization of " + node + " to json failed", e);
}
}
private void toSlime(Node node, Cursor object) {
object.setString(hostnameKey, node.hostname());
object.setString(stateKey, toString(node.state()));
toSlime(node.ipConfig().primary(), object.setArray(ipAddressesKey), true);
toSlime(node.ipConfig().pool().ips(), object.setArray(ipAddressPoolKey), true);
toSlime(node.ipConfig().pool().hostnames(), object);
object.setString(idKey, node.id());
node.extraId().ifPresent(id -> object.setString(extraIdKey, id));
node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname));
toSlime(node.flavor(), object);
object.setLong(rebootGenerationKey, node.status().reboot().wanted());
object.setLong(currentRebootGenerationKey, node.status().reboot().current());
node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString()));
node.status().containerImage().ifPresent(image -> object.setString(currentContainerImageKey, image.asString()));
object.setLong(failCountKey, node.status().failCount());
object.setBool(wantToRetireKey, node.status().wantToRetire());
object.setBool(preferToRetireKey, node.status().preferToRetire());
object.setBool(wantToDeprovisionKey, node.status().wantToDeprovision());
object.setBool(wantToFailKey, node.status().wantToFail());
object.setBool(wantToRebuildKey, node.status().wantToRebuild());
object.setBool(wantToUpgradeFlavorKey, node.status().wantToUpgradeFlavor());
node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey)));
toSlime(node.history().events(), object.setArray(historyKey));
toSlime(node.history().log(), object.setArray(logKey));
object.setString(nodeTypeKey, toString(node.type()));
node.status().osVersion().current().ifPresent(version -> object.setString(osVersionKey, version.toString()));
node.status().osVersion().wanted().ifPresent(version -> object.setString(wantedOsVersionKey, version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong(firmwareCheckKey, instant.toEpochMilli()));
node.switchHostname().ifPresent(switchHostname -> object.setString(switchHostnameKey, switchHostname));
node.reports().toSlime(object, reportsKey);
node.modelName().ifPresent(modelName -> object.setString(modelNameKey, modelName));
node.reservedTo().ifPresent(tenant -> object.setString(reservedToKey, tenant.value()));
node.exclusiveToApplicationId().ifPresent(applicationId -> object.setString(exclusiveToApplicationIdKey, applicationId.serializedForm()));
node.hostTTL().ifPresent(hostTTL -> object.setLong(hostTTLKey, hostTTL.toMillis()));
node.hostEmptyAt().ifPresent(emptyAt -> object.setLong(hostEmptyAtKey, emptyAt.toEpochMilli()));
node.exclusiveToClusterType().ifPresent(clusterType -> object.setString(exclusiveToClusterTypeKey, clusterType.name()));
trustedCertificatesToSlime(node.trustedCertificates(), object.setArray(trustedCertificatesKey));
if (!node.cloudAccount().isUnspecified()) {
object.setString(cloudAccountKey, node.cloudAccount().value());
}
node.wireguardPubKey().ifPresent(pubKey -> object.setString(wireguardPubKeyKey, pubKey.value()));
}
/**
 * Serializes a flavor: configured flavors are written by name (plus an optional disk
 * override), non-configured flavors by their full resources.
 */
private void toSlime(Flavor flavor, Cursor object) {
if ( ! flavor.isConfigured()) {
NodeResourcesSerializer.toSlime(flavor.resources(), object.setObject(resourcesKey));
return;
}
object.setString(flavorKey, flavor.name());
flavor.flavorOverrides().ifPresent(overrides -> {
Cursor resources = object.setObject(resourcesKey);
overrides.diskGb().ifPresent(diskGb -> resources.setDouble(diskKey, diskGb));
});
}
private void toSlime(Allocation allocation, Cursor object) {
NodeResourcesSerializer.toSlime(allocation.requestedResources(), object.setObject(requestedResourcesKey));
object.setString(tenantIdKey, allocation.owner().tenant().value());
object.setString(applicationIdKey, allocation.owner().application().value());
object.setString(instanceIdKey, allocation.owner().instance().value());
object.setString(serviceIdKey, allocation.membership().stringValue());
object.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
object.setBool(removableKey, allocation.removable());
object.setBool(reusableKey, allocation.reusable());
object.setString(wantedVespaVersionKey, allocation.membership().cluster().vespaVersion().toString());
allocation.membership().cluster().dockerImageRepo().ifPresent(repo -> object.setString(wantedContainerImageRepoKey, repo.untagged()));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray(networkPortsKey)));
}
/** Appends one object per history event to the given array, in iteration order. */
private void toSlime(Collection<History.Event> events, Cursor array) {
events.forEach(event -> toSlime(event, array.addObject()));
}
private void toSlime(History.Event event, Cursor object) {
object.setString(historyEventTypeKey, toString(event.type()));
object.setLong(atKey, event.at().toEpochMilli());
object.setString(agentKey, toString(event.agent()));
}
/** Writes the hostname pool as a "containers" array; writes nothing when the list is empty. */
private void toSlime(List<HostName> hostnames, Cursor object) {
if (hostnames.isEmpty()) return;
Cursor containers = object.setArray(containersKey);
for (HostName hostname : hostnames)
containers.addObject().setString(containerHostnameKey, hostname.value());
}
/** Serializes each trusted certificate as {fingerprint, expires (epoch millis)}. */
private void trustedCertificatesToSlime(List<TrustStoreItem> trustStoreItems, Cursor array) {
for (TrustStoreItem cert : trustStoreItems) {
Cursor entry = array.addObject();
entry.setString(fingerprintKey, cert.fingerprint());
entry.setLong(expiresKey, cert.expiry().toEpochMilli());
}
}
public Node fromJson(byte[] data) {
return nodeFromSlime(SlimeUtils.jsonToSlime(data).get());
}
private Node nodeFromSlime(Inspector object) {
Flavor flavor = flavorFromSlime(object);
return new Node(object.field(idKey).asString(),
SlimeUtils.optionalString(object.field(extraIdKey)),
IP.Config.of(ipAddressesFromSlime(object, ipAddressesKey),
ipAddressesFromSlime(object, ipAddressPoolKey),
hostnamesFromSlime(object)),
object.field(hostnameKey).asString(),
SlimeUtils.optionalString(object.field(parentHostnameKey)),
flavor,
statusFromSlime(object),
nodeStateFromString(object.field(stateKey).asString()),
allocationFromSlime(flavor.resources(), object.field(instanceKey)),
historyFromSlime(object),
nodeTypeFromString(object.field(nodeTypeKey).asString()),
Reports.fromSlime(object.field(reportsKey)),
SlimeUtils.optionalString(object.field(modelNameKey)),
SlimeUtils.optionalString(object.field(reservedToKey)).map(TenantName::from),
SlimeUtils.optionalString(object.field(exclusiveToApplicationIdKey)).map(ApplicationId::fromSerializedForm),
SlimeUtils.optionalDuration(object.field(hostTTLKey)),
SlimeUtils.optionalInstant(object.field(hostEmptyAtKey)),
SlimeUtils.optionalString(object.field(exclusiveToClusterTypeKey)).map(ClusterSpec.Type::from),
SlimeUtils.optionalString(object.field(switchHostnameKey)),
trustedCertificatesFromSlime(object),
SlimeUtils.optionalString(object.field(cloudAccountKey)).map(CloudAccount::from).orElse(CloudAccount.empty),
SlimeUtils.optionalString(object.field(wireguardPubKeyKey)).map(WireguardKey::from));
}
private Status statusFromSlime(Inspector object) {
return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey),
versionFromSlime(object.field(vespaVersionKey)),
containerImageFromSlime(object.field(currentContainerImageKey)),
(int) object.field(failCountKey).asLong(),
object.field(wantToRetireKey).asBool(),
object.field(wantToDeprovisionKey).asBool(),
object.field(wantToRebuildKey).asBool(),
object.field(preferToRetireKey).asBool(),
object.field(wantToFailKey).asBool(),
object.field(wantToUpgradeFlavorKey).asBool(),
new OsVersion(versionFromSlime(object.field(osVersionKey)),
versionFromSlime(object.field(wantedOsVersionKey))),
SlimeUtils.optionalInstant(object.field(firmwareCheckKey)));
}
/**
 * Reads a flavor: a configured flavor is looked up by name (optionally with a disk
 * override); otherwise the flavor is built from explicit resources.
 */
private Flavor flavorFromSlime(Inspector object) {
Inspector flavorName = object.field(flavorKey);
Inspector resources = object.field(resourcesKey);
if ( ! flavorName.valid())
return new Flavor(NodeResourcesSerializer.resourcesFromSlime(resources));
Flavor configured = flavors.getFlavorOrThrow(flavorName.asString());
if ( ! resources.valid()) return configured;
return configured.with(FlavorOverrides.ofDisk(resources.field(diskKey).asDouble()));
}
private Optional<Allocation> allocationFromSlime(NodeResources assignedResources, Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(new Allocation(applicationIdFromSlime(object),
clusterMembershipFromSlime(object),
NodeResourcesSerializer.optionalResourcesFromSlime(object.field(requestedResourcesKey))
.orElse(assignedResources),
generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey),
object.field(removableKey).asBool(),
object.field(reusableKey).asBool(),
NetworkPortsSerializer.fromSlime(object.field(networkPortsKey))));
}
/** Reads the owning application id from its tenant/application/instance fields. */
private ApplicationId applicationIdFromSlime(Inspector object) {
TenantName tenant = TenantName.from(object.field(tenantIdKey).asString());
ApplicationName application = ApplicationName.from(object.field(applicationIdKey).asString());
InstanceName instance = InstanceName.from(object.field(instanceIdKey).asString());
return ApplicationId.from(tenant, application, instance);
}
private History historyFromSlime(Inspector object) {
return new History(eventsFromSlime(object.field(historyKey)),
eventsFromSlime(object.field(logKey)));
}
private List<History.Event> eventsFromSlime(Inspector array) {
if (!array.valid()) return List.of();
List<History.Event> events = new ArrayList<>();
array.traverse((ArrayTraverser) (int i, Inspector item) -> {
History.Event event = eventFromSlime(item);
if (event != null)
events.add(event);
});
return events;
}
private History.Event eventFromSlime(Inspector object) {
History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString());
if (type == null) return null;
Instant at = Instant.ofEpochMilli(object.field(atKey).asLong());
Agent agent = eventAgentFromSlime(object.field(agentKey));
return new History.Event(type, agent, at);
}
private Generation generationFromSlime(Inspector object, String wantedField, String currentField) {
Inspector current = object.field(currentField);
return new Generation(object.field(wantedField).asLong(), current.asLong());
}
private ClusterMembership clusterMembershipFromSlime(Inspector object) {
return ClusterMembership.from(object.field(serviceIdKey).asString(),
versionFromSlime(object.field(wantedVespaVersionKey)).get(),
containerImageFromSlime(object.field(wantedContainerImageRepoKey)));
}
private Optional<Version> versionFromSlime(Inspector object) {
return object.valid() ? Optional.of(Version.fromString(object.asString())) : Optional.empty();
}
private Optional<DockerImage> containerImageFromSlime(Inspector object) {
return SlimeUtils.optionalString(object).map(DockerImage::fromString);
}
private List<String> ipAddressesFromSlime(Inspector object, String key) {
var ipAddresses = new ArrayList<String>();
object.field(key).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString()));
return ipAddresses;
}
private List<HostName> hostnamesFromSlime(Inspector object) {
return SlimeUtils.entriesStream(object.field(containersKey))
.map(elem -> HostName.of(elem.field(containerHostnameKey).asString()))
.toList();
}
private List<TrustStoreItem> trustedCertificatesFromSlime(Inspector object) {
return SlimeUtils.entriesStream(object.field(trustedCertificatesKey))
.map(elem -> new TrustStoreItem(elem.field(fingerprintKey).asString(),
Instant.ofEpochMilli(elem.field(expiresKey).asLong())))
.toList();
}
/** Returns the event type, or null if this event type should be ignored */
private History.Event.Type eventTypeFromString(String eventTypeString) {
return switch (eventTypeString) {
case "provisioned" -> History.Event.Type.provisioned;
case "deprovisioned" -> History.Event.Type.deprovisioned;
case "readied" -> History.Event.Type.readied;
case "reserved" -> History.Event.Type.reserved;
case "activated" -> History.Event.Type.activated;
case "wantToRetire" -> History.Event.Type.wantToRetire;
case "wantToFail" -> History.Event.Type.wantToFail;
case "retired" -> History.Event.Type.retired;
case "deactivated" -> History.Event.Type.deactivated;
case "parked" -> History.Event.Type.parked;
case "failed" -> History.Event.Type.failed;
case "deallocated" -> History.Event.Type.deallocated;
case "down" -> History.Event.Type.down;
case "up" -> History.Event.Type.up;
case "resized" -> History.Event.Type.resized;
case "rebooted" -> History.Event.Type.rebooted;
case "osUpgraded" -> History.Event.Type.osUpgraded;
case "firmwareVerified" -> History.Event.Type.firmwareVerified;
case "breakfixed" -> History.Event.Type.breakfixed;
case "preferToRetire" -> History.Event.Type.preferToRetire;
default -> throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'");
};
}
private String toString(History.Event.Type nodeEventType) {
return switch (nodeEventType) {
case provisioned -> "provisioned";
case deprovisioned -> "deprovisioned";
case readied -> "readied";
case reserved -> "reserved";
case activated -> "activated";
case wantToRetire -> "wantToRetire";
case wantToFail -> "wantToFail";
case retired -> "retired";
case deactivated -> "deactivated";
case parked -> "parked";
case failed -> "failed";
case deallocated -> "deallocated";
case down -> "down";
case up -> "up";
case resized -> "resized";
case rebooted -> "rebooted";
case osUpgraded -> "osUpgraded";
case firmwareVerified -> "firmwareVerified";
case breakfixed -> "breakfixed";
case preferToRetire -> "preferToRetire";
};
}
private Agent eventAgentFromSlime(Inspector eventAgentField) {
return switch (eventAgentField.asString()) {
case "operator" -> Agent.operator;
case "application" -> Agent.application;
case "system" -> Agent.system;
case "nodeAdmin" -> Agent.nodeAdmin;
case "DirtyExpirer" -> Agent.DirtyExpirer;
case "DynamicProvisioningMaintainer", "HostCapacityMaintainer" -> Agent.HostCapacityMaintainer;
case "HostResumeProvisioner" -> Agent.HostResumeProvisioner;
case "FailedExpirer" -> Agent.FailedExpirer;
case "InactiveExpirer" -> Agent.InactiveExpirer;
case "NodeFailer" -> Agent.NodeFailer;
case "NodeHealthTracker" -> Agent.NodeHealthTracker;
case "ProvisionedExpirer" -> Agent.ProvisionedExpirer;
case "Rebalancer" -> Agent.Rebalancer;
case "ReservationExpirer" -> Agent.ReservationExpirer;
case "RetiringUpgrader" -> Agent.RetiringOsUpgrader;
case "RebuildingOsUpgrader" -> Agent.RebuildingOsUpgrader;
case "SpareCapacityMaintainer" -> Agent.SpareCapacityMaintainer;
case "SwitchRebalancer" -> Agent.SwitchRebalancer;
case "HostEncrypter" -> Agent.HostEncrypter;
case "ParkedExpirer" -> Agent.ParkedExpirer;
case "HostFlavorUpgrader" -> Agent.HostFlavorUpgrader;
default -> throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
};
}
private String toString(Agent agent) {
return switch (agent) {
case operator -> "operator";
case application -> "application";
case system -> "system";
case nodeAdmin -> "nodeAdmin";
case DirtyExpirer -> "DirtyExpirer";
case HostCapacityMaintainer -> "DynamicProvisioningMaintainer";
case HostResumeProvisioner -> "HostResumeProvisioner";
case FailedExpirer -> "FailedExpirer";
case InactiveExpirer -> "InactiveExpirer";
case NodeFailer -> "NodeFailer";
case NodeHealthTracker -> "NodeHealthTracker";
case ProvisionedExpirer -> "ProvisionedExpirer";
case Rebalancer -> "Rebalancer";
case ReservationExpirer -> "ReservationExpirer";
case RetiringOsUpgrader -> "RetiringUpgrader";
case RebuildingOsUpgrader -> "RebuildingOsUpgrader";
case SpareCapacityMaintainer -> "SpareCapacityMaintainer";
case SwitchRebalancer -> "SwitchRebalancer";
case HostEncrypter -> "HostEncrypter";
case ParkedExpirer -> "ParkedExpirer";
case HostFlavorUpgrader -> "HostFlavorUpgrader";
};
}
static NodeType nodeTypeFromString(String typeString) {
return switch (typeString) {
case "tenant" -> NodeType.tenant;
case "host" -> NodeType.host;
case "proxy" -> NodeType.proxy;
case "proxyhost" -> NodeType.proxyhost;
case "config" -> NodeType.config;
case "confighost" -> NodeType.confighost;
case "controller" -> NodeType.controller;
case "controllerhost" -> NodeType.controllerhost;
default -> throw new IllegalArgumentException("Unknown node type '" + typeString + "'");
};
}
static String toString(NodeType type) {
return switch (type) {
case tenant -> "tenant";
case host -> "host";
case proxy -> "proxy";
case proxyhost -> "proxyhost";
case config -> "config";
case confighost -> "confighost";
case controller -> "controller";
case controllerhost -> "controllerhost";
};
}
static Node.State nodeStateFromString(String state) {
return switch (state) {
case "active" -> Node.State.active;
case "dirty" -> Node.State.dirty;
case "failed" -> Node.State.failed;
case "inactive" -> Node.State.inactive;
case "parked" -> Node.State.parked;
case "provisioned" -> Node.State.provisioned;
case "ready" -> Node.State.ready;
case "reserved" -> Node.State.reserved;
case "deprovisioned" -> Node.State.deprovisioned;
case "breakfixed" -> Node.State.breakfixed;
default -> throw new IllegalArgumentException("Unknown node state '" + state + "'");
};
}
static String toString(Node.State state) {
return switch (state) {
case active -> "active";
case dirty -> "dirty";
case failed -> "failed";
case inactive -> "inactive";
case parked -> "parked";
case provisioned -> "provisioned";
case ready -> "ready";
case reserved -> "reserved";
case deprovisioned -> "deprovisioned";
case breakfixed -> "breakfixed";
};
}
} | class NodeSerializer {
/** The configured node flavors */
private final NodeFlavors flavors;
private static final String stateKey = "state";
private static final String hostnameKey = "hostname";
private static final String ipAddressesKey = "ipAddresses";
private static final String ipAddressPoolKey = "additionalIpAddresses";
private static final String containersKey = "containers";
private static final String containerHostnameKey = "hostname";
private static final String idKey = "openStackId";
private static final String extraIdKey = "extraId";
private static final String parentHostnameKey = "parentHostname";
private static final String historyKey = "history";
private static final String logKey = "log";
private static final String instanceKey = "instance";
private static final String rebootGenerationKey = "rebootGeneration";
private static final String currentRebootGenerationKey = "currentRebootGeneration";
private static final String vespaVersionKey = "vespaVersion";
private static final String currentContainerImageKey = "currentDockerImage";
private static final String failCountKey = "failCount";
private static final String nodeTypeKey = "type";
private static final String wantToRetireKey = "wantToRetire";
private static final String wantToDeprovisionKey = "wantToDeprovision";
private static final String wantToRebuildKey = "wantToRebuild";
private static final String preferToRetireKey = "preferToRetire";
private static final String wantToFailKey = "wantToFailKey";
private static final String wantToUpgradeFlavorKey = "wantToUpgradeFlavor";
private static final String osVersionKey = "osVersion";
private static final String wantedOsVersionKey = "wantedOsVersion";
private static final String firmwareCheckKey = "firmwareCheck";
private static final String reportsKey = "reports";
private static final String modelNameKey = "modelName";
private static final String reservedToKey = "reservedTo";
private static final String exclusiveToApplicationIdKey = "exclusiveTo";
private static final String hostTTLKey = "hostTTL";
private static final String hostEmptyAtKey = "hostEmptyAt";
private static final String exclusiveToClusterTypeKey = "exclusiveToClusterType";
private static final String switchHostnameKey = "switchHostname";
private static final String trustedCertificatesKey = "trustedCertificates";
private static final String cloudAccountKey = "cloudAccount";
private static final String wireguardPubKeyKey = "wireguardPubkey";
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String diskKey = "disk";
private static final String tenantIdKey = "tenantId";
private static final String applicationIdKey = "applicationId";
private static final String instanceIdKey = "instanceId";
private static final String serviceIdKey = "serviceId";
private static final String requestedResourcesKey = "requestedResources";
private static final String restartGenerationKey = "restartGeneration";
private static final String currentRestartGenerationKey = "currentRestartGeneration";
private static final String removableKey = "removable";
private static final String reusableKey = "reusable";
private static final String wantedVespaVersionKey = "wantedVespaVersion";
private static final String wantedContainerImageRepoKey = "wantedDockerImageRepo";
private static final String historyEventTypeKey = "type";
private static final String atKey = "at";
private static final String agentKey = "agent";
private static final String networkPortsKey = "networkPorts";
private static final String fingerprintKey = "fingerprint";
private static final String expiresKey = "expires";
public NodeSerializer(NodeFlavors flavors) {
this.flavors = flavors;
}
public byte[] toJson(Node node) {
try {
Slime slime = new Slime();
toSlime(node, slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
catch (IOException e) {
throw new RuntimeException("Serialization of " + node + " to json failed", e);
}
}
private void toSlime(Node node, Cursor object) {
object.setString(hostnameKey, node.hostname());
object.setString(stateKey, toString(node.state()));
toSlime(node.ipConfig().primary(), object.setArray(ipAddressesKey), true);
toSlime(node.ipConfig().pool().ips(), object.setArray(ipAddressPoolKey), true);
toSlime(node.ipConfig().pool().hostnames(), object);
object.setString(idKey, node.id());
node.extraId().ifPresent(id -> object.setString(extraIdKey, id));
node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname));
toSlime(node.flavor(), object);
object.setLong(rebootGenerationKey, node.status().reboot().wanted());
object.setLong(currentRebootGenerationKey, node.status().reboot().current());
node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString()));
node.status().containerImage().ifPresent(image -> object.setString(currentContainerImageKey, image.asString()));
object.setLong(failCountKey, node.status().failCount());
object.setBool(wantToRetireKey, node.status().wantToRetire());
object.setBool(preferToRetireKey, node.status().preferToRetire());
object.setBool(wantToDeprovisionKey, node.status().wantToDeprovision());
object.setBool(wantToFailKey, node.status().wantToFail());
object.setBool(wantToRebuildKey, node.status().wantToRebuild());
object.setBool(wantToUpgradeFlavorKey, node.status().wantToUpgradeFlavor());
node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey)));
toSlime(node.history().events(), object.setArray(historyKey));
toSlime(node.history().log(), object.setArray(logKey));
object.setString(nodeTypeKey, toString(node.type()));
node.status().osVersion().current().ifPresent(version -> object.setString(osVersionKey, version.toString()));
node.status().osVersion().wanted().ifPresent(version -> object.setString(wantedOsVersionKey, version.toFullString()));
node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong(firmwareCheckKey, instant.toEpochMilli()));
node.switchHostname().ifPresent(switchHostname -> object.setString(switchHostnameKey, switchHostname));
node.reports().toSlime(object, reportsKey);
node.modelName().ifPresent(modelName -> object.setString(modelNameKey, modelName));
node.reservedTo().ifPresent(tenant -> object.setString(reservedToKey, tenant.value()));
node.exclusiveToApplicationId().ifPresent(applicationId -> object.setString(exclusiveToApplicationIdKey, applicationId.serializedForm()));
node.hostTTL().ifPresent(hostTTL -> object.setLong(hostTTLKey, hostTTL.toMillis()));
node.hostEmptyAt().ifPresent(emptyAt -> object.setLong(hostEmptyAtKey, emptyAt.toEpochMilli()));
node.exclusiveToClusterType().ifPresent(clusterType -> object.setString(exclusiveToClusterTypeKey, clusterType.name()));
trustedCertificatesToSlime(node.trustedCertificates(), object.setArray(trustedCertificatesKey));
if (!node.cloudAccount().isUnspecified()) {
object.setString(cloudAccountKey, node.cloudAccount().value());
}
node.wireguardPubKey().ifPresent(pubKey -> object.setString(wireguardPubKeyKey, pubKey.value()));
}
private void toSlime(Flavor flavor, Cursor object) {
if (flavor.isConfigured()) {
object.setString(flavorKey, flavor.name());
if (flavor.flavorOverrides().isPresent()) {
Cursor resourcesObject = object.setObject(resourcesKey);
flavor.flavorOverrides().get().diskGb().ifPresent(diskGb -> resourcesObject.setDouble(diskKey, diskGb));
}
}
else {
NodeResourcesSerializer.toSlime(flavor.resources(), object.setObject(resourcesKey));
}
}
private void toSlime(Allocation allocation, Cursor object) {
NodeResourcesSerializer.toSlime(allocation.requestedResources(), object.setObject(requestedResourcesKey));
object.setString(tenantIdKey, allocation.owner().tenant().value());
object.setString(applicationIdKey, allocation.owner().application().value());
object.setString(instanceIdKey, allocation.owner().instance().value());
object.setString(serviceIdKey, allocation.membership().stringValue());
object.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
object.setBool(removableKey, allocation.removable());
object.setBool(reusableKey, allocation.reusable());
object.setString(wantedVespaVersionKey, allocation.membership().cluster().vespaVersion().toString());
allocation.membership().cluster().dockerImageRepo().ifPresent(repo -> object.setString(wantedContainerImageRepoKey, repo.untagged()));
allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray(networkPortsKey)));
}
private void toSlime(Collection<History.Event> events, Cursor array) {
for (History.Event event : events)
toSlime(event, array.addObject());
}
private void toSlime(History.Event event, Cursor object) {
object.setString(historyEventTypeKey, toString(event.type()));
object.setLong(atKey, event.at().toEpochMilli());
object.setString(agentKey, toString(event.agent()));
}
private void toSlime(List<HostName> hostnames, Cursor object) {
if (hostnames.isEmpty()) return;
Cursor containersArray = object.setArray(containersKey);
hostnames.forEach(hostname -> {
containersArray.addObject().setString(containerHostnameKey, hostname.value());
});
}
private void trustedCertificatesToSlime(List<TrustStoreItem> trustStoreItems, Cursor array) {
trustStoreItems.forEach(cert -> {
Cursor object = array.addObject();
object.setString(fingerprintKey, cert.fingerprint());
object.setLong(expiresKey, cert.expiry().toEpochMilli());
});
}
public Node fromJson(byte[] data) {
return nodeFromSlime(SlimeUtils.jsonToSlime(data).get());
}
private Node nodeFromSlime(Inspector object) {
Flavor flavor = flavorFromSlime(object);
return new Node(object.field(idKey).asString(),
SlimeUtils.optionalString(object.field(extraIdKey)),
IP.Config.of(ipAddressesFromSlime(object, ipAddressesKey),
ipAddressesFromSlime(object, ipAddressPoolKey),
hostnamesFromSlime(object)),
object.field(hostnameKey).asString(),
SlimeUtils.optionalString(object.field(parentHostnameKey)),
flavor,
statusFromSlime(object),
nodeStateFromString(object.field(stateKey).asString()),
allocationFromSlime(flavor.resources(), object.field(instanceKey)),
historyFromSlime(object),
nodeTypeFromString(object.field(nodeTypeKey).asString()),
Reports.fromSlime(object.field(reportsKey)),
SlimeUtils.optionalString(object.field(modelNameKey)),
SlimeUtils.optionalString(object.field(reservedToKey)).map(TenantName::from),
SlimeUtils.optionalString(object.field(exclusiveToApplicationIdKey)).map(ApplicationId::fromSerializedForm),
SlimeUtils.optionalDuration(object.field(hostTTLKey)),
SlimeUtils.optionalInstant(object.field(hostEmptyAtKey)),
SlimeUtils.optionalString(object.field(exclusiveToClusterTypeKey)).map(ClusterSpec.Type::from),
SlimeUtils.optionalString(object.field(switchHostnameKey)),
trustedCertificatesFromSlime(object),
SlimeUtils.optionalString(object.field(cloudAccountKey)).map(CloudAccount::from).orElse(CloudAccount.empty),
SlimeUtils.optionalString(object.field(wireguardPubKeyKey)).map(WireguardKey::from));
}
private Status statusFromSlime(Inspector object) {
return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey),
versionFromSlime(object.field(vespaVersionKey)),
containerImageFromSlime(object.field(currentContainerImageKey)),
(int) object.field(failCountKey).asLong(),
object.field(wantToRetireKey).asBool(),
object.field(wantToDeprovisionKey).asBool(),
object.field(wantToRebuildKey).asBool(),
object.field(preferToRetireKey).asBool(),
object.field(wantToFailKey).asBool(),
object.field(wantToUpgradeFlavorKey).asBool(),
new OsVersion(versionFromSlime(object.field(osVersionKey)),
versionFromSlime(object.field(wantedOsVersionKey))),
SlimeUtils.optionalInstant(object.field(firmwareCheckKey)));
}
private Flavor flavorFromSlime(Inspector object) {
Inspector resources = object.field(resourcesKey);
if (object.field(flavorKey).valid()) {
Flavor flavor = flavors.getFlavorOrThrow(object.field(flavorKey).asString());
if (!resources.valid()) return flavor;
return flavor.with(FlavorOverrides.ofDisk(resources.field(diskKey).asDouble()));
}
else {
return new Flavor(NodeResourcesSerializer.resourcesFromSlime(resources));
}
}
private Optional<Allocation> allocationFromSlime(NodeResources assignedResources, Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(new Allocation(applicationIdFromSlime(object),
clusterMembershipFromSlime(object),
NodeResourcesSerializer.optionalResourcesFromSlime(object.field(requestedResourcesKey))
.orElse(assignedResources),
generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey),
object.field(removableKey).asBool(),
object.field(reusableKey).asBool(),
NetworkPortsSerializer.fromSlime(object.field(networkPortsKey))));
}
private ApplicationId applicationIdFromSlime(Inspector object) {
return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()),
ApplicationName.from(object.field(applicationIdKey).asString()),
InstanceName.from(object.field(instanceIdKey).asString()));
}
private History historyFromSlime(Inspector object) {
return new History(eventsFromSlime(object.field(historyKey)),
eventsFromSlime(object.field(logKey)));
}
private List<History.Event> eventsFromSlime(Inspector array) {
if (!array.valid()) return List.of();
List<History.Event> events = new ArrayList<>();
array.traverse((ArrayTraverser) (int i, Inspector item) -> {
History.Event event = eventFromSlime(item);
if (event != null)
events.add(event);
});
return events;
}
private History.Event eventFromSlime(Inspector object) {
History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString());
if (type == null) return null;
Instant at = Instant.ofEpochMilli(object.field(atKey).asLong());
Agent agent = eventAgentFromSlime(object.field(agentKey));
return new History.Event(type, agent, at);
}
private Generation generationFromSlime(Inspector object, String wantedField, String currentField) {
Inspector current = object.field(currentField);
return new Generation(object.field(wantedField).asLong(), current.asLong());
}
private ClusterMembership clusterMembershipFromSlime(Inspector object) {
return ClusterMembership.from(object.field(serviceIdKey).asString(),
versionFromSlime(object.field(wantedVespaVersionKey)).get(),
containerImageFromSlime(object.field(wantedContainerImageRepoKey)));
}
private Optional<Version> versionFromSlime(Inspector object) {
return object.valid() ? Optional.of(Version.fromString(object.asString())) : Optional.empty();
}
private Optional<DockerImage> containerImageFromSlime(Inspector object) {
return SlimeUtils.optionalString(object).map(DockerImage::fromString);
}
private List<String> ipAddressesFromSlime(Inspector object, String key) {
var ipAddresses = new ArrayList<String>();
object.field(key).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString()));
return ipAddresses;
}
private List<HostName> hostnamesFromSlime(Inspector object) {
return SlimeUtils.entriesStream(object.field(containersKey))
.map(elem -> HostName.of(elem.field(containerHostnameKey).asString()))
.toList();
}
private List<TrustStoreItem> trustedCertificatesFromSlime(Inspector object) {
return SlimeUtils.entriesStream(object.field(trustedCertificatesKey))
.map(elem -> new TrustStoreItem(elem.field(fingerprintKey).asString(),
Instant.ofEpochMilli(elem.field(expiresKey).asLong())))
.toList();
}
/** Returns the event type, or null if this event type should be ignored */
private History.Event.Type eventTypeFromString(String eventTypeString) {
return switch (eventTypeString) {
case "provisioned" -> History.Event.Type.provisioned;
case "deprovisioned" -> History.Event.Type.deprovisioned;
case "readied" -> History.Event.Type.readied;
case "reserved" -> History.Event.Type.reserved;
case "activated" -> History.Event.Type.activated;
case "wantToRetire" -> History.Event.Type.wantToRetire;
case "wantToFail" -> History.Event.Type.wantToFail;
case "retired" -> History.Event.Type.retired;
case "deactivated" -> History.Event.Type.deactivated;
case "parked" -> History.Event.Type.parked;
case "failed" -> History.Event.Type.failed;
case "deallocated" -> History.Event.Type.deallocated;
case "down" -> History.Event.Type.down;
case "up" -> History.Event.Type.up;
case "resized" -> History.Event.Type.resized;
case "rebooted" -> History.Event.Type.rebooted;
case "osUpgraded" -> History.Event.Type.osUpgraded;
case "firmwareVerified" -> History.Event.Type.firmwareVerified;
case "breakfixed" -> History.Event.Type.breakfixed;
case "preferToRetire" -> History.Event.Type.preferToRetire;
default -> throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'");
};
}
private String toString(History.Event.Type nodeEventType) {
return switch (nodeEventType) {
case provisioned -> "provisioned";
case deprovisioned -> "deprovisioned";
case readied -> "readied";
case reserved -> "reserved";
case activated -> "activated";
case wantToRetire -> "wantToRetire";
case wantToFail -> "wantToFail";
case retired -> "retired";
case deactivated -> "deactivated";
case parked -> "parked";
case failed -> "failed";
case deallocated -> "deallocated";
case down -> "down";
case up -> "up";
case resized -> "resized";
case rebooted -> "rebooted";
case osUpgraded -> "osUpgraded";
case firmwareVerified -> "firmwareVerified";
case breakfixed -> "breakfixed";
case preferToRetire -> "preferToRetire";
};
}
private Agent eventAgentFromSlime(Inspector eventAgentField) {
return switch (eventAgentField.asString()) {
case "operator" -> Agent.operator;
case "application" -> Agent.application;
case "system" -> Agent.system;
case "nodeAdmin" -> Agent.nodeAdmin;
case "DirtyExpirer" -> Agent.DirtyExpirer;
case "DynamicProvisioningMaintainer", "HostCapacityMaintainer" -> Agent.HostCapacityMaintainer;
case "HostResumeProvisioner" -> Agent.HostResumeProvisioner;
case "FailedExpirer" -> Agent.FailedExpirer;
case "InactiveExpirer" -> Agent.InactiveExpirer;
case "NodeFailer" -> Agent.NodeFailer;
case "NodeHealthTracker" -> Agent.NodeHealthTracker;
case "ProvisionedExpirer" -> Agent.ProvisionedExpirer;
case "Rebalancer" -> Agent.Rebalancer;
case "ReservationExpirer" -> Agent.ReservationExpirer;
case "RetiringUpgrader" -> Agent.RetiringOsUpgrader;
case "RebuildingOsUpgrader" -> Agent.RebuildingOsUpgrader;
case "SpareCapacityMaintainer" -> Agent.SpareCapacityMaintainer;
case "SwitchRebalancer" -> Agent.SwitchRebalancer;
case "HostEncrypter" -> Agent.HostEncrypter;
case "ParkedExpirer" -> Agent.ParkedExpirer;
case "HostFlavorUpgrader" -> Agent.HostFlavorUpgrader;
default -> throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
};
}
private String toString(Agent agent) {
return switch (agent) {
case operator -> "operator";
case application -> "application";
case system -> "system";
case nodeAdmin -> "nodeAdmin";
case DirtyExpirer -> "DirtyExpirer";
case HostCapacityMaintainer -> "DynamicProvisioningMaintainer";
case HostResumeProvisioner -> "HostResumeProvisioner";
case FailedExpirer -> "FailedExpirer";
case InactiveExpirer -> "InactiveExpirer";
case NodeFailer -> "NodeFailer";
case NodeHealthTracker -> "NodeHealthTracker";
case ProvisionedExpirer -> "ProvisionedExpirer";
case Rebalancer -> "Rebalancer";
case ReservationExpirer -> "ReservationExpirer";
case RetiringOsUpgrader -> "RetiringUpgrader";
case RebuildingOsUpgrader -> "RebuildingOsUpgrader";
case SpareCapacityMaintainer -> "SpareCapacityMaintainer";
case SwitchRebalancer -> "SwitchRebalancer";
case HostEncrypter -> "HostEncrypter";
case ParkedExpirer -> "ParkedExpirer";
case HostFlavorUpgrader -> "HostFlavorUpgrader";
};
}
static NodeType nodeTypeFromString(String typeString) {
return switch (typeString) {
case "tenant" -> NodeType.tenant;
case "host" -> NodeType.host;
case "proxy" -> NodeType.proxy;
case "proxyhost" -> NodeType.proxyhost;
case "config" -> NodeType.config;
case "confighost" -> NodeType.confighost;
case "controller" -> NodeType.controller;
case "controllerhost" -> NodeType.controllerhost;
default -> throw new IllegalArgumentException("Unknown node type '" + typeString + "'");
};
}
static String toString(NodeType type) {
return switch (type) {
case tenant -> "tenant";
case host -> "host";
case proxy -> "proxy";
case proxyhost -> "proxyhost";
case config -> "config";
case confighost -> "confighost";
case controller -> "controller";
case controllerhost -> "controllerhost";
};
}
static Node.State nodeStateFromString(String state) {
return switch (state) {
case "active" -> Node.State.active;
case "dirty" -> Node.State.dirty;
case "failed" -> Node.State.failed;
case "inactive" -> Node.State.inactive;
case "parked" -> Node.State.parked;
case "provisioned" -> Node.State.provisioned;
case "ready" -> Node.State.ready;
case "reserved" -> Node.State.reserved;
case "deprovisioned" -> Node.State.deprovisioned;
case "breakfixed" -> Node.State.breakfixed;
default -> throw new IllegalArgumentException("Unknown node state '" + state + "'");
};
}
static String toString(Node.State state) {
return switch (state) {
case active -> "active";
case dirty -> "dirty";
case failed -> "failed";
case inactive -> "inactive";
case parked -> "parked";
case provisioned -> "provisioned";
case ready -> "ready";
case reserved -> "reserved";
case deprovisioned -> "deprovisioned";
case breakfixed -> "breakfixed";
};
}
} |
🐛 | protected OrderedTensorType lazyGetType() {
if (!allInputTypesPresent(2)) return null;
inputs.get(0).exportAsRankingFunction = true;
IntermediateOperation repeats = inputs.get(1);
if (repeats.getConstantValue().isEmpty())
throw new IllegalArgumentException("Tile " + name + ": repeats input must be a constant.");
Tensor shape = repeats.getConstantValue().get().asTensor();
if (shape.type().rank() != 1)
throw new IllegalArgumentException("Tile " + name + ": repeats must be a 1-d tensor.");
OrderedTensorType inputType = inputs.get(0).type().get();
if (shape.type().dimensions().get(0).size().get().intValue() != inputType.rank())
throw new IllegalArgumentException("Tile " + name + ": repeats must be the same size as input rank.");
List<Integer> dimSizes = new ArrayList<>(inputType.rank());
shape.valueIterator().forEachRemaining(v -> dimSizes.add(v.intValue()));
OrderedTensorType.Builder typeBuilder = new OrderedTensorType.Builder(resultValueType());
for (int i = 0; i < dimSizes.size(); ++i) {
TensorType.Dimension inputDimension = inputType.dimensions().get(i);
typeBuilder.add(TensorType.Dimension.indexed(inputDimension.name(), inputDimension.size().get() * dimSizes.get(i)));
}
return typeBuilder.build();
} | if (shape.type().dimensions().get(0).size().get().intValue() != inputType.rank()) | protected OrderedTensorType lazyGetType() {
if (!allInputTypesPresent(2)) return null;
inputs.get(0).exportAsRankingFunction = true;
IntermediateOperation repeats = inputs.get(1);
if (repeats.getConstantValue().isEmpty())
throw new IllegalArgumentException("Tile " + name + ": repeats input must be a constant.");
Tensor shape = repeats.getConstantValue().get().asTensor();
if (shape.type().rank() != 1)
throw new IllegalArgumentException("Tile " + name + ": repeats must be a 1-d tensor.");
OrderedTensorType inputType = inputs.get(0).type().get();
if (shape.type().dimensions().get(0).size().get().intValue() != inputType.rank())
throw new IllegalArgumentException("Tile " + name + ": repeats must be the same size as input rank.");
List<Integer> dimSizes = new ArrayList<>(inputType.rank());
shape.valueIterator().forEachRemaining(v -> dimSizes.add(v.intValue()));
OrderedTensorType.Builder typeBuilder = new OrderedTensorType.Builder(resultValueType());
for (int i = 0; i < dimSizes.size(); ++i) {
TensorType.Dimension inputDimension = inputType.dimensions().get(i);
typeBuilder.add(TensorType.Dimension.indexed(inputDimension.name(), inputDimension.size().get() * dimSizes.get(i)));
}
return typeBuilder.build();
} | class Tile extends IntermediateOperation {
public Tile(String modelName, String nodeName, List<IntermediateOperation> inputs) {
super(modelName, nodeName, inputs);
}
@Override
@Override
protected TensorFunction<Reference> lazyGetFunction() {
if (!allInputFunctionsPresent(2)) return null;
IntermediateOperation input = inputs.get(0);
OrderedTensorType inputType = input.type().get();
String inputFunctionName = input.rankingExpressionFunctionName();
List<com.yahoo.tensor.functions.Slice.DimensionValue<Reference>> dimensionValues = new ArrayList<>();
for (int axis = 0; axis < inputType.rank(); ++axis) {
String inputDimensionName = inputType.dimensions().get(axis).name();
long inputDimensionSize = inputType.dimensions().get(axis).size().get();
ExpressionNode size = new ConstantNode(new DoubleValue(inputDimensionSize));
ExpressionNode reference = new ReferenceNode(inputDimensionName);
ExpressionNode mod = new OperationNode(reference, Operator.modulo, size);
dimensionValues.add(new com.yahoo.tensor.functions.Slice.DimensionValue<>(Optional.of(inputDimensionName), wrapScalar(new EmbracedNode(mod))));
}
TensorFunction<Reference> inputIndices = new TensorFunctionNode.ExpressionTensorFunction(new ReferenceNode(inputFunctionName));
com.yahoo.tensor.functions.Slice<Reference> sliceIndices = new com.yahoo.tensor.functions.Slice<>(inputIndices, dimensionValues);
ExpressionNode sliceExpression = new TensorFunctionNode(sliceIndices);
TensorFunction<Reference> generate = Generate.bound(type.type(), wrapScalar(sliceExpression));
return generate;
}
@Override
public Tile withInputs(List<IntermediateOperation> inputs) {
return new Tile(modelName(), name(), inputs);
}
@Override
public String operationName() { return "Tile"; }
} | class Tile extends IntermediateOperation {
public Tile(String modelName, String nodeName, List<IntermediateOperation> inputs) {
super(modelName, nodeName, inputs);
}
@Override
@Override
protected TensorFunction<Reference> lazyGetFunction() {
if (!allInputFunctionsPresent(2)) return null;
IntermediateOperation input = inputs.get(0);
OrderedTensorType inputType = input.type().get();
String inputFunctionName = input.rankingExpressionFunctionName();
List<com.yahoo.tensor.functions.Slice.DimensionValue<Reference>> dimensionValues = new ArrayList<>();
for (int axis = 0; axis < inputType.rank(); ++axis) {
String inputDimensionName = inputType.dimensions().get(axis).name();
long inputDimensionSize = inputType.dimensions().get(axis).size().get();
ExpressionNode size = new ConstantNode(new DoubleValue(inputDimensionSize));
ExpressionNode reference = new ReferenceNode(inputDimensionName);
ExpressionNode mod = new OperationNode(reference, Operator.modulo, size);
dimensionValues.add(new com.yahoo.tensor.functions.Slice.DimensionValue<>(Optional.of(inputDimensionName), wrapScalar(new EmbracedNode(mod))));
}
TensorFunction<Reference> inputIndices = new TensorFunctionNode.ExpressionTensorFunction(new ReferenceNode(inputFunctionName));
com.yahoo.tensor.functions.Slice<Reference> sliceIndices = new com.yahoo.tensor.functions.Slice<>(inputIndices, dimensionValues);
ExpressionNode sliceExpression = new TensorFunctionNode(sliceIndices);
TensorFunction<Reference> generate = Generate.bound(type.type(), wrapScalar(sliceExpression));
return generate;
}
@Override
public Tile withInputs(List<IntermediateOperation> inputs) {
return new Tile(modelName(), name(), inputs);
}
@Override
public String operationName() { return "Tile"; }
} |
Consider converting this to `URI`, then this could be a `switch` on `uri.getScheme()`. I guess `UrlDownloader` only supports `http`/`https`? Rest should fail. In `S3Downloader` the bucket name will be `url.getHost()`. | private static Downloader downloader(String url) {
return url.startsWith("s3:
} | return url.startsWith("s3: | private static Downloader downloader(String url) {
URI uri = new URI(url);
return switch (uri.getScheme()) {
case "http", "https" -> new UrlDownloader();
case "s3" -> new S3Downloader();
default -> throw new IllegalArgumentException("Unsupported scheme '" + uri.getScheme() + "'");
};
} | class UrlDownloadRpcServer {
private static final Logger log = Logger.getLogger(UrlDownloadRpcServer.class.getName());
static final File defaultDownloadDirectory = new File(Defaults.getDefaults().underVespaHome("var/db/vespa/download"));
private final File rootDownloadDir;
private final ExecutorService executor = newFixedThreadPool(Math.max(8, getRuntime().availableProcessors()),
new DaemonThreadFactory("Rpc URL download executor"));
UrlDownloadRpcServer(Supervisor supervisor) {
this.rootDownloadDir = defaultDownloadDirectory;
supervisor.addMethod(new Method("url.waitFor", "s", "s", this::download)
.requireCapabilities(Capability.CONFIGPROXY__FILEDISTRIBUTION_API)
.methodDesc("get path to url download")
.paramDesc(0, "url", "url")
.returnDesc(0, "path", "path to file"));
}
void close() {
executor.shutdownNow();
try {
if ( ! executor.awaitTermination(10, TimeUnit.SECONDS))
log.log(WARNING, "Failed to shut down url download rpc server within timeout");
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
private void download(Request req) {
req.detach();
executor.execute(() -> downloadFile(req));
}
private void downloadFile(Request req) {
String url = req.parameters().get(0).asString();
File downloadDir = new File(rootDownloadDir, urlToDirName(url));
Downloader downloader = downloader(url);
if (downloader.alreadyDownloaded(downloader, downloadDir)) {
log.log(Level.INFO, "URL '" + url + "' already downloaded");
req.returnValues().add(new StringValue(new File(downloadDir, downloader.fileName()).getAbsolutePath()));
req.returnRequest();
return;
}
try {
Optional<File> file = downloader.downloadFile(url, downloadDir);
if (file.isPresent())
req.returnValues().add(new StringValue(file.get().getAbsolutePath()));
else
req.setError(DOES_NOT_EXIST, "URL '" + url + "' not found");
} catch (RuntimeException e) {
logAndSetRpcError(req, url, e, HTTP_ERROR);
} catch (Throwable e) {
logAndSetRpcError(req, url, e, INTERNAL_ERROR);
}
req.returnRequest();
}
private static void logAndSetRpcError(Request req, String url, Throwable e, int rpcErrorCode) {
String message = "Download of '" + url + "' failed: " + Exceptions.toMessageString(e);
log.log(Level.SEVERE, message);
req.setError(rpcErrorCode, e.getMessage());
}
private static String urlToDirName(String uri) {
return String.valueOf(XXHashFactory.fastestJavaInstance().hash64().hash(ByteBuffer.wrap(Utf8.toBytes(uri)), 0));
}
} | class UrlDownloadRpcServer {
private static final Logger log = Logger.getLogger(UrlDownloadRpcServer.class.getName());
static final File defaultDownloadDirectory = new File(Defaults.getDefaults().underVespaHome("var/db/vespa/download"));
private final File rootDownloadDir;
private final ExecutorService executor = newFixedThreadPool(Math.max(8, getRuntime().availableProcessors()),
new DaemonThreadFactory("Rpc URL download executor"));
UrlDownloadRpcServer(Supervisor supervisor) {
this.rootDownloadDir = defaultDownloadDirectory;
supervisor.addMethod(new Method("url.waitFor", "s", "s", this::download)
.requireCapabilities(Capability.CONFIGPROXY__FILEDISTRIBUTION_API)
.methodDesc("get path to url download")
.paramDesc(0, "url", "url")
.returnDesc(0, "path", "path to file"));
}
void close() {
executor.shutdownNow();
try {
if ( ! executor.awaitTermination(10, TimeUnit.SECONDS))
log.log(WARNING, "Failed to shut down url download rpc server within timeout");
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
private void download(Request req) {
req.detach();
executor.execute(() -> downloadFile(req));
}
private void downloadFile(Request req) {
String url = req.parameters().get(0).asString();
File downloadDir = new File(rootDownloadDir, urlToDirName(url));
Downloader downloader = downloader(url);
if (downloader.alreadyDownloaded(downloader, downloadDir)) {
log.log(Level.INFO, "URL '" + url + "' already downloaded");
req.returnValues().add(new StringValue(new File(downloadDir, downloader.fileName()).getAbsolutePath()));
req.returnRequest();
return;
}
try {
Files.createDirectories(downloadDir.toPath());
Optional<File> file = downloader.downloadFile(url, downloadDir);
if (file.isPresent())
req.returnValues().add(new StringValue(file.get().getAbsolutePath()));
else
req.setError(DOES_NOT_EXIST, "URL '" + url + "' not found");
} catch (RuntimeException e) {
logAndSetRpcError(req, url, e, HTTP_ERROR);
} catch (Throwable e) {
logAndSetRpcError(req, url, e, INTERNAL_ERROR);
}
req.returnRequest();
}
private static void logAndSetRpcError(Request req, String url, Throwable e, int rpcErrorCode) {
String message = "Download of '" + url + "' failed: " + Exceptions.toMessageString(e);
log.log(Level.SEVERE, message);
req.setError(rpcErrorCode, e.getMessage());
}
private static String urlToDirName(String uri) {
return String.valueOf(XXHashFactory.fastestJavaInstance().hash64().hash(ByteBuffer.wrap(Utf8.toBytes(uri)), 0));
}
} |
`UrlDownloader` also does `Files.createDirectories(downloadDir.toPath());`. Is this needed here/not needed there? | public Optional<File> downloadFile(String url, File targetDir) throws IOException {
AmazonS3URI s3URI = new AmazonS3URI(url);
S3Object s3Object = s3Client.getObject(s3URI.getBucket(), s3URI.getKey());
File file = new File(targetDir, fileName());
Files.copy(s3Object.getObjectContent(), file.toPath());
return Optional.of(file);
} | Files.copy(s3Object.getObjectContent(), file.toPath()); | public Optional<File> downloadFile(String url, File targetDir) throws IOException {
AmazonS3URI s3URI = new AmazonS3URI(url);
S3Object s3Object = s3Client.getObject(s3URI.getBucket(), s3URI.getKey());
File file = new File(targetDir, fileName());
Files.copy(s3Object.getObjectContent(), file.toPath());
return Optional.of(file);
} | class S3Downloader implements Downloader {
private final AmazonS3 s3Client;
S3Downloader() {
this.s3Client = AmazonS3ClientBuilder.standard()
.withRegion(System.getenv("VESPA_CLOUD_NATIVE_REGION"))
.withCredentials(new CredentialsProvider())
.build();
}
@Override
private static class CredentialsProvider implements AWSCredentialsProvider {
private static final String DEFAULT_CREDENTIALS_PATH = Defaults.getDefaults()
.underVespaHome("var/vespa/aws/credentials.json");
private final Path credentialsPath;
public CredentialsProvider() {
this.credentialsPath = Path.of(DEFAULT_CREDENTIALS_PATH);
}
@Override
public AWSCredentials getCredentials() { return readCredentials(); }
@Override
public void refresh() { readCredentials(); }
private AWSSessionCredentials readCredentials() {
try {
Slime slime = SlimeUtils.jsonToSlime(Files.readAllBytes(credentialsPath));
Cursor cursor = slime.get();
String accessKey = cursor.field("awsAccessKey").asString();
String secretKey = cursor.field("awsSecretKey").asString();
String sessionToken = cursor.field("sessionToken").asString();
return new BasicSessionCredentials(accessKey, secretKey, sessionToken);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
} | class S3Downloader implements Downloader {
private final AmazonS3 s3Client;
S3Downloader() {
this.s3Client = AmazonS3ClientBuilder.standard()
.withRegion(System.getenv("VESPA_CLOUD_NATIVE_REGION"))
.withCredentials(new CredentialsProvider())
.build();
}
@Override
private static class CredentialsProvider implements AWSCredentialsProvider {
private static final String DEFAULT_CREDENTIALS_PATH = Defaults.getDefaults()
.underVespaHome("var/vespa/aws/credentials.json");
private final Path credentialsPath;
public CredentialsProvider() {
this.credentialsPath = Path.of(DEFAULT_CREDENTIALS_PATH);
}
@Override
public AWSCredentials getCredentials() { return readCredentials(); }
@Override
public void refresh() { readCredentials(); }
private AWSSessionCredentials readCredentials() {
try {
Slime slime = SlimeUtils.jsonToSlime(Files.readAllBytes(credentialsPath));
Cursor cursor = slime.get();
String accessKey = cursor.field("awsAccessKey").asString();
String secretKey = cursor.field("awsSecretKey").asString();
String sessionToken = cursor.field("sessionToken").asString();
return new BasicSessionCredentials(accessKey, secretKey, sessionToken);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
} |
Nice, fixed | private static Downloader downloader(String url) {
return url.startsWith("s3:
} | return url.startsWith("s3: | private static Downloader downloader(String url) {
URI uri = new URI(url);
return switch (uri.getScheme()) {
case "http", "https" -> new UrlDownloader();
case "s3" -> new S3Downloader();
default -> throw new IllegalArgumentException("Unsupported scheme '" + uri.getScheme() + "'");
};
} | class UrlDownloadRpcServer {
private static final Logger log = Logger.getLogger(UrlDownloadRpcServer.class.getName());
static final File defaultDownloadDirectory = new File(Defaults.getDefaults().underVespaHome("var/db/vespa/download"));
private final File rootDownloadDir;
private final ExecutorService executor = newFixedThreadPool(Math.max(8, getRuntime().availableProcessors()),
new DaemonThreadFactory("Rpc URL download executor"));
UrlDownloadRpcServer(Supervisor supervisor) {
this.rootDownloadDir = defaultDownloadDirectory;
supervisor.addMethod(new Method("url.waitFor", "s", "s", this::download)
.requireCapabilities(Capability.CONFIGPROXY__FILEDISTRIBUTION_API)
.methodDesc("get path to url download")
.paramDesc(0, "url", "url")
.returnDesc(0, "path", "path to file"));
}
void close() {
executor.shutdownNow();
try {
if ( ! executor.awaitTermination(10, TimeUnit.SECONDS))
log.log(WARNING, "Failed to shut down url download rpc server within timeout");
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
private void download(Request req) {
req.detach();
executor.execute(() -> downloadFile(req));
}
private void downloadFile(Request req) {
String url = req.parameters().get(0).asString();
File downloadDir = new File(rootDownloadDir, urlToDirName(url));
Downloader downloader = downloader(url);
if (downloader.alreadyDownloaded(downloader, downloadDir)) {
log.log(Level.INFO, "URL '" + url + "' already downloaded");
req.returnValues().add(new StringValue(new File(downloadDir, downloader.fileName()).getAbsolutePath()));
req.returnRequest();
return;
}
try {
Optional<File> file = downloader.downloadFile(url, downloadDir);
if (file.isPresent())
req.returnValues().add(new StringValue(file.get().getAbsolutePath()));
else
req.setError(DOES_NOT_EXIST, "URL '" + url + "' not found");
} catch (RuntimeException e) {
logAndSetRpcError(req, url, e, HTTP_ERROR);
} catch (Throwable e) {
logAndSetRpcError(req, url, e, INTERNAL_ERROR);
}
req.returnRequest();
}
private static void logAndSetRpcError(Request req, String url, Throwable e, int rpcErrorCode) {
String message = "Download of '" + url + "' failed: " + Exceptions.toMessageString(e);
log.log(Level.SEVERE, message);
req.setError(rpcErrorCode, e.getMessage());
}
private static String urlToDirName(String uri) {
return String.valueOf(XXHashFactory.fastestJavaInstance().hash64().hash(ByteBuffer.wrap(Utf8.toBytes(uri)), 0));
}
} | class UrlDownloadRpcServer {
private static final Logger log = Logger.getLogger(UrlDownloadRpcServer.class.getName());
static final File defaultDownloadDirectory = new File(Defaults.getDefaults().underVespaHome("var/db/vespa/download"));
private final File rootDownloadDir;
private final ExecutorService executor = newFixedThreadPool(Math.max(8, getRuntime().availableProcessors()),
new DaemonThreadFactory("Rpc URL download executor"));
UrlDownloadRpcServer(Supervisor supervisor) {
this.rootDownloadDir = defaultDownloadDirectory;
supervisor.addMethod(new Method("url.waitFor", "s", "s", this::download)
.requireCapabilities(Capability.CONFIGPROXY__FILEDISTRIBUTION_API)
.methodDesc("get path to url download")
.paramDesc(0, "url", "url")
.returnDesc(0, "path", "path to file"));
}
void close() {
executor.shutdownNow();
try {
if ( ! executor.awaitTermination(10, TimeUnit.SECONDS))
log.log(WARNING, "Failed to shut down url download rpc server within timeout");
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
private void download(Request req) {
req.detach();
executor.execute(() -> downloadFile(req));
}
private void downloadFile(Request req) {
String url = req.parameters().get(0).asString();
File downloadDir = new File(rootDownloadDir, urlToDirName(url));
Downloader downloader = downloader(url);
if (downloader.alreadyDownloaded(downloader, downloadDir)) {
log.log(Level.INFO, "URL '" + url + "' already downloaded");
req.returnValues().add(new StringValue(new File(downloadDir, downloader.fileName()).getAbsolutePath()));
req.returnRequest();
return;
}
try {
Files.createDirectories(downloadDir.toPath());
Optional<File> file = downloader.downloadFile(url, downloadDir);
if (file.isPresent())
req.returnValues().add(new StringValue(file.get().getAbsolutePath()));
else
req.setError(DOES_NOT_EXIST, "URL '" + url + "' not found");
} catch (RuntimeException e) {
logAndSetRpcError(req, url, e, HTTP_ERROR);
} catch (Throwable e) {
logAndSetRpcError(req, url, e, INTERNAL_ERROR);
}
req.returnRequest();
}
private static void logAndSetRpcError(Request req, String url, Throwable e, int rpcErrorCode) {
String message = "Download of '" + url + "' failed: " + Exceptions.toMessageString(e);
log.log(Level.SEVERE, message);
req.setError(rpcErrorCode, e.getMessage());
}
private static String urlToDirName(String uri) {
return String.valueOf(XXHashFactory.fastestJavaInstance().hash64().hash(ByteBuffer.wrap(Utf8.toBytes(uri)), 0));
}
} |
Fixed | public Optional<File> downloadFile(String url, File targetDir) throws IOException {
AmazonS3URI s3URI = new AmazonS3URI(url);
S3Object s3Object = s3Client.getObject(s3URI.getBucket(), s3URI.getKey());
File file = new File(targetDir, fileName());
Files.copy(s3Object.getObjectContent(), file.toPath());
return Optional.of(file);
} | Files.copy(s3Object.getObjectContent(), file.toPath()); | public Optional<File> downloadFile(String url, File targetDir) throws IOException {
AmazonS3URI s3URI = new AmazonS3URI(url);
S3Object s3Object = s3Client.getObject(s3URI.getBucket(), s3URI.getKey());
File file = new File(targetDir, fileName());
Files.copy(s3Object.getObjectContent(), file.toPath());
return Optional.of(file);
} | class S3Downloader implements Downloader {
private final AmazonS3 s3Client;
S3Downloader() {
this.s3Client = AmazonS3ClientBuilder.standard()
.withRegion(System.getenv("VESPA_CLOUD_NATIVE_REGION"))
.withCredentials(new CredentialsProvider())
.build();
}
@Override
private static class CredentialsProvider implements AWSCredentialsProvider {
private static final String DEFAULT_CREDENTIALS_PATH = Defaults.getDefaults()
.underVespaHome("var/vespa/aws/credentials.json");
private final Path credentialsPath;
public CredentialsProvider() {
this.credentialsPath = Path.of(DEFAULT_CREDENTIALS_PATH);
}
@Override
public AWSCredentials getCredentials() { return readCredentials(); }
@Override
public void refresh() { readCredentials(); }
private AWSSessionCredentials readCredentials() {
try {
Slime slime = SlimeUtils.jsonToSlime(Files.readAllBytes(credentialsPath));
Cursor cursor = slime.get();
String accessKey = cursor.field("awsAccessKey").asString();
String secretKey = cursor.field("awsSecretKey").asString();
String sessionToken = cursor.field("sessionToken").asString();
return new BasicSessionCredentials(accessKey, secretKey, sessionToken);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
} | class S3Downloader implements Downloader {
private final AmazonS3 s3Client;
S3Downloader() {
this.s3Client = AmazonS3ClientBuilder.standard()
.withRegion(System.getenv("VESPA_CLOUD_NATIVE_REGION"))
.withCredentials(new CredentialsProvider())
.build();
}
@Override
private static class CredentialsProvider implements AWSCredentialsProvider {
private static final String DEFAULT_CREDENTIALS_PATH = Defaults.getDefaults()
.underVespaHome("var/vespa/aws/credentials.json");
private final Path credentialsPath;
public CredentialsProvider() {
this.credentialsPath = Path.of(DEFAULT_CREDENTIALS_PATH);
}
@Override
public AWSCredentials getCredentials() { return readCredentials(); }
@Override
public void refresh() { readCredentials(); }
private AWSSessionCredentials readCredentials() {
try {
Slime slime = SlimeUtils.jsonToSlime(Files.readAllBytes(credentialsPath));
Cursor cursor = slime.get();
String accessKey = cursor.field("awsAccessKey").asString();
String secretKey = cursor.field("awsSecretKey").asString();
String sessionToken = cursor.field("sessionToken").asString();
return new BasicSessionCredentials(accessKey, secretKey, sessionToken);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
} |
Shouldn't `minus` have the same change? | public record GpuResources(int count, double memoryGb) {
private static final GpuResources zero = new GpuResources(0, 0);
public GpuResources {
validate(memoryGb, "memory");
}
private boolean lessThan(GpuResources other) {
return this.count < other.count ||
this.memoryGb < other.memoryGb;
}
public boolean isZero() { return this.equals(zero); }
public static GpuResources zero() { return zero; }
public boolean isDefault() { return this.equals(getDefault()); }
/** Returns zero gpu resources. */
public static GpuResources getDefault() { return zero; }
public GpuResources plus(GpuResources other) {
var thisMem = this.count() * this.memoryGb();
var otherMem = other.count() * other.memoryGb();
return new NodeResources.GpuResources(1, thisMem + otherMem);
}
public GpuResources minus(GpuResources other) {
return new GpuResources(this.count - other.count, this.memoryGb - other.memoryGb);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
GpuResources that = (GpuResources) o;
return count == that.count && equal(this.memoryGb, that.memoryGb);
}
@Override
public int hashCode() {
return Objects.hash(count, memoryGb);
}
} | return new NodeResources.GpuResources(1, thisMem + otherMem); | public record GpuResources(int count, double memoryGb) {
private static final GpuResources zero = new GpuResources(0, 0);
public GpuResources {
validate(memoryGb, "memory");
}
private boolean lessThan(GpuResources other) {
return this.count < other.count ||
this.memoryGb < other.memoryGb;
}
public boolean isZero() { return this.equals(zero); }
public static GpuResources zero() { return zero; }
public boolean isDefault() { return this.equals(getDefault()); }
/** Returns zero gpu resources. */
public static GpuResources getDefault() { return zero; }
public GpuResources plus(GpuResources other) {
if (other.isZero()) return this;
var thisMem = this.count() * this.memoryGb();
var otherMem = other.count() * other.memoryGb();
return new NodeResources.GpuResources(1, thisMem + otherMem);
}
public GpuResources minus(GpuResources other) {
if (other.isZero()) return this;
var thisMem = this.count() * this.memoryGb();
var otherMem = other.count() * other.memoryGb();
return new NodeResources.GpuResources(1, thisMem - otherMem);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
GpuResources that = (GpuResources) o;
return count == that.count && equal(this.memoryGb, that.memoryGb);
}
@Override
public int hashCode() {
return Objects.hash(count, memoryGb);
}
} | class NodeResources {
private static final double cpuUnitCost = 0.11;
private static final double memoryUnitCost = 0.011;
private static final double diskUnitCost = 0.0004;
private static final double gpuUnitCost = 0.075;
private static final NodeResources zero = new NodeResources(0, 0, 0, 0);
private static final NodeResources unspecified = new NodeResources(0, 0, 0, 0);
public enum DiskSpeed {
fast,
slow,
any;
/**
* Compares disk speeds by cost: Slower is cheaper, and therefore before.
* Any can be slow and therefore costs the same as slow.
*/
public static int compare(DiskSpeed a, DiskSpeed b) {
if (a == any) a = slow;
if (b == any) b = slow;
if (a == slow && b == fast) return -1;
if (a == fast && b == slow) return 1;
return 0;
}
public boolean compatibleWith(DiskSpeed other) {
return this == any || other == any || other == this;
}
private DiskSpeed combineWith(DiskSpeed other) {
if (this == any) return other;
if (other == any) return this;
if (this == other) return this;
throw new IllegalArgumentException(this + " cannot be combined with " + other);
}
public boolean isDefault() { return this == getDefault(); }
public static DiskSpeed getDefault() { return fast; }
}
public enum StorageType {
remote,
local,
any;
/**
* Compares storage type by cost: Remote is cheaper, and therefore before.
* Any can be remote and therefore costs the same as remote.
*/
public static int compare(StorageType a, StorageType b) {
if (a == any) a = remote;
if (b == any) b = remote;
if (a == remote && b == local) return -1;
if (a == local && b == remote) return 1;
return 0;
}
public boolean compatibleWith(StorageType other) {
return this == any || other == any || other == this;
}
private StorageType combineWith(StorageType other) {
if (this == any) return other;
if (other == any) return this;
if (this == other) return this;
throw new IllegalArgumentException(this + " cannot be combined with " + other);
}
public boolean isDefault() { return this == getDefault(); }
public static StorageType getDefault() { return any; }
}
public enum Architecture {
x86_64,
arm64,
any;
public static int compare(Architecture a, Architecture b) {
if (a == any) a = x86_64;
if (b == any) b = x86_64;
if (a == x86_64 && b == arm64) return -1;
if (a == arm64 && b == x86_64) return 1;
return 0;
}
public boolean compatibleWith(Architecture other) {
return this == any || other == any || other == this;
}
private Architecture combineWith(Architecture other) {
if (this == any) return other;
if (other == any) return this;
if (this == other) return this;
throw new IllegalArgumentException(this + " cannot be combined with " + other);
}
public boolean isDefault() { return this == getDefault(); }
public static Architecture getDefault() { return any; }
}
private final double vcpu;
private final double memoryGb;
private final double diskGb;
private final double bandwidthGbps;
private final GpuResources gpuResources;
private final DiskSpeed diskSpeed;
private final StorageType storageType;
private final Architecture architecture;
public NodeResources(double vcpu, double memoryGb, double diskGb, double bandwidthGbps) {
this(vcpu, memoryGb, diskGb, bandwidthGbps, DiskSpeed.getDefault());
}
public NodeResources(double vcpu, double memoryGb, double diskGb, double bandwidthGbps, DiskSpeed diskSpeed) {
this(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, StorageType.getDefault(), Architecture.getDefault(), GpuResources.getDefault());
}
public NodeResources(double vcpu, double memoryGb, double diskGb, double bandwidthGbps, DiskSpeed diskSpeed, StorageType storageType) {
this(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, Architecture.getDefault(), GpuResources.getDefault());
}
public NodeResources(double vcpu, double memoryGb, double diskGb, double bandwidthGbps, DiskSpeed diskSpeed, StorageType storageType, Architecture architecture) {
this(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, GpuResources.getDefault());
}
public NodeResources(double vcpu, double memoryGb, double diskGb, double bandwidthGbps, DiskSpeed diskSpeed, StorageType storageType, Architecture architecture, GpuResources gpuResources) {
this.vcpu = validate(vcpu, "vcpu");
this.memoryGb = validate(memoryGb, "memory");
this.diskGb = validate(diskGb, "disk");
this.bandwidthGbps = validate(bandwidthGbps, "bandwidth");
this.gpuResources = gpuResources;
this.diskSpeed = diskSpeed;
this.storageType = storageType;
this.architecture = architecture;
}
public double vcpu() { return vcpu; }
public double memoryGb() { return memoryGb; }
public double diskGb() { return diskGb; }
public double bandwidthGbps() { return bandwidthGbps; }
public DiskSpeed diskSpeed() { return diskSpeed; }
public StorageType storageType() { return storageType; }
public Architecture architecture() { return architecture; }
public GpuResources gpuResources() { return gpuResources; }
public boolean vcpuIsUnspecified() { return vcpu == 0; }
public boolean memoryGbIsUnspecified() { return memoryGb == 0; }
public boolean diskGbIsUnspecified() { return diskGb == 0; }
public boolean bandwidthGbpsIsUnspecified() { return bandwidthGbps == 0; }
/** Returns the standard cost of these resources, in dollars per hour */
public double cost() {
return (vcpu * cpuUnitCost) +
(memoryGb * memoryUnitCost) +
(diskGb * diskUnitCost) +
(gpuResources.count * gpuResources.memoryGb * gpuUnitCost);
}
public NodeResources withVcpu(double vcpu) {
if (vcpu == this.vcpu) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources withMemoryGb(double memoryGb) {
if (memoryGb == this.memoryGb) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources withDiskGb(double diskGb) {
if (diskGb == this.diskGb) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources withBandwidthGbps(double bandwidthGbps) {
ensureSpecified();
if (bandwidthGbps == this.bandwidthGbps) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources with(DiskSpeed diskSpeed) {
ensureSpecified();
if (diskSpeed == this.diskSpeed) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources with(StorageType storageType) {
ensureSpecified();
if (storageType == this.storageType) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources with(Architecture architecture) {
ensureSpecified();
if (architecture == this.architecture) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources with(GpuResources gpuResources) {
ensureSpecified();
if (this.gpuResources.equals(gpuResources)) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources withUnspecifiedNumbersFrom(NodeResources fullySpecified) {
var resources = this;
if (resources.vcpuIsUnspecified())
resources = resources.withVcpu(fullySpecified.vcpu());
if (resources.memoryGbIsUnspecified())
resources = resources.withMemoryGb(fullySpecified.memoryGb());
if (resources.diskGbIsUnspecified())
resources = resources.withDiskGb(fullySpecified.diskGb());
if (resources.bandwidthGbpsIsUnspecified())
resources = resources.withBandwidthGbps(fullySpecified.bandwidthGbps());
return resources;
}
/** Returns this with disk speed, storage type and architecture set to any */
public NodeResources justNumbers() {
if (isUnspecified()) return unspecified();
return with(NodeResources.DiskSpeed.any).with(StorageType.any).with(Architecture.any);
}
/** Returns this with all numbers set to 0 */
public NodeResources justNonNumbers() {
if (isUnspecified()) return unspecified();
return withVcpu(0).withMemoryGb(0).withDiskGb(0).withBandwidthGbps(0).with(GpuResources.zero());
}
public NodeResources subtract(NodeResources other) {
ensureSpecified();
other.ensureSpecified();
if ( ! this.isInterchangeableWith(other))
throw new IllegalArgumentException(this + " and " + other + " are not interchangeable");
return new NodeResources(vcpu - other.vcpu,
memoryGb - other.memoryGb,
diskGb - other.diskGb,
bandwidthGbps - other.bandwidthGbps,
this.diskSpeed.combineWith(other.diskSpeed),
this.storageType.combineWith(other.storageType),
this.architecture.combineWith(other.architecture),
this.gpuResources.minus(other.gpuResources));
}
public NodeResources add(NodeResources other) {
ensureSpecified();
if ( ! this.isInterchangeableWith(other))
throw new IllegalArgumentException(this + " and " + other + " are not interchangeable");
return new NodeResources(vcpu + other.vcpu,
memoryGb + other.memoryGb,
diskGb + other.diskGb,
bandwidthGbps + other.bandwidthGbps,
this.diskSpeed.combineWith(other.diskSpeed),
this.storageType.combineWith(other.storageType),
this.architecture.combineWith(other.architecture),
this.gpuResources.plus(other.gpuResources));
}
public NodeResources multipliedBy(double factor) {
return this.withVcpu(vcpu * factor)
.withMemoryGb(memoryGb * factor)
.withDiskGb(diskGb * factor)
.withBandwidthGbps(bandwidthGbps * factor);
}
private boolean isInterchangeableWith(NodeResources other) {
ensureSpecified();
other.ensureSpecified();
if (this.diskSpeed != DiskSpeed.any && other.diskSpeed != DiskSpeed.any && this.diskSpeed != other.diskSpeed)
return false;
if (this.storageType != StorageType.any && other.storageType != StorageType.any && this.storageType != other.storageType)
return false;
if (this.architecture != Architecture.any && other.architecture != Architecture.any && this.architecture != other.architecture)
return false;
return true;
}
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof NodeResources other)) return false;
if ( ! equal(this.vcpu, other.vcpu)) return false;
if ( ! equal(this.memoryGb, other.memoryGb)) return false;
if ( ! equal(this.diskGb, other.diskGb)) return false;
if ( ! equal(this.bandwidthGbps, other.bandwidthGbps)) return false;
if ( ! this.gpuResources.equals(other.gpuResources)) return false;
if (this.diskSpeed != other.diskSpeed) return false;
if (this.storageType != other.storageType) return false;
if (this.architecture != other.architecture) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hash(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture);
}
private static StringBuilder appendDouble(StringBuilder sb, double d) {
long x10 = Math.round(d * 10);
sb.append(x10 / 10).append('.').append(x10 % 10);
return sb;
}
@Override
public String toString() {
if (isUnspecified())
return "unspecified resources";
StringBuilder sb = new StringBuilder("[vcpu: ");
appendDouble(sb, vcpu);
sb.append(", memory: ");
appendDouble(sb, memoryGb);
sb.append(" Gb, disk: ");
appendDouble(sb, diskGb);
sb.append(" Gb");
if (bandwidthGbps > 0) {
sb.append(", bandwidth: ");
appendDouble(sb, bandwidthGbps);
sb.append(" Gbps");
}
if ( !diskSpeed.isDefault()) {
sb.append(", disk speed: ").append(diskSpeed);
}
if ( !storageType.isDefault()) {
sb.append(", storage type: ").append(storageType);
}
sb.append(", architecture: ").append(architecture);
if ( !gpuResources.isDefault()) {
sb.append(", gpu count: ").append(gpuResources.count());
sb.append(", gpu memory: ");
appendDouble(sb, gpuResources.memoryGb());
sb.append(" Gb");
}
sb.append(']');
return sb.toString();
}
/** Returns true if all the resources of this are the same or larger than the given resources */
public boolean satisfies(NodeResources other) {
ensureSpecified();
other.ensureSpecified();
if (this.vcpu < other.vcpu) return false;
if (this.memoryGb < other.memoryGb) return false;
if (this.diskGb < other.diskGb) return false;
if (this.bandwidthGbps < other.bandwidthGbps) return false;
if (this.gpuResources.lessThan(other.gpuResources)) return false;
if (other.diskSpeed != DiskSpeed.any && other.diskSpeed != this.diskSpeed) return false;
if (other.storageType != StorageType.any && other.storageType != this.storageType) return false;
if (other.architecture != Architecture.any && other.architecture != this.architecture) return false;
return true;
}
/**
* Returns true if all the resources of this are the same as or compatible with the requested resources:
* - Equal numbers only where request implies it (i.e not for disk if storage is any/remote, and not for bandwidth
* where we don't enforce constraints),
* - Compatible non-numbers.
*/
public boolean compatibleWith(NodeResources requested) {
if ( ! equal(this.vcpu, requested.vcpu)) return false;
if ( ! equal(this.memoryGb, requested.memoryGb)) return false;
if (this.storageType == StorageType.local || requested.storageType == StorageType.local) {
if ( ! equal(this.diskGb, requested.diskGb)) return false;
}
else {
if (this.diskGb < requested.diskGb) return false;
}
if ( ! this.gpuResources.equals(requested.gpuResources)) return false;
if ( ! this.diskSpeed.compatibleWith(requested.diskSpeed)) return false;
if ( ! this.storageType.compatibleWith(requested.storageType)) return false;
if ( ! this.architecture.compatibleWith(requested.architecture)) return false;
return true;
}
/**
* Returns true if all the resources of this are the same as or compatible with the given resources:
* - Equal numbers.
* - Compatible non-numbers.
*/
public boolean equalsWhereSpecified(NodeResources other) {
if ( ! equal(this.vcpu, other.vcpu)) return false;
if ( ! equal(this.memoryGb, other.memoryGb)) return false;
if ( ! equal(this.diskGb, other.diskGb)) return false;
if ( ! equal(this.bandwidthGbps, other.bandwidthGbps)) return false;
if ( ! this.gpuResources.equals(other.gpuResources)) return false;
if ( ! this.diskSpeed.compatibleWith(other.diskSpeed)) return false;
if ( ! this.storageType.compatibleWith(other.storageType)) return false;
if ( ! this.architecture.compatibleWith(other.architecture)) return false;
return true;
}
public static NodeResources unspecified() { return unspecified; }
public boolean isUnspecified() { return this == unspecified; }
private void ensureSpecified() {
if (isUnspecified())
throw new IllegalStateException("Cannot perform this on unspecified resources");
}
public double distanceTo(NodeResources other) {
if ( ! this.diskSpeed().compatibleWith(other.diskSpeed())) return Double.MAX_VALUE;
if ( ! this.storageType().compatibleWith(other.storageType())) return Double.MAX_VALUE;
double distance = Math.pow(this.vcpu() - other.vcpu(), 2) + Math.pow(this.memoryGb() - other.memoryGb(), 2);
if (this.storageType() == StorageType.local || other.storageType() == StorageType.local)
distance += Math.pow(this.diskGb() - other.diskGb(), 2);
return distance;
}
/** Returns this.isUnspecified() ? Optional.empty() : Optional.of(this) */
public Optional<NodeResources> asOptional() {
return this.isUnspecified() ? Optional.empty() : Optional.of(this);
}
private static boolean equal(double a, double b) {
return Math.abs(a - b) < 0.00000001;
}
/**
* Create this from serial form.
*
* @throws IllegalArgumentException if the given string cannot be parsed as a serial form of this
*/
public static NodeResources fromLegacyName(String name) {
if ( ! name.startsWith("d-"))
throw new IllegalArgumentException("A node specification string must start by 'd-' but was '" + name + "'");
String[] parts = name.split("-");
if (parts.length != 4)
throw new IllegalArgumentException("A node specification string must contain three numbers separated by '-' but was '" + name + "'");
double cpu = Integer.parseInt(parts[1]);
double mem = Integer.parseInt(parts[2]);
double dsk = Integer.parseInt(parts[3]);
if (cpu == 0) cpu = 0.5;
if (cpu == 2 && mem == 8 ) cpu = 1.5;
if (cpu == 2 && mem == 12 ) cpu = 2.3;
return new NodeResources(cpu, mem, dsk, 0.3, DiskSpeed.getDefault(), StorageType.getDefault(), Architecture.any);
}
private static double validate(double value, String valueName) {
if (Double.isNaN(value)) throw new IllegalArgumentException(valueName + " cannot be NaN");
if (Double.isInfinite(value)) throw new IllegalArgumentException(valueName + " cannot be infinite");
return value;
}
public boolean isZero() {
return this.equals(zero);
}
public static NodeResources zero() { return zero; }
} | class NodeResources {
private static final double cpuUnitCost = 0.11;
private static final double memoryUnitCost = 0.011;
private static final double diskUnitCost = 0.0004;
private static final double gpuUnitCost = 0.075;
private static final NodeResources zero = new NodeResources(0, 0, 0, 0);
private static final NodeResources unspecified = new NodeResources(0, 0, 0, 0);
public enum DiskSpeed {
fast,
slow,
any;
/**
* Compares disk speeds by cost: Slower is cheaper, and therefore before.
* Any can be slow and therefore costs the same as slow.
*/
public static int compare(DiskSpeed a, DiskSpeed b) {
if (a == any) a = slow;
if (b == any) b = slow;
if (a == slow && b == fast) return -1;
if (a == fast && b == slow) return 1;
return 0;
}
public boolean compatibleWith(DiskSpeed other) {
return this == any || other == any || other == this;
}
private DiskSpeed combineWith(DiskSpeed other) {
if (this == any) return other;
if (other == any) return this;
if (this == other) return this;
throw new IllegalArgumentException(this + " cannot be combined with " + other);
}
public boolean isDefault() { return this == getDefault(); }
public static DiskSpeed getDefault() { return fast; }
}
public enum StorageType {
remote,
local,
any;
/**
* Compares storage type by cost: Remote is cheaper, and therefore before.
* Any can be remote and therefore costs the same as remote.
*/
public static int compare(StorageType a, StorageType b) {
if (a == any) a = remote;
if (b == any) b = remote;
if (a == remote && b == local) return -1;
if (a == local && b == remote) return 1;
return 0;
}
public boolean compatibleWith(StorageType other) {
return this == any || other == any || other == this;
}
private StorageType combineWith(StorageType other) {
if (this == any) return other;
if (other == any) return this;
if (this == other) return this;
throw new IllegalArgumentException(this + " cannot be combined with " + other);
}
public boolean isDefault() { return this == getDefault(); }
public static StorageType getDefault() { return any; }
}
public enum Architecture {
x86_64,
arm64,
any;
public static int compare(Architecture a, Architecture b) {
if (a == any) a = x86_64;
if (b == any) b = x86_64;
if (a == x86_64 && b == arm64) return -1;
if (a == arm64 && b == x86_64) return 1;
return 0;
}
public boolean compatibleWith(Architecture other) {
return this == any || other == any || other == this;
}
private Architecture combineWith(Architecture other) {
if (this == any) return other;
if (other == any) return this;
if (this == other) return this;
throw new IllegalArgumentException(this + " cannot be combined with " + other);
}
public boolean isDefault() { return this == getDefault(); }
public static Architecture getDefault() { return any; }
}
private final double vcpu;
private final double memoryGb;
private final double diskGb;
private final double bandwidthGbps;
private final GpuResources gpuResources;
private final DiskSpeed diskSpeed;
private final StorageType storageType;
private final Architecture architecture;
public NodeResources(double vcpu, double memoryGb, double diskGb, double bandwidthGbps) {
this(vcpu, memoryGb, diskGb, bandwidthGbps, DiskSpeed.getDefault());
}
public NodeResources(double vcpu, double memoryGb, double diskGb, double bandwidthGbps, DiskSpeed diskSpeed) {
this(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, StorageType.getDefault(), Architecture.getDefault(), GpuResources.getDefault());
}
public NodeResources(double vcpu, double memoryGb, double diskGb, double bandwidthGbps, DiskSpeed diskSpeed, StorageType storageType) {
this(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, Architecture.getDefault(), GpuResources.getDefault());
}
public NodeResources(double vcpu, double memoryGb, double diskGb, double bandwidthGbps, DiskSpeed diskSpeed, StorageType storageType, Architecture architecture) {
this(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, GpuResources.getDefault());
}
public NodeResources(double vcpu, double memoryGb, double diskGb, double bandwidthGbps, DiskSpeed diskSpeed, StorageType storageType, Architecture architecture, GpuResources gpuResources) {
this.vcpu = validate(vcpu, "vcpu");
this.memoryGb = validate(memoryGb, "memory");
this.diskGb = validate(diskGb, "disk");
this.bandwidthGbps = validate(bandwidthGbps, "bandwidth");
this.gpuResources = gpuResources;
this.diskSpeed = diskSpeed;
this.storageType = storageType;
this.architecture = architecture;
}
public double vcpu() { return vcpu; }
public double memoryGb() { return memoryGb; }
public double diskGb() { return diskGb; }
public double bandwidthGbps() { return bandwidthGbps; }
public DiskSpeed diskSpeed() { return diskSpeed; }
public StorageType storageType() { return storageType; }
public Architecture architecture() { return architecture; }
public GpuResources gpuResources() { return gpuResources; }
public boolean vcpuIsUnspecified() { return vcpu == 0; }
public boolean memoryGbIsUnspecified() { return memoryGb == 0; }
public boolean diskGbIsUnspecified() { return diskGb == 0; }
public boolean bandwidthGbpsIsUnspecified() { return bandwidthGbps == 0; }
/** Returns the standard cost of these resources, in dollars per hour */
public double cost() {
return (vcpu * cpuUnitCost) +
(memoryGb * memoryUnitCost) +
(diskGb * diskUnitCost) +
(gpuResources.count * gpuResources.memoryGb * gpuUnitCost);
}
public NodeResources withVcpu(double vcpu) {
if (vcpu == this.vcpu) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources withMemoryGb(double memoryGb) {
if (memoryGb == this.memoryGb) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources withDiskGb(double diskGb) {
if (diskGb == this.diskGb) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources withBandwidthGbps(double bandwidthGbps) {
ensureSpecified();
if (bandwidthGbps == this.bandwidthGbps) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources with(DiskSpeed diskSpeed) {
ensureSpecified();
if (diskSpeed == this.diskSpeed) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources with(StorageType storageType) {
ensureSpecified();
if (storageType == this.storageType) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources with(Architecture architecture) {
ensureSpecified();
if (architecture == this.architecture) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources with(GpuResources gpuResources) {
ensureSpecified();
if (this.gpuResources.equals(gpuResources)) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources withUnspecifiedNumbersFrom(NodeResources fullySpecified) {
var resources = this;
if (resources.vcpuIsUnspecified())
resources = resources.withVcpu(fullySpecified.vcpu());
if (resources.memoryGbIsUnspecified())
resources = resources.withMemoryGb(fullySpecified.memoryGb());
if (resources.diskGbIsUnspecified())
resources = resources.withDiskGb(fullySpecified.diskGb());
if (resources.bandwidthGbpsIsUnspecified())
resources = resources.withBandwidthGbps(fullySpecified.bandwidthGbps());
return resources;
}
/** Returns this with disk speed, storage type and architecture set to any */
public NodeResources justNumbers() {
if (isUnspecified()) return unspecified();
return with(NodeResources.DiskSpeed.any).with(StorageType.any).with(Architecture.any);
}
/** Returns this with all numbers set to 0 */
public NodeResources justNonNumbers() {
if (isUnspecified()) return unspecified();
return withVcpu(0).withMemoryGb(0).withDiskGb(0).withBandwidthGbps(0).with(GpuResources.zero());
}
public NodeResources subtract(NodeResources other) {
ensureSpecified();
other.ensureSpecified();
if ( ! this.isInterchangeableWith(other))
throw new IllegalArgumentException(this + " and " + other + " are not interchangeable");
return new NodeResources(vcpu - other.vcpu,
memoryGb - other.memoryGb,
diskGb - other.diskGb,
bandwidthGbps - other.bandwidthGbps,
this.diskSpeed.combineWith(other.diskSpeed),
this.storageType.combineWith(other.storageType),
this.architecture.combineWith(other.architecture),
this.gpuResources.minus(other.gpuResources));
}
public NodeResources add(NodeResources other) {
ensureSpecified();
if ( ! this.isInterchangeableWith(other))
throw new IllegalArgumentException(this + " and " + other + " are not interchangeable");
return new NodeResources(vcpu + other.vcpu,
memoryGb + other.memoryGb,
diskGb + other.diskGb,
bandwidthGbps + other.bandwidthGbps,
this.diskSpeed.combineWith(other.diskSpeed),
this.storageType.combineWith(other.storageType),
this.architecture.combineWith(other.architecture),
this.gpuResources.plus(other.gpuResources));
}
public NodeResources multipliedBy(double factor) {
return this.withVcpu(vcpu * factor)
.withMemoryGb(memoryGb * factor)
.withDiskGb(diskGb * factor)
.withBandwidthGbps(bandwidthGbps * factor);
}
private boolean isInterchangeableWith(NodeResources other) {
ensureSpecified();
other.ensureSpecified();
if (this.diskSpeed != DiskSpeed.any && other.diskSpeed != DiskSpeed.any && this.diskSpeed != other.diskSpeed)
return false;
if (this.storageType != StorageType.any && other.storageType != StorageType.any && this.storageType != other.storageType)
return false;
if (this.architecture != Architecture.any && other.architecture != Architecture.any && this.architecture != other.architecture)
return false;
return true;
}
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof NodeResources other)) return false;
if ( ! equal(this.vcpu, other.vcpu)) return false;
if ( ! equal(this.memoryGb, other.memoryGb)) return false;
if ( ! equal(this.diskGb, other.diskGb)) return false;
if ( ! equal(this.bandwidthGbps, other.bandwidthGbps)) return false;
if ( ! this.gpuResources.equals(other.gpuResources)) return false;
if (this.diskSpeed != other.diskSpeed) return false;
if (this.storageType != other.storageType) return false;
if (this.architecture != other.architecture) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hash(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture);
}
private static StringBuilder appendDouble(StringBuilder sb, double d) {
long x10 = Math.round(d * 10);
sb.append(x10 / 10).append('.').append(x10 % 10);
return sb;
}
@Override
public String toString() {
if (isUnspecified())
return "unspecified resources";
StringBuilder sb = new StringBuilder("[vcpu: ");
appendDouble(sb, vcpu);
sb.append(", memory: ");
appendDouble(sb, memoryGb);
sb.append(" Gb, disk: ");
appendDouble(sb, diskGb);
sb.append(" Gb");
if (bandwidthGbps > 0) {
sb.append(", bandwidth: ");
appendDouble(sb, bandwidthGbps);
sb.append(" Gbps");
}
if ( !diskSpeed.isDefault()) {
sb.append(", disk speed: ").append(diskSpeed);
}
if ( !storageType.isDefault()) {
sb.append(", storage type: ").append(storageType);
}
sb.append(", architecture: ").append(architecture);
if ( !gpuResources.isDefault()) {
sb.append(", gpu count: ").append(gpuResources.count());
sb.append(", gpu memory: ");
appendDouble(sb, gpuResources.memoryGb());
sb.append(" Gb");
}
sb.append(']');
return sb.toString();
}
/** Returns true if all the resources of this are the same or larger than the given resources */
public boolean satisfies(NodeResources other) {
ensureSpecified();
other.ensureSpecified();
if (this.vcpu < other.vcpu) return false;
if (this.memoryGb < other.memoryGb) return false;
if (this.diskGb < other.diskGb) return false;
if (this.bandwidthGbps < other.bandwidthGbps) return false;
if (this.gpuResources.lessThan(other.gpuResources)) return false;
if (other.diskSpeed != DiskSpeed.any && other.diskSpeed != this.diskSpeed) return false;
if (other.storageType != StorageType.any && other.storageType != this.storageType) return false;
if (other.architecture != Architecture.any && other.architecture != this.architecture) return false;
return true;
}
/**
* Returns true if all the resources of this are the same as or compatible with the requested resources:
* - Equal numbers only where request implies it (i.e not for disk if storage is any/remote, and not for bandwidth
* where we don't enforce constraints),
* - Compatible non-numbers.
*/
public boolean compatibleWith(NodeResources requested) {
if ( ! equal(this.vcpu, requested.vcpu)) return false;
if ( ! equal(this.memoryGb, requested.memoryGb)) return false;
if (this.storageType == StorageType.local || requested.storageType == StorageType.local) {
if ( ! equal(this.diskGb, requested.diskGb)) return false;
}
else {
if (this.diskGb < requested.diskGb) return false;
}
if ( ! this.gpuResources.equals(requested.gpuResources)) return false;
if ( ! this.diskSpeed.compatibleWith(requested.diskSpeed)) return false;
if ( ! this.storageType.compatibleWith(requested.storageType)) return false;
if ( ! this.architecture.compatibleWith(requested.architecture)) return false;
return true;
}
/**
* Returns true if all the resources of this are the same as or compatible with the given resources:
* - Equal numbers.
* - Compatible non-numbers.
*/
public boolean equalsWhereSpecified(NodeResources other) {
if ( ! equal(this.vcpu, other.vcpu)) return false;
if ( ! equal(this.memoryGb, other.memoryGb)) return false;
if ( ! equal(this.diskGb, other.diskGb)) return false;
if ( ! equal(this.bandwidthGbps, other.bandwidthGbps)) return false;
if ( ! this.gpuResources.equals(other.gpuResources)) return false;
if ( ! this.diskSpeed.compatibleWith(other.diskSpeed)) return false;
if ( ! this.storageType.compatibleWith(other.storageType)) return false;
if ( ! this.architecture.compatibleWith(other.architecture)) return false;
return true;
}
public static NodeResources unspecified() { return unspecified; }
public boolean isUnspecified() { return this == unspecified; }
private void ensureSpecified() {
if (isUnspecified())
throw new IllegalStateException("Cannot perform this on unspecified resources");
}
public double distanceTo(NodeResources other) {
if ( ! this.diskSpeed().compatibleWith(other.diskSpeed())) return Double.MAX_VALUE;
if ( ! this.storageType().compatibleWith(other.storageType())) return Double.MAX_VALUE;
double distance = Math.pow(this.vcpu() - other.vcpu(), 2) + Math.pow(this.memoryGb() - other.memoryGb(), 2);
if (this.storageType() == StorageType.local || other.storageType() == StorageType.local)
distance += Math.pow(this.diskGb() - other.diskGb(), 2);
return distance;
}
/** Returns this.isUnspecified() ? Optional.empty() : Optional.of(this) */
public Optional<NodeResources> asOptional() {
return this.isUnspecified() ? Optional.empty() : Optional.of(this);
}
private static boolean equal(double a, double b) {
return Math.abs(a - b) < 0.00000001;
}
/**
* Create this from serial form.
*
* @throws IllegalArgumentException if the given string cannot be parsed as a serial form of this
*/
public static NodeResources fromLegacyName(String name) {
if ( ! name.startsWith("d-"))
throw new IllegalArgumentException("A node specification string must start by 'd-' but was '" + name + "'");
String[] parts = name.split("-");
if (parts.length != 4)
throw new IllegalArgumentException("A node specification string must contain three numbers separated by '-' but was '" + name + "'");
double cpu = Integer.parseInt(parts[1]);
double mem = Integer.parseInt(parts[2]);
double dsk = Integer.parseInt(parts[3]);
if (cpu == 0) cpu = 0.5;
if (cpu == 2 && mem == 8 ) cpu = 1.5;
if (cpu == 2 && mem == 12 ) cpu = 2.3;
return new NodeResources(cpu, mem, dsk, 0.3, DiskSpeed.getDefault(), StorageType.getDefault(), Architecture.any);
}
private static double validate(double value, String valueName) {
if (Double.isNaN(value)) throw new IllegalArgumentException(valueName + " cannot be NaN");
if (Double.isInfinite(value)) throw new IllegalArgumentException(valueName + " cannot be infinite");
return value;
}
public boolean isZero() {
return this.equals(zero);
}
public static NodeResources zero() { return zero; }
} |
😂 | public long getConnectedAt(TimeUnit unit) {
return parentRequest.getConnectedAt(unit);
} | return parentRequest.getConnectedAt(unit); | public long getConnectedAt(TimeUnit unit) {
return parentRequest.getConnectedAt(unit);
} | class Builder {
private final HttpRequest parent;
private com.yahoo.jdisc.http.HttpRequest jdiscRequest;
Method method = null;
Version version = null;
Map<String, String> properties = new HashMap<>();
InputStream requestData = null;
URI uri = null;
CurrentContainer container = null;
private static final String nag = " must be set before the attempted operation.";
SocketAddress remoteAddress;
private void boom(Object ref, String what) {
if (ref == null) {
throw new IllegalStateException(what + nag);
}
}
private void requireUri() {
boom(uri, "An URI");
}
private void requireContainer() {
boom(container, "A CurrentContainer instance");
}
private void ensureJdiscParent() {
if (jdiscRequest == null) {
if (parent == null) {
throw new IllegalStateException("Neither another HttpRequest nor JDisc request available.");
} else {
jdiscRequest = parent.getJDiscRequest();
}
}
}
private void ensureRequestData() {
if (requestData == null) {
if (parent == null) {
throw new IllegalStateException(
"Neither another HttpRequest nor request data input stream available.");
} else {
requestData = parent.getData();
}
}
}
/**
* Instantiate a request builder with defaults from an existing request.
* If the request is null, a JDisc request must be set explitly using
* {@link
* instantiating any HTTP request.
*
* @param request source for defaults and parent JDisc request, may be null
* @see HttpRequest
*/
public Builder(HttpRequest request) {
this(request, request.getJDiscRequest());
}
/**
* Instantiate a request builder with defaults from an existing request.
*
* @param request parent JDisc request
* @see HttpRequest
*/
public Builder(com.yahoo.jdisc.http.HttpRequest request) {
this(null, request);
}
private Builder(HttpRequest parent, com.yahoo.jdisc.http.HttpRequest jdiscRequest) {
this.parent = parent;
this.jdiscRequest = jdiscRequest;
populateProperties();
}
private void populateProperties() {
if (parent == null) return;
properties.putAll(parent.propertyMap());
}
/**
* Add a parameter to the request. Multi-value parameters are not supported.
*
* @param key parameter name
* @param value parameter value
* @return this Builder instance
*/
public Builder put(String key, String value) {
properties.put(key, value);
return this;
}
/**
* Removes the parameter from the request properties.
* If there is no such parameter, nothing will be done.
*/
public Builder removeProperty(String parameterName) {
properties.remove(parameterName);
return this;
}
/**
* Set the HTTP method for the new request.
*
* @param method the HTTP method to use for the new request
* @return this Builder instance
*/
public Builder method(Method method) {
this.method = method;
return this;
}
/**
* Define the JDisc parent request.
*
* @param request a valid JDisc request for the current container
* @return this Builder instance
*/
public Builder jdiscRequest(com.yahoo.jdisc.http.HttpRequest request) {
this.jdiscRequest = request;
return this;
}
/**
* Set an inputstream to use for the request. If not set, the data from
* the original HttpRequest is used.
*
* @param requestData data to be consumed, e.g. POST data
* @return this Builder instance
*/
public Builder requestData(InputStream requestData) {
this.requestData = requestData;
return this;
}
/**
* Set the URI of the server request created.
*
* @param uri a valid URI for a server request
* @return this Builder instance
*/
public Builder uri(URI uri) {
this.uri = uri;
return this;
}
/**
* Create a new HTTP request without creating a new JDisc request. This
* is for scenarios where another HTTP request handler is invoked
* directly without dispatching through JDisc. The parent JDisc request
* for the original HttpRequest will be passed on the new HttpRequest
* instance's JDisc request, but no properties will be propagated into
* the original JDisc request.
*
* @return a new HttpRequest instance reflecting the given request data and parameters
*/
public HttpRequest createDirectRequest() {
ensureRequestData();
ensureJdiscParent();
return new HttpRequest(jdiscRequest, requestData, properties);
}
/**
* Start of API for synchronous HTTP request dispatch. Not yet ready for use.
*
* @return a new client request
*/
public HttpRequest createClientRequest() {
ensureJdiscParent();
requireUri();
com.yahoo.jdisc.http.HttpRequest clientRequest;
if (method == null) {
clientRequest = com.yahoo.jdisc.http.HttpRequest
.newClientRequest(jdiscRequest, uri);
} else {
if (version == null) {
clientRequest = com.yahoo.jdisc.http.HttpRequest
.newClientRequest(jdiscRequest, uri, method);
} else {
clientRequest = com.yahoo.jdisc.http.HttpRequest
.newClientRequest(jdiscRequest, uri, method,
version);
}
}
setParameters(clientRequest);
return new HttpRequest(clientRequest, requestData, properties);
}
/**
* Start of API for synchronous HTTP request dispatch. Not yet ready for use.
*
* @return a new server request
*/
public HttpRequest createServerRequest() {
requireUri();
requireContainer();
com.yahoo.jdisc.http.HttpRequest serverRequest;
if (method == null) {
serverRequest = com.yahoo.jdisc.http.HttpRequest
.newServerRequest(container, uri);
} else {
if (version == null) {
serverRequest = com.yahoo.jdisc.http.HttpRequest
.newServerRequest(container, uri, method);
} else {
if (remoteAddress == null) {
serverRequest = com.yahoo.jdisc.http.HttpRequest
.newServerRequest(container, uri, method,
version);
} else {
serverRequest = com.yahoo.jdisc.http.HttpRequest
.newServerRequest(container, uri, method,
version, remoteAddress);
}
}
}
setParameters(serverRequest);
return new HttpRequest(serverRequest, requestData, properties);
}
private void setParameters(com.yahoo.jdisc.http.HttpRequest request) {
for (Map.Entry<String, String> entry : properties.entrySet()) {
request.parameters().put(entry.getKey(), wrap(entry.getValue()));
}
}
} | class Builder {
private final HttpRequest parent;
private com.yahoo.jdisc.http.HttpRequest jdiscRequest;
Method method = null;
Version version = null;
Map<String, String> properties = new HashMap<>();
InputStream requestData = null;
URI uri = null;
CurrentContainer container = null;
private static final String nag = " must be set before the attempted operation.";
SocketAddress remoteAddress;
private void boom(Object ref, String what) {
if (ref == null) {
throw new IllegalStateException(what + nag);
}
}
private void requireUri() {
boom(uri, "An URI");
}
private void requireContainer() {
boom(container, "A CurrentContainer instance");
}
private void ensureJdiscParent() {
if (jdiscRequest == null) {
if (parent == null) {
throw new IllegalStateException("Neither another HttpRequest nor JDisc request available.");
} else {
jdiscRequest = parent.getJDiscRequest();
}
}
}
private void ensureRequestData() {
if (requestData == null) {
if (parent == null) {
throw new IllegalStateException(
"Neither another HttpRequest nor request data input stream available.");
} else {
requestData = parent.getData();
}
}
}
/**
* Instantiate a request builder with defaults from an existing request.
* If the request is null, a JDisc request must be set explitly using
* {@link
* instantiating any HTTP request.
*
* @param request source for defaults and parent JDisc request, may be null
* @see HttpRequest
*/
public Builder(HttpRequest request) {
this(request, request.getJDiscRequest());
}
/**
* Instantiate a request builder with defaults from an existing request.
*
* @param request parent JDisc request
* @see HttpRequest
*/
public Builder(com.yahoo.jdisc.http.HttpRequest request) {
this(null, request);
}
private Builder(HttpRequest parent, com.yahoo.jdisc.http.HttpRequest jdiscRequest) {
this.parent = parent;
this.jdiscRequest = jdiscRequest;
populateProperties();
}
private void populateProperties() {
if (parent == null) return;
properties.putAll(parent.propertyMap());
}
/**
* Add a parameter to the request. Multi-value parameters are not supported.
*
* @param key parameter name
* @param value parameter value
* @return this Builder instance
*/
public Builder put(String key, String value) {
properties.put(key, value);
return this;
}
/**
* Removes the parameter from the request properties.
* If there is no such parameter, nothing will be done.
*/
public Builder removeProperty(String parameterName) {
properties.remove(parameterName);
return this;
}
/**
* Set the HTTP method for the new request.
*
* @param method the HTTP method to use for the new request
* @return this Builder instance
*/
public Builder method(Method method) {
this.method = method;
return this;
}
/**
* Define the JDisc parent request.
*
* @param request a valid JDisc request for the current container
* @return this Builder instance
*/
public Builder jdiscRequest(com.yahoo.jdisc.http.HttpRequest request) {
this.jdiscRequest = request;
return this;
}
/**
* Set an inputstream to use for the request. If not set, the data from
* the original HttpRequest is used.
*
* @param requestData data to be consumed, e.g. POST data
* @return this Builder instance
*/
public Builder requestData(InputStream requestData) {
this.requestData = requestData;
return this;
}
/**
* Set the URI of the server request created.
*
* @param uri a valid URI for a server request
* @return this Builder instance
*/
public Builder uri(URI uri) {
this.uri = uri;
return this;
}
/**
* Create a new HTTP request without creating a new JDisc request. This
* is for scenarios where another HTTP request handler is invoked
* directly without dispatching through JDisc. The parent JDisc request
* for the original HttpRequest will be passed on the new HttpRequest
* instance's JDisc request, but no properties will be propagated into
* the original JDisc request.
*
* @return a new HttpRequest instance reflecting the given request data and parameters
*/
public HttpRequest createDirectRequest() {
ensureRequestData();
ensureJdiscParent();
return new HttpRequest(jdiscRequest, requestData, properties);
}
/**
* Start of API for synchronous HTTP request dispatch. Not yet ready for use.
*
* @return a new client request
*/
public HttpRequest createClientRequest() {
ensureJdiscParent();
requireUri();
com.yahoo.jdisc.http.HttpRequest clientRequest;
if (method == null) {
clientRequest = com.yahoo.jdisc.http.HttpRequest
.newClientRequest(jdiscRequest, uri);
} else {
if (version == null) {
clientRequest = com.yahoo.jdisc.http.HttpRequest
.newClientRequest(jdiscRequest, uri, method);
} else {
clientRequest = com.yahoo.jdisc.http.HttpRequest
.newClientRequest(jdiscRequest, uri, method,
version);
}
}
setParameters(clientRequest);
return new HttpRequest(clientRequest, requestData, properties);
}
/**
* Start of API for synchronous HTTP request dispatch. Not yet ready for use.
*
* @return a new server request
*/
public HttpRequest createServerRequest() {
requireUri();
requireContainer();
com.yahoo.jdisc.http.HttpRequest serverRequest;
if (method == null) {
serverRequest = com.yahoo.jdisc.http.HttpRequest
.newServerRequest(container, uri);
} else {
if (version == null) {
serverRequest = com.yahoo.jdisc.http.HttpRequest
.newServerRequest(container, uri, method);
} else {
if (remoteAddress == null) {
serverRequest = com.yahoo.jdisc.http.HttpRequest
.newServerRequest(container, uri, method,
version);
} else {
serverRequest = com.yahoo.jdisc.http.HttpRequest
.newServerRequest(container, uri, method,
version, remoteAddress);
}
}
}
setParameters(serverRequest);
return new HttpRequest(serverRequest, requestData, properties);
}
private void setParameters(com.yahoo.jdisc.http.HttpRequest request) {
for (Map.Entry<String, String> entry : properties.entrySet()) {
request.parameters().put(entry.getKey(), wrap(entry.getValue()));
}
}
} |
```suggestion .flatMap(hostResource -> hostResource.spec().membership().stream()) ``` | public void validate(VespaModel model, DeployState state) {
if (! state.isHostedTenantApplication(model.getAdmin().getApplicationType())) return;
model.getContainerClusters().forEach((__, cluster) -> {
var isExclusive = model.hostSystem().getHosts()
.stream()
.map(hostResource -> hostResource.spec().membership())
.filter(Optional::isPresent)
.map(Optional::get)
.filter(membership -> membership.cluster().id().equals(cluster.id()))
.anyMatch(membership -> membership.cluster().isExclusive());
if (! isExclusive)
validateS3UlsInConfig(state, cluster);
});
} | .map(Optional::get) | public void validate(VespaModel model, DeployState state) {
if (! state.isHostedTenantApplication(model.getAdmin().getApplicationType())) return;
model.getContainerClusters().forEach((__, cluster) -> {
var isExclusive = hasExclusiveNodes(model, cluster);
validateS3UlsInConfig(state, cluster, isExclusive);
});
} | class UrlConfigValidator extends Validator {
@Override
private static void validateS3UlsInConfig(DeployState state, ApplicationContainerCluster cluster) {
var match = state.getFileRegistry().export().stream()
.filter(fileReference -> fileReference.relativePath.startsWith("s3:
.findFirst();
if (match.isPresent()) {
String message = "Found s3:
if (state.zone().system().isPublic())
throw new IllegalArgumentException(message + ". Nodes in the cluster need to be 'exclusive'," +
" see https:
else
throw new IllegalArgumentException(message + ". This is only supported in public systems");
}
}
} | class UrlConfigValidator extends Validator {
@Override
private static boolean hasExclusiveNodes(VespaModel model, ApplicationContainerCluster cluster) {
return model.hostSystem().getHosts()
.stream()
.flatMap(hostResource -> hostResource.spec().membership().stream())
.filter(membership -> membership.cluster().id().equals(cluster.id()))
.anyMatch(membership -> membership.cluster().isExclusive());
}
private static void validateS3UlsInConfig(DeployState state, ApplicationContainerCluster cluster, boolean isExclusive) {
if (hasUrlInConfig(state)) {
String message = "Found s3:
if ( ! state.zone().system().isPublic())
throw new IllegalArgumentException(message + ". This is only supported in public systems");
else if ( ! isExclusive)
throw new IllegalArgumentException(message + ". Nodes in the cluster need to be 'exclusive'," +
" see https:
}
}
private static boolean hasUrlInConfig(DeployState state) {
return state.getFileRegistry().export().stream()
.anyMatch(fileReference -> fileReference.relativePath.startsWith("s3:
}
} |
Yup, thanks | public void validate(VespaModel model, DeployState state) {
if (! state.isHostedTenantApplication(model.getAdmin().getApplicationType())) return;
model.getContainerClusters().forEach((__, cluster) -> {
var isExclusive = model.hostSystem().getHosts()
.stream()
.map(hostResource -> hostResource.spec().membership())
.filter(Optional::isPresent)
.map(Optional::get)
.filter(membership -> membership.cluster().id().equals(cluster.id()))
.anyMatch(membership -> membership.cluster().isExclusive());
if (! isExclusive)
validateS3UlsInConfig(state, cluster);
});
} | .map(Optional::get) | public void validate(VespaModel model, DeployState state) {
if (! state.isHostedTenantApplication(model.getAdmin().getApplicationType())) return;
model.getContainerClusters().forEach((__, cluster) -> {
var isExclusive = hasExclusiveNodes(model, cluster);
validateS3UlsInConfig(state, cluster, isExclusive);
});
} | class UrlConfigValidator extends Validator {
@Override
private static void validateS3UlsInConfig(DeployState state, ApplicationContainerCluster cluster) {
var match = state.getFileRegistry().export().stream()
.filter(fileReference -> fileReference.relativePath.startsWith("s3:
.findFirst();
if (match.isPresent()) {
String message = "Found s3:
if (state.zone().system().isPublic())
throw new IllegalArgumentException(message + ". Nodes in the cluster need to be 'exclusive'," +
" see https:
else
throw new IllegalArgumentException(message + ". This is only supported in public systems");
}
}
} | class UrlConfigValidator extends Validator {
@Override
private static boolean hasExclusiveNodes(VespaModel model, ApplicationContainerCluster cluster) {
return model.hostSystem().getHosts()
.stream()
.flatMap(hostResource -> hostResource.spec().membership().stream())
.filter(membership -> membership.cluster().id().equals(cluster.id()))
.anyMatch(membership -> membership.cluster().isExclusive());
}
private static void validateS3UlsInConfig(DeployState state, ApplicationContainerCluster cluster, boolean isExclusive) {
if (hasUrlInConfig(state)) {
String message = "Found s3:
if ( ! state.zone().system().isPublic())
throw new IllegalArgumentException(message + ". This is only supported in public systems");
else if ( ! isExclusive)
throw new IllegalArgumentException(message + ". Nodes in the cluster need to be 'exclusive'," +
" see https:
}
}
private static boolean hasUrlInConfig(DeployState state) {
return state.getFileRegistry().export().stream()
.anyMatch(fileReference -> fileReference.relativePath.startsWith("s3:
}
} |
Consider deprecating | protected static int registerClass(int id, Class<? extends Identifiable> spec) {
if (registry == null) {
registry = new Registry();
}
registry.add(id, spec);
return id;
} | } | protected static int registerClass(int id, Class<? extends Identifiable> spec) {
if (registry == null) {
registry = new Registry();
}
registry.add(id, spec);
return id;
} | class to register
* @return the identifier argument
*/ | class to register
* @return the identifier argument
*/ |
Now that `hasUrlInConfig()` does not check for scheme, we should probably make the error message scheme-neutral too. | private static void validateS3UlsInConfig(DeployState state, ApplicationContainerCluster cluster, boolean isExclusive) {
if (hasUrlInConfig(cluster)) {
String message = "Found s3:
if ( ! state.zone().system().isPublic())
throw new IllegalArgumentException(message + ". This is only supported in public systems");
else if ( ! isExclusive)
throw new IllegalArgumentException(message + ". Nodes in the cluster need to be 'exclusive'," +
" see https:
}
} | String message = "Found s3: | private static void validateS3UlsInConfig(DeployState state, ApplicationContainerCluster cluster, boolean isExclusive) {
if (hasS3UrlInConfig(cluster)) {
String message = "Found s3:
if ( ! state.zone().system().isPublic())
throw new IllegalArgumentException(message + ". This is only supported in public systems");
else if ( ! isExclusive)
throw new IllegalArgumentException(message + ". Nodes in the cluster need to be 'exclusive'," +
" see https:
}
} | class UrlConfigValidator extends Validator {
@Override
public void validate(VespaModel model, DeployState state) {
if (! state.isHostedTenantApplication(model.getAdmin().getApplicationType())) return;
model.getContainerClusters().forEach((__, cluster) -> {
var isExclusive = hasExclusiveNodes(model, cluster);
validateS3UlsInConfig(state, cluster, isExclusive);
});
}
private static boolean hasExclusiveNodes(VespaModel model, ApplicationContainerCluster cluster) {
return model.hostSystem().getHosts()
.stream()
.flatMap(hostResource -> hostResource.spec().membership().stream())
.filter(membership -> membership.cluster().id().equals(cluster.id()))
.anyMatch(membership -> membership.cluster().isExclusive());
}
private static boolean hasUrlInConfig(ApplicationContainerCluster cluster) {
return cluster.userConfiguredUrls().all().size() > 0;
}
} | class UrlConfigValidator extends Validator {
@Override
public void validate(VespaModel model, DeployState state) {
if (! state.isHostedTenantApplication(model.getAdmin().getApplicationType())) return;
model.getContainerClusters().forEach((__, cluster) -> {
var isExclusive = hasExclusiveNodes(model, cluster);
validateS3UlsInConfig(state, cluster, isExclusive);
});
}
private static boolean hasExclusiveNodes(VespaModel model, ApplicationContainerCluster cluster) {
return model.hostSystem().getHosts()
.stream()
.flatMap(hostResource -> hostResource.spec().membership().stream())
.filter(membership -> membership.cluster().id().equals(cluster.id()))
.anyMatch(membership -> membership.cluster().isExclusive());
}
private static boolean hasS3UrlInConfig(ApplicationContainerCluster cluster) {
return cluster.userConfiguredUrls().all().stream()
.anyMatch(url -> url.startsWith("s3:
}
} |
Changed this to only consider s3 urls in validator | private static void validateS3UlsInConfig(DeployState state, ApplicationContainerCluster cluster, boolean isExclusive) {
if (hasUrlInConfig(cluster)) {
String message = "Found s3:
if ( ! state.zone().system().isPublic())
throw new IllegalArgumentException(message + ". This is only supported in public systems");
else if ( ! isExclusive)
throw new IllegalArgumentException(message + ". Nodes in the cluster need to be 'exclusive'," +
" see https:
}
} | String message = "Found s3: | private static void validateS3UlsInConfig(DeployState state, ApplicationContainerCluster cluster, boolean isExclusive) {
if (hasS3UrlInConfig(cluster)) {
String message = "Found s3:
if ( ! state.zone().system().isPublic())
throw new IllegalArgumentException(message + ". This is only supported in public systems");
else if ( ! isExclusive)
throw new IllegalArgumentException(message + ". Nodes in the cluster need to be 'exclusive'," +
" see https:
}
} | class UrlConfigValidator extends Validator {
@Override
public void validate(VespaModel model, DeployState state) {
if (! state.isHostedTenantApplication(model.getAdmin().getApplicationType())) return;
model.getContainerClusters().forEach((__, cluster) -> {
var isExclusive = hasExclusiveNodes(model, cluster);
validateS3UlsInConfig(state, cluster, isExclusive);
});
}
private static boolean hasExclusiveNodes(VespaModel model, ApplicationContainerCluster cluster) {
return model.hostSystem().getHosts()
.stream()
.flatMap(hostResource -> hostResource.spec().membership().stream())
.filter(membership -> membership.cluster().id().equals(cluster.id()))
.anyMatch(membership -> membership.cluster().isExclusive());
}
private static boolean hasUrlInConfig(ApplicationContainerCluster cluster) {
return cluster.userConfiguredUrls().all().size() > 0;
}
} | class UrlConfigValidator extends Validator {
@Override
public void validate(VespaModel model, DeployState state) {
if (! state.isHostedTenantApplication(model.getAdmin().getApplicationType())) return;
model.getContainerClusters().forEach((__, cluster) -> {
var isExclusive = hasExclusiveNodes(model, cluster);
validateS3UlsInConfig(state, cluster, isExclusive);
});
}
private static boolean hasExclusiveNodes(VespaModel model, ApplicationContainerCluster cluster) {
return model.hostSystem().getHosts()
.stream()
.flatMap(hostResource -> hostResource.spec().membership().stream())
.filter(membership -> membership.cluster().id().equals(cluster.id()))
.anyMatch(membership -> membership.cluster().isExclusive());
}
private static boolean hasS3UrlInConfig(ApplicationContainerCluster cluster) {
return cluster.userConfiguredUrls().all().stream()
.anyMatch(url -> url.startsWith("s3:
}
} |
Can't this be simplified to: ``` return billablePlans().stream() .flatMap(p -> billing.tenantsWithPlan(tenants, p.id()).stream()) .toList(); ``` | private List<TenantName> billableTenants(List<TenantName> tenants) {
return billablePlans().stream()
.collect(Collectors.toMap(
p -> p,
p -> billing.tenantsWithPlan(tenants, p.id())))
.values()
.stream()
.flatMap(Collection::stream)
.toList();
} | .toList(); | private List<TenantName> billableTenants(List<TenantName> tenants) {
return billablePlans().stream()
.flatMap(p -> billing.tenantsWithPlan(tenants, p.id()).stream())
.toList();
} | class BillingReportMaintainer extends ControllerMaintainer {
private final BillingReporter reporter;
private final BillingController billing;
private final PlanRegistry plans;
public BillingReportMaintainer(Controller controller, Duration interval) {
super(controller, interval, null, Set.of(SystemName.PublicCd));
this.reporter = controller.serviceRegistry().billingReporter();
this.billing = controller.serviceRegistry().billingController();
this.plans = controller.serviceRegistry().planRegistry();
}
@Override
protected double maintain() {
reporter.maintainResources();
maintainTenants();
return 0.0;
}
private void maintainTenants() {
var tenants = cloudTenants();
var tenantNames = List.copyOf(tenants.keySet());
var billableTenants = billableTenants(tenantNames);
billableTenants.forEach(tenant -> {
controller().tenants().lockIfPresent(tenant, LockedTenant.Cloud.class, locked -> {
var ref = reporter.maintainTenant(locked.get());
if (locked.get().billingReference().isEmpty() || ! locked.get().billingReference().get().equals(ref)) {
controller().tenants().store(locked.with(ref));
}
});
});
}
private Map<TenantName, CloudTenant> cloudTenants() {
return controller().tenants().asList()
.stream()
.filter(CloudTenant.class::isInstance)
.map(CloudTenant.class::cast)
.collect(Collectors.toMap(
Tenant::name,
Function.identity()));
}
private List<Plan> billablePlans() {
return plans.all().stream()
.filter(Plan::isBilled)
.toList();
}
} | class BillingReportMaintainer extends ControllerMaintainer {
private final BillingReporter reporter;
private final BillingController billing;
private final PlanRegistry plans;
public BillingReportMaintainer(Controller controller, Duration interval) {
super(controller, interval, null, Set.of(SystemName.PublicCd));
this.reporter = controller.serviceRegistry().billingReporter();
this.billing = controller.serviceRegistry().billingController();
this.plans = controller.serviceRegistry().planRegistry();
}
@Override
protected double maintain() {
maintainTenants();
return 0.0;
}
private void maintainTenants() {
var tenants = cloudTenants();
var tenantNames = List.copyOf(tenants.keySet());
var billableTenants = billableTenants(tenantNames);
billableTenants.forEach(tenant -> {
controller().tenants().lockIfPresent(tenant, LockedTenant.Cloud.class, locked -> {
var ref = reporter.maintainTenant(locked.get());
if (locked.get().billingReference().isEmpty() || ! locked.get().billingReference().get().equals(ref)) {
controller().tenants().store(locked.with(ref));
}
});
});
}
private Map<TenantName, CloudTenant> cloudTenants() {
return controller().tenants().asList()
.stream()
.filter(CloudTenant.class::isInstance)
.map(CloudTenant.class::cast)
.collect(Collectors.toMap(
Tenant::name,
Function.identity()));
}
private List<Plan> billablePlans() {
return plans.all().stream()
.filter(Plan::isBilled)
.toList();
}
} |
Will this equals() really do what's intended? (owners is a `Deque`) Otherwise LGTM | public void unregisterSession(String session, NetworkOwner owner, boolean broadcast) {
sessions.computeIfPresent(session, (name, owners) -> {
if (owners.equals(List.of(owner))) {
if (broadcast)
net.unregisterSession(session);
return null;
}
owners.remove(owner);
return owners;
});
} | if (owners.equals(List.of(owner))) { | public void unregisterSession(String session, NetworkOwner owner, boolean broadcast) {
sessions.computeIfPresent(session, (name, owners) -> {
if (owners.size() == 1 && owners.contains(owner)) {
if (broadcast)
net.unregisterSession(session);
return null;
}
owners.remove(owner);
return owners;
});
} | class NetworkMultiplexer implements NetworkOwner {
private static final Logger log = Logger.getLogger(NetworkMultiplexer.class.getName());
private final Network net;
private final Deque<NetworkOwner> owners = new ConcurrentLinkedDeque<>();
private final Map<String, Deque<NetworkOwner>> sessions = new ConcurrentHashMap<>();
private final AtomicBoolean disowned;
private NetworkMultiplexer(Network net, boolean shared) {
net.attach(this);
this.net = net;
this.disowned = new AtomicBoolean( ! shared);
}
/** Returns a network multiplexer which will be shared between several {@link NetworkOwner}s,
* and will shut down when all these have detached, and {@link
public static NetworkMultiplexer shared(Network net) {
return new NetworkMultiplexer(net, true);
}
/** Returns a network multiplexer with a single {@link NetworkOwner}, which shuts down when this owner detaches. */
public static NetworkMultiplexer dedicated(Network net) {
return new NetworkMultiplexer(net, false);
}
public void registerSession(String session, NetworkOwner owner, boolean broadcast) {
sessions.compute(session, (name, owners) -> {
if (owners == null) {
owners = new ConcurrentLinkedDeque<>();
if (broadcast)
net.registerSession(session);
}
else if (owners.contains(owner))
throw new IllegalArgumentException("Session '" + session + "' with owner '" + owner + "' already registered with " + this);
owners.push(owner);
return owners;
});
}
@Override
public Protocol getProtocol(Utf8Array name) {
Protocol protocol = null;
for (NetworkOwner owner : owners)
protocol = owner.getProtocol(name) == null ? protocol : owner.getProtocol(name);
return protocol;
}
@Override
public void deliverMessage(Message message, String session) {
NetworkOwner owner = sessions.getOrDefault(session, owners).peek();
if (owner == null) {
log.warning(this + " received message '" + message + "' with no owners attached");
message.discard();
}
else
owner.deliverMessage(message, session);
}
/** Attach the network owner to this, allowing this to forward messages to it. */
public void attach(NetworkOwner owner) {
if (owners.contains(owner))
throw new IllegalArgumentException(owner + " is already attached to " + this);
owners.add(owner);
}
/** Detach the network owner from this, no longer allowing messages to it, and shutting down this is ownerless. */
public void detach(NetworkOwner owner) {
if ( ! owners.remove(owner))
throw new IllegalArgumentException(owner + " not attached to " + this);
destroyIfOwnerless();
}
/** Signal that external ownership of this is relinquished, allowing destruction on last owner detachment. */
public void disown() {
if (disowned.getAndSet(true))
throw new IllegalStateException("Destroy called on a dedicated multiplexer--" +
"this automatically shuts down when detached from--or " +
"called multiple times on a shared multiplexer");
destroyIfOwnerless();
}
private void destroyIfOwnerless() {
if (disowned.get() && owners.isEmpty())
net.shutdown();
}
public Network net() {
return net;
}
@Override
public String toString() {
return "network multiplexer with owners: " + owners + ", sessions: " + sessions + " and destructible: " + disowned.get();
}
} | class NetworkMultiplexer implements NetworkOwner {
private static final Logger log = Logger.getLogger(NetworkMultiplexer.class.getName());
private final Network net;
private final Deque<NetworkOwner> owners = new ConcurrentLinkedDeque<>();
private final Map<String, Deque<NetworkOwner>> sessions = new ConcurrentHashMap<>();
private final AtomicBoolean disowned;
private NetworkMultiplexer(Network net, boolean shared) {
net.attach(this);
this.net = net;
this.disowned = new AtomicBoolean( ! shared);
}
/** Returns a network multiplexer which will be shared between several {@link NetworkOwner}s,
* and will shut down when all these have detached, and {@link
public static NetworkMultiplexer shared(Network net) {
return new NetworkMultiplexer(net, true);
}
/** Returns a network multiplexer with a single {@link NetworkOwner}, which shuts down when this owner detaches. */
public static NetworkMultiplexer dedicated(Network net) {
return new NetworkMultiplexer(net, false);
}
public void registerSession(String session, NetworkOwner owner, boolean broadcast) {
sessions.compute(session, (name, owners) -> {
if (owners == null) {
owners = new ConcurrentLinkedDeque<>();
if (broadcast)
net.registerSession(session);
}
else if (owners.contains(owner))
throw new IllegalArgumentException("Session '" + session + "' with owner '" + owner + "' already registered with " + this);
owners.push(owner);
return owners;
});
}
@Override
public Protocol getProtocol(Utf8Array name) {
Protocol protocol = null;
for (NetworkOwner owner : owners)
protocol = owner.getProtocol(name) == null ? protocol : owner.getProtocol(name);
return protocol;
}
@Override
public void deliverMessage(Message message, String session) {
NetworkOwner owner = sessions.getOrDefault(session, owners).peek();
if (owner == null) {
log.warning(this + " received message '" + message + "' with no owners attached");
message.discard();
}
else
owner.deliverMessage(message, session);
}
/** Attach the network owner to this, allowing this to forward messages to it. */
public void attach(NetworkOwner owner) {
if (owners.contains(owner))
throw new IllegalArgumentException(owner + " is already attached to " + this);
owners.add(owner);
}
/** Detach the network owner from this, no longer allowing messages to it, and shutting down this is ownerless. */
public void detach(NetworkOwner owner) {
if ( ! owners.remove(owner))
throw new IllegalArgumentException(owner + " not attached to " + this);
destroyIfOwnerless();
}
/** Signal that external ownership of this is relinquished, allowing destruction on last owner detachment. */
public void disown() {
if (disowned.getAndSet(true))
throw new IllegalStateException("Destroy called on a dedicated multiplexer--" +
"this automatically shuts down when detached from--or " +
"called multiple times on a shared multiplexer");
destroyIfOwnerless();
}
private void destroyIfOwnerless() {
if (disowned.get() && owners.isEmpty())
net.shutdown();
}
public Network net() {
return net;
}
@Override
public String toString() {
return "network multiplexer with owners: " + owners + ", sessions: " + sessions + " and destructible: " + disowned.get();
}
} |
Should we have a guard against non-existsent 'method' field? | private HttpResponse putAccountantInvoiceExport(RestApi.RequestContext ctx, Slime slime) {
var billId = ctx.attributes().get("invoice")
.map(id -> Bill.Id.of((String) id))
.orElseThrow(() -> new RestApiException.BadRequest("Missing bill ID"));
var exportMethod = slime.get().field("method").asString();
var result = billing.exportBill(billId, exportMethod);
return new MessageResponse("Bill has been exported: " + result);
} | var exportMethod = slime.get().field("method").asString(); | private HttpResponse putAccountantInvoiceExport(RestApi.RequestContext ctx, Slime slime) {
var billId = ctx.attributes().get("invoice")
.map(id -> Bill.Id.of((String) id))
.orElseThrow(() -> new RestApiException.BadRequest("Missing bill ID"));
var bill = billing.getBill(billId);
var cloudTenant = tenants.require(bill.tenant(), CloudTenant.class);
var exportMethod = slime.get().field("method").asString();
var result = billing.exportBill(bill, exportMethod, cloudTenant);
return new MessageResponse("Bill has been exported: " + result);
} | class BillingApiHandlerV2 extends RestApiRequestHandler<BillingApiHandlerV2> {
private static final Logger log = Logger.getLogger(BillingApiHandlerV2.class.getName());
private static final String[] CSV_INVOICE_HEADER = new String[]{ "ID", "Tenant", "From", "To", "CpuHours", "MemoryHours", "DiskHours", "Cpu", "Memory", "Disk", "Additional" };
private final ApplicationController applications;
private final TenantController tenants;
private final BillingController billing;
private final PlanRegistry planRegistry;
private final Clock clock;
public BillingApiHandlerV2(ThreadedHttpRequestHandler.Context context, Controller controller) {
super(context, BillingApiHandlerV2::createRestApi);
this.applications = controller.applications();
this.tenants = controller.tenants();
this.billing = controller.serviceRegistry().billingController();
this.planRegistry = controller.serviceRegistry().planRegistry();
this.clock = controller.serviceRegistry().clock();
}
private static RestApi createRestApi(BillingApiHandlerV2 self) {
return RestApi.builder()
/*
* This is the API that is available to tenants to view their status
*/
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}")
.get(self::tenant)
.patch(Slime.class, self::patchTenant))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/usage")
.get(self::tenantUsage))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/bill")
.get(self::tenantInvoiceList))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/bill/{invoice}")
.get(self::tenantInvoice))
/*
* This is the API that is created for accountant role in Vespa Cloud
*/
.addRoute(RestApi.route("/billing/v2/accountant")
.get(self::accountant))
.addRoute(RestApi.route("/billing/v2/accountant/preview/tenant/{tenant}")
.get(self::previewBill)
.post(Slime.class, self::createBill))
.addRoute(RestApi.route("/billing/v2/accountant/bill/{invoice}/export")
.put(Slime.class, self::putAccountantInvoiceExport))
.addRoute(RestApi.route("/billing/v2/accountant/plans")
.get(self::plans))
.addExceptionMapper(RuntimeException.class, (c, e) -> ErrorResponses.logThrowing(c.request(), log, e))
.build();
}
private Slime tenant(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var plan = planFor(tenant.name());
var collectionMethod = billing.getCollectionMethod(tenant.name());
var response = new Slime();
var cursor = response.setObject();
cursor.setString("tenant", tenant.name().value());
toSlime(cursor.setObject("plan"), plan);
cursor.setString("collection", collectionMethod.name());
return response;
}
private Slime patchTenant(RestApi.RequestContext requestContext, Slime body) {
var security = requestContext.attributes().get(SecurityContext.ATTRIBUTE_NAME)
.map(SecurityContext.class::cast)
.orElseThrow(() -> new RestApiException.Forbidden("Must be logged in"));
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var newPlan = body.get().field("plan");
var newCollection = body.get().field("collection");
if (newPlan.valid() && newPlan.type() == Type.STRING) {
var planId = PlanId.from(newPlan.asString());
var hasDeployments = tenantHasDeployments(tenant.name());
var result = billing.setPlan(tenant.name(), planId, hasDeployments, false);
if (! result.isSuccess()) {
throw new RestApiException.Forbidden(result.getErrorMessage().get());
}
}
if (newCollection.valid() && newCollection.type() == Type.STRING) {
if (security.roles().contains(Role.hostedAccountant())) {
var collection = CollectionMethod.valueOf(newCollection.asString());
billing.setCollectionMethod(tenant.name(), collection);
} else {
throw new RestApiException.Forbidden("Only accountant can change billing method");
}
}
var response = new Slime();
var cursor = response.setObject();
cursor.setString("tenant", tenant.name().value());
toSlime(cursor.setObject("plan"), planFor(tenant.name()));
cursor.setString("collection", billing.getCollectionMethod(tenant.name()).name());
return response;
}
private Slime tenantInvoiceList(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var slime = new Slime();
invoicesSummaryToSlime(slime.setObject().setArray("invoices"), billing.getBillsForTenant(tenant.name()));
return slime;
}
/**
 * GET a single bill belonging to the tenant, rendered as JSON (default) or CSV
 * according to the "format" query parameter. 404 if the tenant has no such bill.
 */
private HttpResponse tenantInvoice(RestApi.RequestContext requestContext) {
    var name = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
    var cloudTenant = tenants.require(name, CloudTenant.class);
    var wantedId = requestContext.pathParameters().getStringOrThrow("invoice");
    var format = requestContext.queryParameters().getString("format").orElse("json");
    // Only bills owned by this tenant are searched, so a foreign id yields 404.
    var bill = billing.getBillsForTenant(cloudTenant.name()).stream()
                      .filter(candidate -> candidate.id().value().equals(wantedId))
                      .findAny()
                      .orElseThrow(RestApiException.NotFound::new);
    switch (format) {
        case "json": {
            var slime = new Slime();
            toSlime(slime.setObject(), bill);
            return new SlimeJsonResponse(slime);
        }
        case "csv":
            return new CsvResponse(CSV_INVOICE_HEADER, toCsv(bill));
        default:
            throw new RestApiException.BadRequest("Unknown format: " + format);
    }
}
/** Returns whether any instance of any application of this tenant has at least one deployment. */
private boolean tenantHasDeployments(TenantName tenant) {
    // anyMatch short-circuits on the first instance with a deployment, unlike the
    // previous sum-of-sizes > 0 check which always walked every instance.
    return applications.asList(tenant).stream()
                       .flatMap(app -> app.instances().values().stream())
                       .anyMatch(instance -> ! instance.deployments().isEmpty());
}
/** GET the tenant's unbilled usage up to the optional "until" date (defaults to today). */
private Slime tenantUsage(RestApi.RequestContext requestContext) {
    var name = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
    var cloudTenant = tenants.require(name, CloudTenant.class);
    var uncommitted = billing.createUncommittedBill(cloudTenant.name(), untilParameter(requestContext));
    var response = new Slime();
    usageToSlime(response.setObject(), uncommitted);
    return response;
}
/**
 * GET the accountant overview: one entry per tenant, sorted by name, with plan,
 * quota, collection method and the tenant's unbilled usage up to the "until" date.
 */
private Slime accountant(RestApi.RequestContext requestContext) {
var untilAt = untilParameter(requestContext);
var usagePerTenant = billing.createUncommittedBills(untilAt);
var response = new Slime();
var tenantsResponse = response.setObject().setArray("tenants");
// Sort by tenant name for stable output order.
tenants.asList().stream().sorted(Comparator.comparing(Tenant::name)).forEach(tenant -> {
// A tenant without uncommitted usage has no entry in the map, hence the Optional.
var usage = Optional.ofNullable(usagePerTenant.get(tenant.name()));
var tenantResponse = tenantsResponse.addObject();
tenantResponse.setString("tenant", tenant.name().value());
toSlime(tenantResponse.setObject("plan"), planFor(tenant.name()));
toSlime(tenantResponse.setObject("quota"), billing.getQuota(tenant.name()));
tenantResponse.setString("collection", billing.getCollectionMethod(tenant.name()).name());
// NOTE(review): orElse(null) passes a null value to setString when there is no usage —
// verify Cursor.setString tolerates null here (the sibling "unbilled" field defaults to "0.00").
tenantResponse.setString("lastBill", usage.map(Bill::getStartDate).map(DateTimeFormatter.ISO_DATE::format).orElse(null));
tenantResponse.setString("unbilled", usage.map(Bill::sum).map(BigDecimal::toPlainString).orElse("0.00"));
});
return response;
}
/** GET a full preview of the bill that would be issued to the tenant up to the "until" date. */
private Slime previewBill(RestApi.RequestContext requestContext) {
    var name = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
    var cloudTenant = tenants.require(name, CloudTenant.class);
    var uncommitted = billing.createUncommittedBill(cloudTenant.name(), untilParameter(requestContext));
    var response = new Slime();
    toSlime(response.setObject(), uncommitted);
    return response;
}
/**
 * POST: commits a bill for the tenant covering the inclusive [from, to] date range
 * given in the request body. Requires an authenticated principal, recorded as issuer.
 */
private HttpResponse createBill(RestApi.RequestContext requestContext, Slime slime) {
    var root = slime.get();
    var security = requestContext.attributes().get(SecurityContext.ATTRIBUTE_NAME)
            .map(SecurityContext.class::cast)
            .orElseThrow(() -> new RestApiException.Forbidden("Must be logged in"));
    var name = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
    var cloudTenant = tenants.require(name, CloudTenant.class);
    // The period end is exclusive internally, so "to" is advanced one day to include it.
    var periodStart = LocalDate.parse(getInspectorFieldOrThrow(root, "from")).atStartOfDay(ZoneOffset.UTC);
    var periodEnd = LocalDate.parse(getInspectorFieldOrThrow(root, "to")).plusDays(1).atStartOfDay(ZoneOffset.UTC);
    var billId = billing.createBillForPeriod(cloudTenant.name(), periodStart, periodEnd, security.principal().getName());
    return new MessageResponse("Created bill " + billId.value());
}
/** GET all known plans as {"plans": [{"id": ..., "name": ...}, ...]}. */
private HttpResponse plans(RestApi.RequestContext ctx) {
    var slime = new Slime();
    var plansArray = slime.setObject().setArray("plans");
    for (var plan : planRegistry.all())
        toSlime(plansArray.addObject(), plan);  // reuse the shared Plan serializer instead of duplicating its "id"/"name" fields
    return new SlimeJsonResponse(slime);
}
/** Appends one summary object per bill to the given array cursor. */
private void invoicesSummaryToSlime(Cursor slime, List<Bill> bills) {
    for (Bill bill : bills)
        invoiceSummaryToSlime(slime.addObject(), bill);
}
/** Writes the summary fields of a bill: id, period, total and status. */
private void invoiceSummaryToSlime(Cursor slime, Bill bill) {
    var dateFormat = DateTimeFormatter.ISO_LOCAL_DATE;
    slime.setString("id", bill.id().value());
    slime.setString("from", dateFormat.format(bill.getStartDate()));
    slime.setString("to", dateFormat.format(bill.getEndDate()));
    slime.setString("total", bill.sum().toString());
    slime.setString("status", bill.status());
}
/** Writes an uncommitted-usage summary: period, total and line items (no id/status yet). */
private void usageToSlime(Cursor slime, Bill bill) {
slime.setString("from", bill.getStartDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
// NOTE(review): uses getEndTime() where the committed-bill serializers use getEndDate() —
// confirm the distinction is intentional for uncommitted usage.
slime.setString("to", bill.getEndTime().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("total", bill.sum().toString());
toSlime(slime.setArray("items"), bill.lineItems());
}
/** Writes the full bill: the shared summary fields plus status history and line items. */
private void toSlime(Cursor slime, Bill bill) {
    // The id/from/to/total/status fields are identical to the list view;
    // delegate instead of duplicating the serialization.
    invoiceSummaryToSlime(slime, bill);
    toSlime(slime.setArray("statusHistory"), bill.statusHistory());
    toSlime(slime.setArray("items"), bill.lineItems());
}
/** Writes each status change as {"at": <offset timestamp>, "status": <value>}. */
private void toSlime(Cursor slime, Bill.StatusHistory history) {
    history.getHistory().forEach((changedAt, status) -> {
        Cursor entry = slime.addObject();
        entry.setString("at", DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(changedAt));
        entry.setString("status", status);
    });
}
/** Appends one serialized object per line item to the given array cursor. */
private void toSlime(Cursor slime, List<Bill.LineItem> items) {
    for (Bill.LineItem item : items)
        toSlime(slime.addObject(), item);
}
/**
 * Writes a single bill line item: identity, amount, plan, optional architecture,
 * major version, optional application/zone scope, and per-resource usage/cost.
 */
private void toSlime(Cursor slime, Bill.LineItem item) {
slime.setString("id", item.id());
slime.setString("description", item.description());
slime.setString("amount",item.amount().toString());
// A line item referencing an unknown plan is a data error, hence the hard failure.
toSlime(slime.setObject("plan"), planRegistry.plan(item.plan()).orElseThrow(() -> new RuntimeException("No such plan: '" + item.plan() + "'")));
item.getArchitecture().ifPresent(arch -> slime.setString("architecture", arch.name()));
slime.setLong("majorVersion", item.getMajorVersion());
// Application and zone are only present for items tied to a specific deployment.
item.applicationId().ifPresent(appId -> {
slime.setString("application", appId.application().value());
slime.setString("instance", appId.instance().value());
});
item.zoneId().ifPresent(z -> slime.setString("zone", z.value()));
// Per-resource breakdown: hours used and cost, each optional.
toSlime(slime.setObject("cpu"), item.getCpuHours(), item.getCpuCost());
toSlime(slime.setObject("memory"), item.getMemoryHours(), item.getMemoryCost());
toSlime(slime.setObject("disk"), item.getDiskHours(), item.getDiskCost());
}
/** Writes "hours" and "cost" fields, omitting either when absent. */
private void toSlime(Cursor slime, Optional<BigDecimal> hours, Optional<BigDecimal> cost) {
    hours.map(BigDecimal::toString).ifPresent(value -> slime.setString("hours", value));
    cost.map(BigDecimal::toString).ifPresent(value -> slime.setString("cost", value));
}
/** Renders the bill as a single CSV row matching CSV_INVOICE_HEADER. */
private List<Object[]> toCsv(Bill bill) {
    Object[] row = {
            bill.id().value(), bill.tenant().value(),
            DateTimeFormatter.ISO_DATE.format(bill.getStartDate()),
            DateTimeFormatter.ISO_DATE.format(bill.getEndDate()),
            bill.sumCpuHours(), bill.sumMemoryHours(), bill.sumDiskHours(),
            bill.sumCpuCost(), bill.sumMemoryCost(), bill.sumDiskCost(),
            bill.sumAdditionalCost()
    };
    // The explicit type witness keeps the array a single list element
    // rather than letting varargs spread it into eleven elements.
    return List.<Object[]>of(row);
}
/** Parses the optional "until" query parameter, defaulting to today in the controller's clock. */
private LocalDate untilParameter(RestApi.RequestContext ctx) {
    var until = ctx.queryParameters().getString("until");
    if (until.isPresent())
        return LocalDate.parse(until.get());
    return LocalDate.now(clock);
}
/**
 * Returns the string value of the given field, or throws 400 Bad Request if it is missing.
 */
private static String getInspectorFieldOrThrow(Inspector inspector, String field) {
    Inspector value = inspector.field(field);  // look the field up once instead of twice
    if (!value.valid())
        throw new RestApiException.BadRequest("Field " + field + " cannot be null");
    return value.asString();
}
/** Serializes a plan as {"id": ..., "name": ...}. */
private void toSlime(Cursor out, Plan plan) {
    out.setString("id", plan.id().value());
    out.setString("name", plan.displayName());
}
/** Serializes a quota; -1 signals that no budget is configured. */
private void toSlime(Cursor out, Quota quota) {
    double budget = quota.budget().map(BigDecimal::doubleValue).orElse(-1.0);
    out.setDouble("budget", budget);
}
/** Resolves the tenant's current plan; an unknown plan id is a data error and fails hard. */
private Plan planFor(TenantName tenant) {
    var planId = billing.getPlan(tenant);
    var plan = planRegistry.plan(planId);
    if ( ! plan.isPresent())
        throw new RuntimeException("No such plan: '" + planId + "'");
    return plan.get();
}
} | class BillingApiHandlerV2 extends RestApiRequestHandler<BillingApiHandlerV2> {
private static final Logger log = Logger.getLogger(BillingApiHandlerV2.class.getName());
// Column order must match the row order produced by toCsv.
private static final String[] CSV_INVOICE_HEADER = new String[]{ "ID", "Tenant", "From", "To", "CpuHours", "MemoryHours", "DiskHours", "Cpu", "Memory", "Disk", "Additional" };
// Controller collaborators, all wired from the Controller in the constructor.
private final ApplicationController applications;
private final TenantController tenants;
private final BillingController billing;
private final PlanRegistry planRegistry;
// Injected clock so "now"-dependent endpoints (untilParameter) stay testable.
private final Clock clock;
/** Creates the /billing/v2 handler, pulling all collaborators from the given controller. */
public BillingApiHandlerV2(ThreadedHttpRequestHandler.Context context, Controller controller) {
super(context, BillingApiHandlerV2::createRestApi);
this.applications = controller.applications();
this.tenants = controller.tenants();
this.billing = controller.serviceRegistry().billingController();
this.planRegistry = controller.serviceRegistry().planRegistry();
this.clock = controller.serviceRegistry().clock();
}
/**
 * Declares the /billing/v2 route table: tenant-facing read endpoints and
 * accountant-only administration endpoints. Uncaught RuntimeExceptions are
 * logged and mapped to error responses by the exception mapper.
 */
private static RestApi createRestApi(BillingApiHandlerV2 self) {
return RestApi.builder()
/*
* This is the API that is available to tenants to view their status
*/
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}")
.get(self::tenant)
.patch(Slime.class, self::patchTenant))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/usage")
.get(self::tenantUsage))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/bill")
.get(self::tenantInvoiceList))
.addRoute(RestApi.route("/billing/v2/tenant/{tenant}/bill/{invoice}")
.get(self::tenantInvoice))
/*
* This is the API that is created for accountant role in Vespa Cloud
*/
.addRoute(RestApi.route("/billing/v2/accountant")
.get(self::accountant))
.addRoute(RestApi.route("/billing/v2/accountant/preview/tenant/{tenant}")
.get(self::previewBill)
.post(Slime.class, self::createBill))
.addRoute(RestApi.route("/billing/v2/accountant/bill/{invoice}/export")
.put(Slime.class, self::putAccountantInvoiceExport))
.addRoute(RestApi.route("/billing/v2/accountant/plans")
.get(self::plans))
.addExceptionMapper(RuntimeException.class, (c, e) -> ErrorResponses.logThrowing(c.request(), log, e))
.build();
}
/** GET the tenant's billing status: current plan and collection method. */
private Slime tenant(RestApi.RequestContext requestContext) {
    var name = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
    var cloudTenant = tenants.require(name, CloudTenant.class);
    var response = new Slime();
    var root = response.setObject();
    root.setString("tenant", cloudTenant.name().value());
    toSlime(root.setObject("plan"), planFor(cloudTenant.name()));
    root.setString("collection", billing.getCollectionMethod(cloudTenant.name()).name());
    return response;
}
/**
 * PATCH the tenant's billing settings. "plan" may be changed by the caller
 * (subject to billing-side validation); "collection" only by the hosted accountant.
 * Returns the resulting state in the same shape as the GET endpoint.
 */
private Slime patchTenant(RestApi.RequestContext requestContext, Slime body) {
var security = requestContext.attributes().get(SecurityContext.ATTRIBUTE_NAME)
.map(SecurityContext.class::cast)
.orElseThrow(() -> new RestApiException.Forbidden("Must be logged in"));
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
// Both fields are optional; each is applied only when present and a string.
var newPlan = body.get().field("plan");
var newCollection = body.get().field("collection");
if (newPlan.valid() && newPlan.type() == Type.STRING) {
var planId = PlanId.from(newPlan.asString());
var hasDeployments = tenantHasDeployments(tenant.name());
var result = billing.setPlan(tenant.name(), planId, hasDeployments, false);
if (! result.isSuccess()) {
// NOTE(review): getErrorMessage().get() assumes a failed result always carries
// a message — confirm, or this throws NoSuchElementException instead of Forbidden.
throw new RestApiException.Forbidden(result.getErrorMessage().get());
}
}
if (newCollection.valid() && newCollection.type() == Type.STRING) {
if (security.roles().contains(Role.hostedAccountant())) {
// valueOf throws IllegalArgumentException for unknown collection methods.
var collection = CollectionMethod.valueOf(newCollection.asString());
billing.setCollectionMethod(tenant.name(), collection);
} else {
throw new RestApiException.Forbidden("Only accountant can change billing method");
}
}
// Echo the (possibly updated) state back, re-read from the billing controller.
var response = new Slime();
var cursor = response.setObject();
cursor.setString("tenant", tenant.name().value());
toSlime(cursor.setObject("plan"), planFor(tenant.name()));
cursor.setString("collection", billing.getCollectionMethod(tenant.name()).name());
return response;
}
private Slime tenantInvoiceList(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var slime = new Slime();
invoicesSummaryToSlime(slime.setObject().setArray("invoices"), billing.getBillsForTenant(tenant.name()));
return slime;
}
private HttpResponse tenantInvoice(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var invoiceId = requestContext.pathParameters().getStringOrThrow("invoice");
var format = requestContext.queryParameters().getString("format").orElse("json");
var invoice = billing.getBillsForTenant(tenant.name()).stream()
.filter(inv -> inv.id().value().equals(invoiceId))
.findAny()
.orElseThrow(RestApiException.NotFound::new);
if (format.equals("json")) {
var slime = new Slime();
toSlime(slime.setObject(), invoice);
return new SlimeJsonResponse(slime);
}
if (format.equals("csv")) {
var csv = toCsv(invoice);
return new CsvResponse(CSV_INVOICE_HEADER, csv);
}
throw new RestApiException.BadRequest("Unknown format: " + format);
}
private boolean tenantHasDeployments(TenantName tenant) {
return applications.asList(tenant).stream()
.flatMap(app -> app.instances().values().stream())
.mapToLong(instance -> instance.deployments().size())
.sum() > 0;
}
private Slime tenantUsage(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var untilAt = untilParameter(requestContext);
var usage = billing.createUncommittedBill(tenant.name(), untilAt);
var slime = new Slime();
usageToSlime(slime.setObject(), usage);
return slime;
}
private Slime accountant(RestApi.RequestContext requestContext) {
var untilAt = untilParameter(requestContext);
var usagePerTenant = billing.createUncommittedBills(untilAt);
var response = new Slime();
var tenantsResponse = response.setObject().setArray("tenants");
tenants.asList().stream().sorted(Comparator.comparing(Tenant::name)).forEach(tenant -> {
var usage = Optional.ofNullable(usagePerTenant.get(tenant.name()));
var tenantResponse = tenantsResponse.addObject();
tenantResponse.setString("tenant", tenant.name().value());
toSlime(tenantResponse.setObject("plan"), planFor(tenant.name()));
toSlime(tenantResponse.setObject("quota"), billing.getQuota(tenant.name()));
tenantResponse.setString("collection", billing.getCollectionMethod(tenant.name()).name());
tenantResponse.setString("lastBill", usage.map(Bill::getStartDate).map(DateTimeFormatter.ISO_DATE::format).orElse(null));
tenantResponse.setString("unbilled", usage.map(Bill::sum).map(BigDecimal::toPlainString).orElse("0.00"));
});
return response;
}
private Slime previewBill(RestApi.RequestContext requestContext) {
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var untilAt = untilParameter(requestContext);
var usage = billing.createUncommittedBill(tenant.name(), untilAt);
var slime = new Slime();
toSlime(slime.setObject(), usage);
return slime;
}
private HttpResponse createBill(RestApi.RequestContext requestContext, Slime slime) {
var body = slime.get();
var security = requestContext.attributes().get(SecurityContext.ATTRIBUTE_NAME)
.map(SecurityContext.class::cast)
.orElseThrow(() -> new RestApiException.Forbidden("Must be logged in"));
var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
var tenant = tenants.require(tenantName, CloudTenant.class);
var startAt = LocalDate.parse(getInspectorFieldOrThrow(body, "from")).atStartOfDay(ZoneOffset.UTC);
var endAt = LocalDate.parse(getInspectorFieldOrThrow(body, "to")).plusDays(1).atStartOfDay(ZoneOffset.UTC);
var invoiceId = billing.createBillForPeriod(tenant.name(), startAt, endAt, security.principal().getName());
return new MessageResponse("Created bill " + invoiceId.value());
}
private HttpResponse plans(RestApi.RequestContext ctx) {
var slime = new Slime();
var root = slime.setObject();
var plans = root.setArray("plans");
for (var plan : planRegistry.all()) {
var p = plans.addObject();
p.setString("id", plan.id().value());
p.setString("name", plan.displayName());
}
return new SlimeJsonResponse(slime);
}
private void invoicesSummaryToSlime(Cursor slime, List<Bill> bills) {
bills.forEach(invoice -> invoiceSummaryToSlime(slime.addObject(), invoice));
}
private void invoiceSummaryToSlime(Cursor slime, Bill bill) {
slime.setString("id", bill.id().value());
slime.setString("from", bill.getStartDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("to", bill.getEndDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("total", bill.sum().toString());
slime.setString("status", bill.status());
}
private void usageToSlime(Cursor slime, Bill bill) {
slime.setString("from", bill.getStartDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("to", bill.getEndTime().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("total", bill.sum().toString());
toSlime(slime.setArray("items"), bill.lineItems());
}
private void toSlime(Cursor slime, Bill bill) {
slime.setString("id", bill.id().value());
slime.setString("from", bill.getStartDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("to", bill.getEndDate().format(DateTimeFormatter.ISO_LOCAL_DATE));
slime.setString("total", bill.sum().toString());
slime.setString("status", bill.status());
toSlime(slime.setArray("statusHistory"), bill.statusHistory());
toSlime(slime.setArray("items"), bill.lineItems());
}
private void toSlime(Cursor slime, Bill.StatusHistory history) {
history.getHistory().forEach((key, value) -> {
var c = slime.addObject();
c.setString("at", key.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME));
c.setString("status", value);
});
}
private void toSlime(Cursor slime, List<Bill.LineItem> items) {
items.forEach(item -> toSlime(slime.addObject(), item));
}
private void toSlime(Cursor slime, Bill.LineItem item) {
slime.setString("id", item.id());
slime.setString("description", item.description());
slime.setString("amount",item.amount().toString());
toSlime(slime.setObject("plan"), planRegistry.plan(item.plan()).orElseThrow(() -> new RuntimeException("No such plan: '" + item.plan() + "'")));
item.getArchitecture().ifPresent(arch -> slime.setString("architecture", arch.name()));
slime.setLong("majorVersion", item.getMajorVersion());
item.applicationId().ifPresent(appId -> {
slime.setString("application", appId.application().value());
slime.setString("instance", appId.instance().value());
});
item.zoneId().ifPresent(z -> slime.setString("zone", z.value()));
toSlime(slime.setObject("cpu"), item.getCpuHours(), item.getCpuCost());
toSlime(slime.setObject("memory"), item.getMemoryHours(), item.getMemoryCost());
toSlime(slime.setObject("disk"), item.getDiskHours(), item.getDiskCost());
}
private void toSlime(Cursor slime, Optional<BigDecimal> hours, Optional<BigDecimal> cost) {
hours.ifPresent(h -> slime.setString("hours", h.toString()));
cost.ifPresent(c -> slime.setString("cost", c.toString()));
}
private List<Object[]> toCsv(Bill bill) {
return List.<Object[]>of(new Object[]{
bill.id().value(), bill.tenant().value(),
bill.getStartDate().format(DateTimeFormatter.ISO_DATE),
bill.getEndDate().format(DateTimeFormatter.ISO_DATE),
bill.sumCpuHours(), bill.sumMemoryHours(), bill.sumDiskHours(),
bill.sumCpuCost(), bill.sumMemoryCost(), bill.sumDiskCost(),
bill.sumAdditionalCost()
});
}
private LocalDate untilParameter(RestApi.RequestContext ctx) {
return ctx.queryParameters().getString("until")
.map(LocalDate::parse)
.orElseGet(() -> LocalDate.now(clock));
}
private static String getInspectorFieldOrThrow(Inspector inspector, String field) {
if (!inspector.field(field).valid())
throw new RestApiException.BadRequest("Field " + field + " cannot be null");
return inspector.field(field).asString();
}
private void toSlime(Cursor cursor, Plan plan) {
cursor.setString("id", plan.id().value());
cursor.setString("name", plan.displayName());
}
private void toSlime(Cursor cursor, Quota quota) {
cursor.setDouble("budget", quota.budget().map(BigDecimal::doubleValue).orElse(-1.0));
}
private Plan planFor(TenantName tenant) {
var planId = billing.getPlan(tenant);
return planRegistry.plan(planId)
.orElseThrow(() -> new RuntimeException("No such plan: '" + planId + "'"));
}
} |
Suggestion: `cloud-token-data-plane-fingerprints` => `data-plane-tokens/v1` | private void addCloudTokenSupport(DeployState state, ApplicationContainerCluster cluster) {
var server = cluster.getHttp().getHttpServer().get();
if (!enableTokenSupport(state)) return;
Set<String> tokenEndpoints = tokenEndpoints(state).stream()
.map(ContainerEndpoint::names)
.flatMap(Collection::stream)
.collect(Collectors.toSet());
var endpointCert = state.endpointCertificateSecrets().orElseThrow();
int tokenPort = getTokenDataplanePort(state).orElseThrow();
cluster.addSimpleComponent(DataplaneProxyCredentials.class);
cluster.addSimpleComponent(DataplaneProxyService.class);
var dataplaneProxy = new DataplaneProxy(
getMtlsDataplanePort(state),
tokenPort,
endpointCert.certificate(),
endpointCert.key(),
tokenEndpoints);
cluster.addComponent(dataplaneProxy);
var connector = HostedSslConnectorFactory.builder(server.getComponentId().getName()+"-token", tokenPort)
.tokenEndpoint(true)
.proxyProtocol(false, false)
.endpointCertificate(endpointCert)
.remoteAddressHeader("X-Forwarded-For")
.remotePortHeader("X-Forwarded-Port")
.clientAuth(SslClientAuth.NEED)
.build();
server.addConnector(connector);
var tokenChain = new HttpFilterChain("cloud-token-data-plane-secure", HttpFilterChain.Type.SYSTEM);
var tokenFilter = new CloudTokenDataPlaneFilter(cluster, state);
tokenChain.addInnerComponent(tokenFilter);
cluster.getHttp().getFilterChains().add(tokenChain);
cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
.filter(c -> c.getListenPort() == tokenPort).findAny().orElseThrow()
.setDefaultRequestFilterChain(tokenChain.getComponentId());
// Handler exposing data-plane token state on the internal web service port,
// configured with the same token filter config as the data-plane filter chain.
class CloudTokenDataPlaneHandler extends Handler implements CloudTokenDataPlaneFilterConfig.Producer {
    CloudTokenDataPlaneHandler() {
        super(new ComponentModel("com.yahoo.jdisc.http.filter.security.cloud.CloudTokenDataPlaneHandler", null, "jdisc-security-filters", null));
        // Fix: bind at the absolute, versioned path "/data-plane-tokens/v1" — the previous
        // binding "cloud-token-data-plane-fingerprints" lacked the leading '/' and API version
        // (the corrected handler elsewhere in this file uses this path).
        addServerBindings(SystemBindingPattern.fromHttpPortAndPath(Defaults.getDefaults().vespaWebServicePort(), "/data-plane-tokens/v1"));
    }
    @Override public void getConfig(Builder builder) { tokenFilter.getConfig(builder); }
}
cluster.addComponent(new CloudTokenDataPlaneHandler());
} | addServerBindings(SystemBindingPattern.fromHttpPortAndPath(Defaults.getDefaults().vespaWebServicePort(), "cloud-token-data-plane-fingerprints")); | private void addCloudTokenSupport(DeployState state, ApplicationContainerCluster cluster) {
var server = cluster.getHttp().getHttpServer().get();
if (!enableTokenSupport(state)) return;
Set<String> tokenEndpoints = tokenEndpoints(state).stream()
.map(ContainerEndpoint::names)
.flatMap(Collection::stream)
.collect(Collectors.toSet());
var endpointCert = state.endpointCertificateSecrets().orElseThrow();
int tokenPort = getTokenDataplanePort(state).orElseThrow();
cluster.addSimpleComponent(DataplaneProxyCredentials.class);
cluster.addSimpleComponent(DataplaneProxyService.class);
var dataplaneProxy = new DataplaneProxy(
getMtlsDataplanePort(state),
tokenPort,
endpointCert.certificate(),
endpointCert.key(),
tokenEndpoints);
cluster.addComponent(dataplaneProxy);
var connector = HostedSslConnectorFactory.builder(server.getComponentId().getName()+"-token", tokenPort)
.tokenEndpoint(true)
.proxyProtocol(false, false)
.endpointCertificate(endpointCert)
.remoteAddressHeader("X-Forwarded-For")
.remotePortHeader("X-Forwarded-Port")
.clientAuth(SslClientAuth.NEED)
.build();
server.addConnector(connector);
var tokenChain = new HttpFilterChain("cloud-token-data-plane-secure", HttpFilterChain.Type.SYSTEM);
var tokenFilter = new CloudTokenDataPlaneFilter(cluster, state);
tokenChain.addInnerComponent(tokenFilter);
cluster.getHttp().getFilterChains().add(tokenChain);
cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
.filter(c -> c.getListenPort() == tokenPort).findAny().orElseThrow()
.setDefaultRequestFilterChain(tokenChain.getComponentId());
// Handler exposing data-plane token state at /data-plane-tokens/v1 on the internal
// web service port; shares its config with the enclosing token filter.
class CloudTokenDataPlaneHandler extends Handler implements CloudTokenDataPlaneFilterConfig.Producer {
CloudTokenDataPlaneHandler() {
super(new ComponentModel("com.yahoo.jdisc.http.filter.security.cloud.CloudTokenDataPlaneHandler", null, "jdisc-security-filters", null));
addServerBindings(SystemBindingPattern.fromHttpPortAndPath(Defaults.getDefaults().vespaWebServicePort(), "/data-plane-tokens/v1"));
}
@Override public void getConfig(Builder builder) { tokenFilter.getConfig(builder); }
}
cluster.addComponent(new CloudTokenDataPlaneHandler());
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
// Load-balancer status file used when running hosted; overridable via the env setting below.
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";
private static final String CONTAINER_TAG = "container";
private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";
// A cluster running ZooKeeper must have an odd node count within this range.
private static final int MIN_ZOOKEEPER_NODE_COUNT = 1;
private static final int MAX_ZOOKEEPER_NODE_COUNT = 7;
// Whether the built containers expose networking (HTTP serving).
public enum Networking { disable, enable }
private ApplicationPackage app;
private final boolean standaloneBuilder;
private final Networking networking;
// Derived in the constructor: rpc is disabled for standalone, http follows Networking.
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
protected DeployLogger log;
// The services.xml element(s) this builder handles.
public static final List<ConfigModelId> configModelIds = List.of(ConfigModelId.fromName(CONTAINER_TAG));
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
/**
 * @param standaloneBuilder whether this builds a standalone container (disables the rpc server)
 * @param networking whether the built containers serve http
 */
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
super(ContainerModel.class);
this.standaloneBuilder = standaloneBuilder;
this.networking = networking;
this.rpcServerEnabled = !standaloneBuilder;
this.httpServerEnabled = networking == Networking.enable;
}
/** Returns the services.xml element ids this builder handles (the "container" tag). */
@Override
public List<ConfigModelId> handlesElements() {
return configModelIds;
}
/** Builds the container cluster for one container element and attaches it to the model. */
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
log = modelContext.getDeployLogger();
app = modelContext.getApplicationPackage();
// Rejects specs requiring an unsupported version before any building happens.
checkVersion(spec);
ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
addClusterContent(cluster, spec, modelContext);
// Server enablement derives from construction flags, not from the spec.
cluster.setMessageBusEnabled(rpcServerEnabled);
cluster.setRpcServerEnabled(rpcServerEnabled);
cluster.setHttpServerEnabled(httpServerEnabled);
model.setCluster(cluster);
}
/** Creates the (empty) cluster object for the given container element via the DOM builder. */
private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
return new VespaDomBuilder.DomConfigProducerBuilderBase<ApplicationContainerCluster>() {
@Override
protected ApplicationContainerCluster doBuild(DeployState deployState, TreeConfigProducer<AnyConfigProducer> ancestor, Element producerSpec) {
// The producer id serves as both cluster name and config id.
return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
modelContext.getProducerId(), deployState);
}
}.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
/**
 * Populates the cluster from the container element: components, feature subtrees
 * (processing/search/docproc/document-api), handlers, http, logging, nodes and
 * hosted-only extras. Call order matters: nodes and http must be in place before
 * the steps that configure per-node/per-connector state below them.
 */
private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
addConfiguredComponents(deployState, cluster, spec);
addSecretStore(cluster, spec, deployState);
addProcessing(deployState, spec, cluster, context);
addSearch(deployState, spec, cluster, context);
addDocproc(deployState, spec, cluster);
addDocumentApi(deployState, spec, cluster, context);
cluster.addDefaultHandlersExceptStatus();
addStatusHandlers(cluster, context.getDeployState().isHosted());
addUserHandlers(deployState, cluster, spec, context);
addClients(deployState, spec, cluster);
addHttp(deployState, spec, cluster, context);
addAccessLogs(deployState, cluster, spec);
addNodes(cluster, spec, context);
addModelEvaluationRuntime(cluster);
addModelEvaluation(spec, cluster, context);
addServerProviders(deployState, spec, cluster);
// Standalone containers get their bundles from the runtime instead.
if (!standaloneBuilder) cluster.addAllPlatformBundles();
addDeploymentSpecConfig(cluster, context, deployState.getDeployLogger());
addZooKeeper(cluster, spec);
addParameterStoreValidationHandler(cluster, deployState);
}
/**
 * Hosted only: adds the AWS parameter-store bundle, and in public systems also
 * a handler at /validate-secret-store for validating secret store access.
 */
private void addParameterStoreValidationHandler(ApplicationContainerCluster cluster, DeployState deployState) {
if ( ! deployState.isHosted()) return;
// The bundle is added unconditionally in hosted; the handler only in public systems.
cluster.addPlatformBundle(PlatformBundles.absoluteBundlePath("jdisc-cloud-aws"));
if (deployState.zone().system().isPublic()) {
BindingPattern bindingPattern = SystemBindingPattern.fromHttpPath("/validate-secret-store");
Handler handler = new Handler(
new ComponentModel("com.yahoo.jdisc.cloud.aws.AwsParameterStoreValidationHandler", null, "jdisc-cloud-aws", null));
handler.addServerBindings(bindingPattern);
cluster.addComponent(handler);
}
}
/**
 * Configures an embedded ZooKeeper ensemble when the container element declares one:
 * validates the topology (no combined clusters; odd node count within bounds),
 * adds the curator components, applies the optional session timeout, and installs
 * the reconfigurable ZooKeeper server components on every container.
 */
private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) {
Element zooKeeper = getZooKeeper(spec);
if (zooKeeper == null) return;
// A combined cluster (nodes element with "of") cannot host ZooKeeper.
Element nodesElement = XML.getChild(spec, "nodes");
boolean isCombined = nodesElement != null && nodesElement.hasAttribute("of");
if (isCombined) {
throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper");
}
// Quorum requires an odd member count; retired nodes don't count.
long nonRetiredNodes = cluster.getContainers().stream().filter(c -> !c.isRetired()).count();
if (nonRetiredNodes < MIN_ZOOKEEPER_NODE_COUNT || nonRetiredNodes > MAX_ZOOKEEPER_NODE_COUNT || nonRetiredNodes % 2 == 0) {
throw new IllegalArgumentException("Cluster with ZooKeeper needs an odd number of nodes, between " +
MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT +
", have " + nonRetiredNodes + " non-retired");
}
cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade");
cluster.addSimpleComponent("com.yahoo.vespa.curator.CuratorWrapper", null, "zkfacade");
// Optional override of the ZooKeeper session timeout; must be a positive integer.
String sessionTimeoutSeconds = zooKeeper.getAttribute("session-timeout-seconds");
if ( ! sessionTimeoutSeconds.isBlank()) {
try {
int timeoutSeconds = Integer.parseInt(sessionTimeoutSeconds);
if (timeoutSeconds <= 0) throw new IllegalArgumentException("must be a positive value");
cluster.setZookeeperSessionTimeoutSeconds(timeoutSeconds);
}
// Catches both NumberFormatException and the positivity check above,
// wrapping either in a message that names the offending attribute value.
catch (RuntimeException e) {
throw new IllegalArgumentException("invalid zookeeper session-timeout-seconds '" + sessionTimeoutSeconds + "'", e);
}
}
cluster.getContainers().forEach(ContainerModelBuilder::addReconfigurableZooKeeperServerComponents);
}
/** Adds the components a container needs to run a reconfigurable ZooKeeper server. */
public static void addReconfigurableZooKeeperServerComponents(Container container) {
    for (String idSpec : List.of("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer",
                                 "com.yahoo.vespa.zookeeper.Reconfigurer",
                                 "com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl"))
        container.addComponent(zookeeperComponent(idSpec, container));
}
/** Creates a component from the zookeeper-server bundle, scoped to the container's config id. */
private static SimpleComponent zookeeperComponent(String idSpec, Container container) {
    return new SimpleComponent(
            new ComponentModel(idSpec, null, "zookeeper-server", container.getConfigId()));
}
/**
 * Configures the cluster's secret store from an optional secret-store element:
 * type "cloud" delegates to the cloud secret store, anything else builds a
 * plain group-based SecretStore.
 */
private void addSecretStore(ApplicationContainerCluster cluster, Element spec, DeployState deployState) {
Element secretStoreElement = XML.getChild(spec, "secret-store");
if (secretStoreElement != null) {
String type = secretStoreElement.getAttribute("type");
if ("cloud".equals(type)) {
addCloudSecretStore(cluster, secretStoreElement, deployState);
} else {
SecretStore secretStore = new SecretStore();
// Each group maps a name to an environment.
for (Element group : XML.getChildren(secretStoreElement, "group")) {
secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
}
cluster.setSecretStore(secretStore);
}
}
}
private void addCloudSecretStore(ApplicationContainerCluster cluster, Element secretStoreElement, DeployState deployState) {
if ( ! deployState.isHosted()) return;
if ( ! cluster.getZone().system().isPublic())
throw new IllegalArgumentException("Cloud secret store is not supported in non-public system, see the documentation");
CloudSecretStore cloudSecretStore = new CloudSecretStore();
Map<String, TenantSecretStore> secretStoresByName = deployState.getProperties().tenantSecretStores()
.stream()
.collect(Collectors.toMap(
TenantSecretStore::getName,
store -> store
));
Element store = XML.getChild(secretStoreElement, "store");
for (Element group : XML.getChildren(store, "aws-parameter-store")) {
String account = group.getAttribute("account");
String region = group.getAttribute("aws-region");
TenantSecretStore secretStore = secretStoresByName.get(account);
if (secretStore == null)
throw new IllegalArgumentException("No configured secret store named " + account);
if (secretStore.getExternalId().isEmpty())
throw new IllegalArgumentException("No external ID has been set");
cloudSecretStore.addConfig(account, region, secretStore.getAwsId(), secretStore.getRole(), secretStore.getExternalId().get());
}
cluster.addComponent(cloudSecretStore);
}
private void addDeploymentSpecConfig(ApplicationContainerCluster cluster, ConfigModelContext context, DeployLogger deployLogger) {
if ( ! context.getDeployState().isHosted()) return;
DeploymentSpec deploymentSpec = app.getDeploymentSpec();
if (deploymentSpec.isEmpty()) return;
for (var deprecatedElement : deploymentSpec.deprecatedElements()) {
deployLogger.logApplicationPackage(WARNING, deprecatedElement.humanReadableString());
}
addIdentityProvider(cluster,
context.getDeployState().getProperties().configServerSpecs(),
context.getDeployState().getProperties().loadBalancerName(),
context.getDeployState().getProperties().ztsUrl(),
context.getDeployState().getProperties().athenzDnsSuffix(),
context.getDeployState().zone(),
deploymentSpec);
addRotationProperties(cluster, context.getDeployState().getEndpoints());
}
private void addRotationProperties(ApplicationContainerCluster cluster, Set<ContainerEndpoint> endpoints) {
cluster.getContainers().forEach(container -> {
setRotations(container, endpoints, cluster.getName());
container.setProp("activeRotation", "true");
});
}
private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
var rotationsProperty = endpoints.stream()
.filter(endpoint -> endpoint.clusterId().equals(containerClusterName))
.filter(endpoint -> endpoint.scope() == ApplicationClusterEndpoint.Scope.global)
.flatMap(endpoint -> endpoint.names().stream())
.collect(Collectors.toCollection(LinkedHashSet::new));
container.setProp("rotations", String.join(",", rotationsProperty));
}
private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element parent) {
for (Element components : XML.getChildren(parent, "components")) {
addIncludes(components);
addConfiguredComponents(deployState, cluster, components, "component");
}
addConfiguredComponents(deployState, cluster, parent, "component");
}
protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
if (isHostedVespa) {
String name = "status.html";
Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING));
cluster.addComponent(
new FileStatusHandlerComponent(
name + "-status-handler",
statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
SystemBindingPattern.fromHttpPath("/" + name)));
} else {
cluster.addVipHandler();
}
}
    /** Adds every 'server' child of spec as a configured component on the cluster. */
    private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
        addConfiguredComponents(deployState, cluster, spec, "server");
    }
protected void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
List<Element> accessLogElements = getAccessLogElements(spec);
if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault()) {
cluster.addAccessLog();
} else {
if (cluster.isHostedVespa()) {
log.logApplicationPackage(WARNING, "Applications are not allowed to override the 'accesslog' element");
} else {
List<AccessLogComponent> components = new ArrayList<>();
for (Element accessLog : accessLogElements) {
AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(accessLogComponent -> {
components.add(accessLogComponent);
cluster.addComponent(accessLogComponent);
});
}
if (components.size() > 0) {
cluster.removeSimpleComponent(VoidRequestLog.class);
cluster.addSimpleComponent(AccessLog.class);
}
}
}
if (cluster.getAllComponents().stream().anyMatch(component -> component instanceof AccessLogComponent))
cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "access"));
}
    /** Returns all 'accesslog' child elements of the given spec element. */
    private List<Element> getAccessLogElements(Element spec) {
        return XML.getChildren(spec, "accesslog");
    }
protected void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element httpElement = XML.getChild(spec, "http");
if (httpElement != null) {
cluster.setHttp(buildHttp(deployState, cluster, httpElement, context));
}
if (isHostedTenantApplication(context)) {
addHostedImplicitHttpIfNotPresent(deployState, cluster);
addHostedImplicitAccessControlIfNotPresent(deployState, cluster);
addDefaultConnectorHostedFilterBinding(cluster);
addCloudMtlsConnector(deployState, cluster);
addCloudDataPlaneFilter(deployState, cluster);
addCloudTokenSupport(deployState, cluster);
}
}
    /**
     * Installs the cloud data plane filter chains (hosted public systems only):
     * a "secure" chain containing CloudDataPlaneFilter, set as default request filter on the mTLS
     * data plane port, and an "insecure" no-op chain bound to the VIP handler and set as default
     * on the default web service port.
     */
    private static void addCloudDataPlaneFilter(DeployState deployState, ApplicationContainerCluster cluster) {
        if (!deployState.isHosted() || !deployState.zone().system().isPublic()) return;

        var dataplanePort = getMtlsDataplanePort(deployState);
        // Secure chain: CloudDataPlaneFilter guards the data plane port
        var secureChain = new HttpFilterChain("cloud-data-plane-secure", HttpFilterChain.Type.SYSTEM);
        secureChain.addInnerComponent(new CloudDataPlaneFilter(cluster, deployState));
        cluster.getHttp().getFilterChains().add(secureChain);
        // The connector for the data plane port is expected to exist at this point (orElseThrow)
        cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
                .filter(c -> c.getListenPort() == dataplanePort).findAny().orElseThrow()
                .setDefaultRequestFilterChain(secureChain.getComponentId());
        // Insecure chain: a single no-op filter from jdisc-security-filters
        var insecureChain = new HttpFilterChain("cloud-data-plane-insecure", HttpFilterChain.Type.SYSTEM);
        insecureChain.addInnerComponent(new Filter(
                new ChainedComponentModel(
                        new BundleInstantiationSpecification(
                                new ComponentSpecification("com.yahoo.jdisc.http.filter.security.misc.NoopFilter"),
                                null, new ComponentSpecification("jdisc-security-filters")),
                        Dependencies.emptyDependencies())));
        cluster.getHttp().getFilterChains().add(insecureChain);
        var insecureChainComponentSpec = new ComponentSpecification(insecureChain.getComponentId().toString());
        // Bind the insecure chain to the VIP handler path, and make it the default on the standard web service port
        FilterBinding insecureBinding =
                FilterBinding.create(FilterBinding.Type.REQUEST, insecureChainComponentSpec, VIP_HANDLER_BINDING);
        cluster.getHttp().getBindings().add(insecureBinding);
        cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
                .filter(c -> c.getListenPort() == Defaults.getDefaults().vespaWebServicePort()).findAny().orElseThrow()
                .setDefaultRequestFilterChain(insecureChain.getComponentId());
    }
protected void addClients(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
if (!deployState.isHosted() || !deployState.zone().system().isPublic()) return;
List<Client> clients;
Element clientsElement = XML.getChild(spec, "clients");
boolean legacyMode = false;
if (clientsElement == null) {
clients = List.of(new Client(
"default", List.of(), getCertificates(app.getFile(Path.fromString("security/clients.pem"))), List.of()));
legacyMode = true;
} else {
clients = XML.getChildren(clientsElement, "client").stream()
.flatMap(elem -> getClient(elem, deployState).stream())
.toList();
boolean atLeastOneClientWithCertificate = clients.stream().anyMatch(client -> !client.certificates().isEmpty());
if (!atLeastOneClientWithCertificate)
throw new IllegalArgumentException("At least one client must require a certificate");
}
List<X509Certificate> operatorAndTesterCertificates = deployState.getProperties().operatorCertificates();
if(!operatorAndTesterCertificates.isEmpty())
clients = Stream.concat(clients.stream(), Stream.of(Client.internalClient(operatorAndTesterCertificates))).toList();
cluster.setClients(legacyMode, clients);
}
private Optional<Client> getClient(Element clientElement, DeployState state) {
String clientId = XML.attribute("id", clientElement).orElseThrow();
if (clientId.startsWith("_"))
throw new IllegalArgumentException("Invalid client id '%s', id cannot start with '_'".formatted(clientId));
List<String> permissions = XML.attribute("permissions", clientElement)
.map(p -> p.split(",")).stream()
.flatMap(Arrays::stream)
.toList();
var certificates = XML.getChildren(clientElement, "certificate").stream()
.flatMap(certElem -> {
var file = app.getFile(Path.fromString(certElem.getAttribute("file")));
if (!file.exists()) {
throw new IllegalArgumentException("Certificate file '%s' for client '%s' does not exist"
.formatted(file.getPath().getRelative(), clientId));
}
return getCertificates(file).stream();
})
.toList();
if (!certificates.isEmpty()) return Optional.of(new Client(clientId, permissions, certificates, List.of()));
var knownTokens = state.getProperties().dataplaneTokens().stream()
.collect(Collectors.toMap(DataplaneToken::tokenId, Function.identity()));
var referencedTokens = XML.getChildren(clientElement, "token").stream()
.map(elem -> {
var tokenId = elem.getAttribute("id");
var token = knownTokens.get(tokenId);
if (token == null)
log.logApplicationPackage(
WARNING, "Token '%s' for client '%s' does not exist".formatted(tokenId, clientId));
return token;
})
.filter(token -> {
if (token == null) return false;
boolean empty = token.versions().isEmpty();
if (empty)
log.logApplicationPackage(
WARNING, "Token '%s' for client '%s' has no active versions"
.formatted(token.tokenId(), clientId));
return !empty;
})
.toList();
if (referencedTokens.isEmpty()) {
log.log(Level.INFO, "Skipping client '%s' as it does not refer to any activate tokens".formatted(clientId));
return Optional.empty();
}
return Optional.of(new Client(clientId, permissions, List.of(), referencedTokens));
}
private List<X509Certificate> getCertificates(ApplicationFile file) {
if (!file.exists()) return List.of();
try {
Reader reader = file.createReader();
String certPem = IOUtils.readAll(reader);
reader.close();
List<X509Certificate> x509Certificates = X509CertificateUtils.certificateListFromPem(certPem);
if (x509Certificates.isEmpty()) {
throw new IllegalArgumentException("File %s does not contain any certificates.".formatted(file.getPath().getRelative()));
}
return x509Certificates;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
    /** If access control is configured, lets it install its filter binding on the default hosted connector. */
    private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) {
        cluster.getHttp().getAccessControl()
                .ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(cluster.getHttp()));
    }
    /**
     * Adds the hosted mTLS data plane connector to the cluster's http server.
     * With an endpoint certificate: public systems authenticate clients against security/clients.pem
     * (required), non-public systems against the Athenz certificate bundle, with client-auth mode
     * driven by the access control configuration. Without an endpoint certificate the connector
     * still wants client auth with enforcement.
     */
    private void addCloudMtlsConnector(DeployState state, ApplicationContainerCluster cluster) {
        // NOTE(review): getHttpServer() is assumed present here (bare Optional.get()); hosted flow
        // adds a server in addHostedImplicitHttpIfNotPresent before this is called — verify ordering.
        JettyHttpServer server = cluster.getHttp().getHttpServer().get();
        String serverName = server.getComponentId().getName();

        var builder = HostedSslConnectorFactory.builder(serverName, getMtlsDataplanePort(state))
                .proxyProtocol(true, state.getProperties().featureFlags().enableProxyProtocolMixedMode())
                .tlsCiphersOverride(state.getProperties().tlsCiphersOverride())
                .endpointConnectionTtl(state.getProperties().endpointConnectionTtl());
        var endpointCert = state.endpointCertificateSecrets().orElse(null);
        if (endpointCert != null) {
            builder.endpointCertificate(endpointCert);
            boolean isPublic = state.zone().system().isPublic();
            List<X509Certificate> clientCertificates = getClientCertificates(cluster);
            if (isPublic) {
                if (clientCertificates.isEmpty())
                    // NOTE(review): the URL in this message appears truncated in this view
                    // (unterminated string literal) — verify against the original source.
                    throw new IllegalArgumentException("Client certificate authority security/clients.pem is missing - " +
                                                       "see: https:
                builder.tlsCaCertificatesPem(X509CertificateUtils.toPem(clientCertificates))
                        .clientAuth(SslClientAuth.WANT_WITH_ENFORCER);
            } else {
                // Non-public: trust the Athenz CA bundle on disk
                builder.tlsCaCertificatesPath("/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem");
                var needAuth = cluster.getHttp().getAccessControl()
                        .map(accessControl -> accessControl.clientAuthentication)
                        .map(clientAuth -> clientAuth == AccessControl.ClientAuthentication.need)
                        .orElse(false);
                builder.clientAuth(needAuth ? SslClientAuth.NEED : SslClientAuth.WANT);
            }
        } else {
            // No endpoint certificate available: still request client auth with enforcement
            builder.clientAuth(SslClientAuth.WANT_WITH_ENFORCER);
        }
        var connectorFactory = builder.build();
        cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory));
        server.addConnector(connectorFactory);
    }
private List<X509Certificate> getClientCertificates(ApplicationContainerCluster cluster) {
return cluster.getClients()
.stream()
.map(Client::certificates)
.flatMap(Collection::stream)
.toList();
}
    /** Returns whether this deployment is a hosted tenant application, as determined by the deploy state. */
    private static boolean isHostedTenantApplication(ConfigModelContext context) {
        return context.getDeployState().isHostedTenantApplication(context.getApplicationType());
    }
private static void addHostedImplicitHttpIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
if (cluster.getHttp() == null) {
cluster.setHttp(new Http(new FilterChains(cluster)));
}
JettyHttpServer httpServer = cluster.getHttp().getHttpServer().orElse(null);
if (httpServer == null) {
httpServer = new JettyHttpServer("DefaultHttpServer", cluster, deployState);
cluster.getHttp().setHttpServer(httpServer);
}
int defaultPort = Defaults.getDefaults().vespaWebServicePort();
boolean defaultConnectorPresent = httpServer.getConnectorFactories().stream().anyMatch(connector -> connector.getListenPort() == defaultPort);
if (!defaultConnectorPresent) {
httpServer.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build());
}
}
private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
Http http = cluster.getHttp();
if (http.getAccessControl().isPresent()) return;
AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null);
if (tenantDomain == null) return;
new AccessControl.Builder(tenantDomain.value())
.setHandlers(cluster)
.clientAuthentication(AccessControl.ClientAuthentication.need)
.build()
.configureHttpFilterChains(http);
}
private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement, ConfigModelContext context) {
Http http = new HttpBuilder(portBindingOverride(deployState, context)).build(deployState, cluster, httpElement);
if (networking == Networking.disable)
http.removeAllServers();
return http;
}
private void addDocumentApi(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
ContainerDocumentApi containerDocumentApi = buildDocumentApi(deployState, cluster, spec, context);
if (containerDocumentApi == null) return;
cluster.setDocumentApi(containerDocumentApi);
}
private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec);
if (containerDocproc == null) return;
cluster.setDocproc(containerDocproc);
ContainerDocproc.Options docprocOptions = containerDocproc.options;
cluster.setMbusParams(new ApplicationContainerCluster.MbusParams(
docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
}
    /** Builds the search subsystem from the 'search' element, if present: chains, search handler and renderers. */
    private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
        Element searchElement = XML.getChild(spec, "search");
        if (searchElement == null) return;

        addIncludes(searchElement);
        cluster.setSearch(buildSearch(deployState, cluster, searchElement));
        addSearchHandler(deployState, cluster, searchElement, context);
        validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
    }
    /**
     * Enables stateless model evaluation when a 'model-evaluation' element is present.
     * Per-model overrides under model-evaluation/onnx/models configure execution mode, threading and
     * GPU device for each named ONNX model from the rank profiles; unknown model names are skipped
     * with a warning.
     */
    private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
        Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
        if (modelEvaluationElement == null) return;

        RankProfileList profiles =
                context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;

        // Clone, so per-cluster overrides below do not mutate the shared model list
        FileDistributedOnnxModels models = profiles.getOnnxModels().clone();

        Element onnxElement = XML.getChild(modelEvaluationElement, "onnx");
        Element modelsElement = XML.getChild(onnxElement, "models");
        for (Element modelElement : XML.getChildren(modelsElement, "model") ) {
            OnnxModel onnxModel = models.asMap().get(modelElement.getAttribute("name"));
            if (onnxModel == null) {
                String availableModels = String.join(", ", profiles.getOnnxModels().asMap().keySet());
                context.getDeployState().getDeployLogger().logApplicationPackage(WARNING,
                        "Model '" + modelElement.getAttribute("name") + "' not found. Available ONNX " +
                                "models are: " + availableModels + ". Skipping this configuration.");
                continue;
            }
            onnxModel.setStatelessExecutionMode(getStringValue(modelElement, "execution-mode", null));
            onnxModel.setStatelessInterOpThreads(getIntValue(modelElement, "interop-threads", -1));
            onnxModel.setStatelessIntraOpThreads(getIntValue(modelElement, "intraop-threads", -1));

            Element gpuDeviceElement = XML.getChild(modelElement, "gpu-device");
            if (gpuDeviceElement != null) {
                int gpuDevice = Integer.parseInt(gpuDeviceElement.getTextContent());
                // Only honor the GPU device if at least one container in the cluster actually has GPU resources
                boolean hasGpu = cluster.getContainers().stream().anyMatch(container -> container.getHostResource() != null &&
                        !container.getHostResource().realResources().gpuResources().isZero());
                onnxModel.setGpuDevice(gpuDevice, hasGpu);
            }
            cluster.onnxModelCost().registerModel(context.getApplicationPackage().getFile(onnxModel.getFilePath()));
        }

        cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles, models));
    }
private String getStringValue(Element element, String name, String defaultValue) {
Element child = XML.getChild(element, name);
return (child != null) ? child.getTextContent() : defaultValue;
}
private int getIntValue(Element element, String name, int defaultValue) {
Element child = XML.getChild(element, name);
return (child != null) ? Integer.parseInt(child.getTextContent()) : defaultValue;
}
protected void addModelEvaluationRuntime(ApplicationContainerCluster cluster) {
/* These bundles are added to all application container clusters, even if they haven't
* declared 'model-evaluation' in services.xml, because there are many public API packages
* in the model-evaluation bundle that could be used by customer code. */
cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_EVALUATION_BUNDLE_FILE);
cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_INTEGRATION_BUNDLE_FILE);
cluster.addPlatformBundle(ContainerModelEvaluation.ONNXRUNTIME_BUNDLE_FILE);
/* The ONNX runtime is always available for injection to any component */
cluster.addSimpleComponent(
ContainerModelEvaluation.ONNX_RUNTIME_CLASS, null, ContainerModelEvaluation.INTEGRATION_BUNDLE_NAME);
/* Add runtime providing utilities such as metrics to embedder implementations */
cluster.addSimpleComponent(
"ai.vespa.embedding.EmbedderRuntime", null, ContainerModelEvaluation.INTEGRATION_BUNDLE_NAME);
}
    /** Builds the processing subsystem from the 'processing' element, if present: chains, bindings and renderers. */
    private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
        Element processingElement = XML.getChild(spec, "processing");
        if (processingElement == null) return;

        cluster.addSearchAndDocprocBundles();
        addIncludes(processingElement);
        cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement),
                                    serverBindings(deployState, context, processingElement, ProcessingChains.defaultBindings).toArray(BindingPattern[]::new));
        validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
    }
private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) {
SearchChains searchChains = new DomSearchChainsBuilder()
.build(deployState, containerCluster, producerSpec);
ContainerSearch containerSearch = new ContainerSearch(deployState, containerCluster, searchChains);
applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch);
containerSearch.setQueryProfiles(deployState.getQueryProfiles());
containerSearch.setSemanticRules(deployState.getSemanticRules());
return containerSearch;
}
    /** Validates and applies the application package's page templates to the search subsystem. */
    private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) {
        PageTemplates.validate(applicationPackage);
        containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
    }
private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
for (Element component: XML.getChildren(spec, "handler")) {
cluster.addComponent(
new DomHandlerBuilder(cluster, portBindingOverride(deployState, context)).build(deployState, cluster, component));
}
}
private void checkVersion(Element spec) {
String version = spec.getAttribute("version");
if ( ! Version.fromString(version).equals(new Version(1)))
throw new IllegalArgumentException("Expected container version to be 1.0, but got " + version);
}
    /** Adds nodes to the cluster: a single standalone node when run by the standalone builder, otherwise from the XML spec. */
    private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
        if (standaloneBuilder)
            addStandaloneNode(cluster, context.getDeployState());
        else
            addNodesFromXml(cluster, spec, context);
    }
private void addStandaloneNode(ApplicationContainerCluster cluster, DeployState deployState) {
ApplicationContainer container = new ApplicationContainer(cluster, "standalone", cluster.getContainers().size(), deployState);
cluster.addContainers(Collections.singleton(container));
}
    /** Resolves the effective JVM GC options string for this deployment from the given (possibly null) options. */
    private static String buildJvmGCOptions(ConfigModelContext context, String jvmGCOptions) {
        return new JvmGcOptions(context.getDeployState(), jvmGCOptions).build();
    }
    /** Builds the JVM options string from the nodes element; legacyOptions selects the deprecated attribute form. */
    private static String getJvmOptions(Element nodesElement,
                                        DeployState deployState,
                                        boolean legacyOptions) {
        return new JvmOptions(nodesElement, deployState, legacyOptions).build();
    }
private static String extractAttribute(Element element, String attrName) {
return element.hasAttribute(attrName) ? element.getAttribute(attrName) : null;
}
private void extractJvmOptions(List<ApplicationContainer> nodes,
ApplicationContainerCluster cluster,
Element nodesElement,
ConfigModelContext context) {
Element jvmElement = XML.getChild(nodesElement, "jvm");
if (jvmElement == null) {
extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
} else {
extractJvmTag(nodes, cluster, nodesElement, jvmElement, context);
}
}
    /**
     * Applies JVM settings from deprecated attributes directly on the 'nodes' element
     * (jvm-gc-options, allocated-memory), logging deprecation warnings when they are used.
     * NOTE(review): the documentation URLs in the warning messages appear truncated in this view
     * (unterminated string literals) — verify against the original source.
     */
    private void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
                                              Element nodesElement, ConfigModelContext context) {
        applyNodesTagJvmArgs(nodes, getJvmOptions(nodesElement, context.getDeployState(), true));

        // Only fall back to the legacy attribute if no GC options are set on the cluster already
        if (cluster.getJvmGCOptions().isEmpty()) {
            String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS);

            if (jvmGCOptions != null && !jvmGCOptions.isEmpty()) {
                DeployLogger logger = context.getDeployState().getDeployLogger();
                logger.logApplicationPackage(WARNING, "'jvm-gc-options' is deprecated and will be removed in Vespa 9." +
                        " Please merge into 'gc-options' in 'jvm' element." +
                        " See https:
            }
            cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions));
        }

        if (applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME)))
            context.getDeployState().getDeployLogger()
                    .logApplicationPackage(WARNING, "'allocated-memory' is deprecated and will be removed in Vespa 9." +
                            " Please merge into 'allocated-memory' in 'jvm' element." +
                            " See https:
    }
private void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element nodesElement, Element jvmElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, getJvmOptions(nodesElement, context.getDeployState(), false));
applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
String jvmGCOptions = extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS);
cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions));
}
/**
* Add nodes to cluster according to the given containerElement.
*
* Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set
* of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed
* simultaneously for all active config models.
*/
private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
Element nodesElement = XML.getChild(containerElement, "nodes");
if (nodesElement == null) {
cluster.addContainers(allocateWithoutNodesTag(cluster, context));
} else {
List<ApplicationContainer> nodes = createNodes(cluster, containerElement, nodesElement, context);
extractJvmOptions(nodes, cluster, nodesElement, context);
applyDefaultPreload(nodes, nodesElement);
var envVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT)).entrySet();
for (var container : nodes) {
for (var entry : envVars) {
container.addEnvironmentVariable(entry.getKey(), entry.getValue());
}
}
if (useCpuSocketAffinity(nodesElement))
AbstractService.distributeCpuSocketAffinity(nodes);
cluster.addContainers(nodes);
}
}
private ZoneEndpoint zoneEndpoint(ConfigModelContext context, ClusterSpec.Id cluster) {
InstanceName instance = context.properties().applicationId().instance();
ZoneId zone = ZoneId.from(context.properties().zone().environment(),
context.properties().zone().region());
return context.getApplicationPackage().getDeploymentSpec().zoneEndpoint(instance, zone, cluster);
}
private static Map<String, String> getEnvironmentVariables(Element environmentVariables) {
var map = new LinkedHashMap<String, String>();
if (environmentVariables != null) {
for (Element var: XML.getChildren(environmentVariables)) {
var name = new com.yahoo.text.Identifier(var.getNodeName());
map.put(name.toString(), var.getTextContent());
}
}
return map;
}
    /**
     * Creates the cluster's nodes, dispatching on the nodes element's attributes:
     * 'type' → allocation by node type; 'of' → combined cluster referencing a content service (deprecated);
     * 'count' (or hosted manual deployment) → allocation by node count; otherwise an explicit node list.
     */
    private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element containerElement,
                                                   Element nodesElement, ConfigModelContext context) {
        if (nodesElement.hasAttribute("type"))
            return createNodesFromNodeType(cluster, nodesElement, context);
        else if (nodesElement.hasAttribute("of")) {
            List<ApplicationContainer> containers = createNodesFromContentServiceReference(cluster, nodesElement, context);
            log.logApplicationPackage(WARNING, "Declaring combined cluster with <nodes of=\"...\"> is deprecated without " +
                                               "replacement, and the feature will be removed in Vespa 9. Use separate container and " +
                                               "content clusters instead");
            return containers;
        } else if (nodesElement.hasAttribute("count"))
            return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
        else if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed())
            return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
        else
            return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
    }
private static boolean applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
try {
if (memoryPercentage == null || memoryPercentage.isEmpty()) return false;
memoryPercentage = memoryPercentage.trim();
if ( ! memoryPercentage.endsWith("%"))
throw new IllegalArgumentException("Missing % sign");
memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim();
cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage));
return true;
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
" must be an integer percentage ending by the '%' sign", e);
}
}
/** Allocate a container cluster without a nodes tag */
private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
HostSystem hostSystem = cluster.hostSystem();
if (deployState.isHosted()) {
int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
deployState.getDeployLogger().logApplicationPackage(Level.INFO, "Using " + nodeCount + " nodes in " + cluster);
var nodesSpec = NodesSpecification.dedicated(nodeCount, context);
ClusterSpec.Id clusterId = ClusterSpec.Id.from(cluster.getName());
var hosts = nodesSpec.provision(hostSystem,
ClusterSpec.Type.container,
clusterId,
zoneEndpoint(context, clusterId),
deployState.getDeployLogger(),
false,
context.clusterInfo().build());
return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
else {
return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);
}
}
private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
ApplicationContainer node = new ApplicationContainer(cluster, "container.0", 0, context.getDeployState());
node.setHostResource(host);
node.initService(context.getDeployState());
return List.of(node);
}
private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
try {
var nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
var clusterId = ClusterSpec.Id.from(cluster.name());
Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(),
ClusterSpec.Type.container,
clusterId,
zoneEndpoint(context, clusterId),
log,
getZooKeeper(containerElement) != null,
context.clusterInfo().build());
return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException("In " + cluster, e);
}
}
private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
.vespaVersion(context.getDeployState().getWantedNodeVespaVersion())
.dockerImageRepository(context.getDeployState().getWantedDockerImageRepo())
.build();
Map<HostResource, ClusterMembership> hosts =
cluster.getRoot().hostSystem().allocateHosts(clusterSpec,
Capacity.fromRequiredNodeType(type), log);
return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodeSpecification;
try {
nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
}
String referenceId = nodesElement.getAttribute("of");
cluster.setHostClusterId(referenceId);
Map<HostResource, ClusterMembership> hosts =
StorageGroup.provisionHosts(nodeSpecification,
referenceId,
cluster.getRoot().hostSystem(),
context);
return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
private List<ApplicationContainer> createNodesFromHosts(Map<HostResource, ClusterMembership> hosts,
                                                        ApplicationContainerCluster cluster,
                                                        DeployState deployState) {
    // Wraps each allocated host as an ApplicationContainer service named "container.<index>".
    List<ApplicationContainer> nodes = new ArrayList<>();
    hosts.forEach((host, membership) -> {
        ApplicationContainer node = new ApplicationContainer(cluster,
                                                             "container." + membership.index(),
                                                             membership.retired(),
                                                             membership.index(),
                                                             deployState);
        node.setHostResource(host);
        node.initService(deployState);
        nodes.add(node);
    });
    return nodes;
}
private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
    // Builds one container per explicit <node> child, indexed in document order.
    List<ApplicationContainer> nodes = new ArrayList<>();
    List<Element> nodeElements = XML.getChildren(nodesElement, "node");
    for (int index = 0; index < nodeElements.size(); index++) {
        ContainerServiceBuilder builder = new ContainerServiceBuilder("container." + index, index);
        nodes.add(builder.build(deployState, cluster, nodeElements.get(index)));
    }
    return nodes;
}
/**
 * Returns whether CPU socket affinity is requested on the nodes element.
 * Absent attribute means false; Boolean.parseBoolean also yields false for any non-"true" value,
 * so the collapsed boolean expression is behaviorally identical to the original if/else.
 */
private static boolean useCpuSocketAffinity(Element nodesElement) {
    return nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)
           && Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
}
private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
    // Prepends the nodes-level JVM arguments only to containers that have no per-node options assigned.
    containers.stream()
              .filter(container -> container.getAssignedJvmOptions().isEmpty())
              .forEach(container -> container.prependJvmOptions(jvmArgs));
}
private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
    // Only applies when the nodes element explicitly sets a preload attribute.
    if (nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) {
        String preload = nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME);
        containers.forEach(container -> container.setPreLoad(preload));
    }
}
private void addSearchHandler(DeployState deployState, ApplicationContainerCluster cluster, Element searchElement, ConfigModelContext context) {
    // Hosted tenant applications serve search on the data plane ports instead of the default binding.
    List<BindingPattern> defaultBindings = isHostedTenantApplication(context)
            ? SearchHandler.bindingPattern(getDataplanePorts(deployState))
            : List.of(SearchHandler.DEFAULT_BINDING);
    SearchHandler handler = new SearchHandler(cluster,
                                              serverBindings(deployState, context, searchElement, defaultBindings),
                                              ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null));
    cluster.addComponent(handler);
    handler.addComponent(Component.fromClassAndBundle(SearchHandler.EXECUTION_FACTORY, PlatformBundles.SEARCH_AND_DOCPROC_BUNDLE));
}
private List<BindingPattern> serverBindings(DeployState deployState, ConfigModelContext context, Element searchElement, Collection<BindingPattern> defaultBindings) {
    // Explicit <binding> children replace the defaults entirely.
    List<Element> bindingElements = XML.getChildren(searchElement, "binding");
    return bindingElements.isEmpty()
            ? List.copyOf(defaultBindings)
            : toBindingList(deployState, context, bindingElements);
}
private List<BindingPattern> toBindingList(DeployState deployState, ConfigModelContext context, List<Element> bindingElements) {
    // Hosted tenant applications get user bindings rewritten onto the data plane ports.
    Set<Integer> portOverride = isHostedTenantApplication(context) ? getDataplanePorts(deployState) : Set.of();
    List<BindingPattern> patterns = new ArrayList<>();
    for (Element bindingElement : bindingElements) {
        String pattern = bindingElement.getTextContent().trim();
        if (pattern.isEmpty()) continue;  // ignore empty <binding/> elements
        patterns.addAll(userBindingPattern(pattern, portOverride));
    }
    return patterns;
}
private static Collection<UserBindingPattern> userBindingPattern(String path, Set<Integer> portBindingOverride) {
    // No overrides: use the pattern as-is. Otherwise produce one variant per overridden port.
    UserBindingPattern base = UserBindingPattern.fromPattern(path);
    if (portBindingOverride.isEmpty())
        return Set.of(base);
    return portBindingOverride.stream().map(base::withOverriddenPort).toList();
}
private ContainerDocumentApi buildDocumentApi(DeployState deployState, ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    // Returns null when the cluster declares no <document-api>.
    Element documentApiElement = XML.getChild(spec, "document-api");
    if (documentApiElement == null) return null;

    boolean ignoreUndefinedFields = "true".equals(XML.getValue(XML.getChild(documentApiElement, "ignore-undefined-fields")));
    return new ContainerDocumentApi(cluster,
                                    DocumentApiOptionsBuilder.build(documentApiElement),
                                    ignoreUndefinedFields,
                                    portBindingOverride(deployState, context));
}
private Set<Integer> portBindingOverride(DeployState deployState, ConfigModelContext context) {
    // Only hosted tenant applications have their bindings moved to the data plane ports.
    if (isHostedTenantApplication(context))
        return getDataplanePorts(deployState);
    return Set.of();
}
private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    // Returns null when the cluster declares no <document-processing>.
    Element docprocElement = XML.getChild(spec, "document-processing");
    if (docprocElement == null) return null;

    addIncludes(docprocElement);
    DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
    ContainerDocproc.Options options = DocprocOptionsBuilder.build(docprocElement, deployState.getDeployLogger());
    return new ContainerDocproc(cluster, chains, options, !standaloneBuilder);
}
private void addIncludes(Element parentElement) {
    // Expands every <include> child in place; requires an application package to resolve directories.
    List<Element> includeElements = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includeElements.isEmpty()) return;
    if (app == null)
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
    includeElements.forEach(include -> addInclude(parentElement, include));
}
private void addInclude(Element parentElement, Element include) {
    // Appends the top-level elements of every XML file in the referenced directory to parentElement.
    String dirName = include.getAttribute(IncludeDirs.DIR);
    app.validateIncludeDir(dirName);
    for (Element includedFile : Xml.allElemsFromPath(app, dirName)) {
        for (Element child : XML.getChildren(includedFile)) {
            // importNode copies the subtree into parentElement's owner document before appending.
            Node imported = parentElement.getOwnerDocument().importNode(child, true);
            parentElement.appendChild(imported);
        }
    }
}
private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
                                            Element parent, String componentName) {
    // Resolve model ids before building, so hosted deployments see concrete model references.
    XML.getChildren(parent, componentName).forEach(component -> {
        ModelIdResolver.resolveModelIds(component, deployState.isHosted());
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, component));
    });
}
private static void validateAndAddConfiguredComponents(DeployState deployState,
                                                       ContainerCluster<? extends Container> cluster,
                                                       Element spec,
                                                       String componentName,
                                                       Consumer<Element> elementValidator) {
    // The validator throws on invalid elements before the component is built and added.
    for (Element element : XML.getChildren(spec, componentName)) {
        elementValidator.accept(element);
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, element));
    }
}
private void addIdentityProvider(ApplicationContainerCluster cluster,
                                 List<ConfigServerSpec> configServerSpecs,
                                 HostName loadBalancerName,
                                 URI ztsUrl,
                                 String athenzDnsSuffix,
                                 Zone zone,
                                 DeploymentSpec spec) {
    // No-op unless the deployment spec declares an Athenz domain.
    spec.athenzDomain()
        .ifPresent(domain -> {
            // The Athenz service can vary per instance/environment/region; missing configuration is a user error.
            AthenzService service = spec.athenzService(app.getApplicationId().instance(), zone.environment(), zone.region())
                    .orElseThrow(() -> new IllegalArgumentException("Missing Athenz service configuration in instance '" +
                                                                    app.getApplicationId().instance() + "'"));
            String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
            IdentityProvider identityProvider = new IdentityProvider(domain,
                                                                     service,
                                                                     getLoadBalancerName(loadBalancerName, configServerSpecs),
                                                                     ztsUrl,
                                                                     zoneDnsSuffix,
                                                                     zone);
            // Swap the default Athenz provider-provider component for the concrete identity provider.
            cluster.removeComponent(ComponentId.fromString("com.yahoo.container.jdisc.AthenzIdentityProviderProvider"));
            cluster.addComponent(identityProvider);

            var serviceIdentityProviderProvider = "com.yahoo.vespa.athenz.identityprovider.client.ServiceIdentityProviderProvider";
            cluster.addComponent(new SimpleComponent(new ComponentModel(serviceIdentityProviderProvider, serviceIdentityProviderProvider, "vespa-athenz")));

            // Propagate the identity to each container as service properties.
            cluster.getContainers().forEach(container -> {
                container.setProp("identity.domain", domain.value());
                container.setProp("identity.service", service.value());
            });
        });
}
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
    // Falls back to the first config server's host name, or "unknown", when no load balancer name is set.
    if (loadbalancerName != null) return loadbalancerName;
    String fallback = configServerSpecs.stream()
                                       .findFirst()
                                       .map(ConfigServerSpec::getHostName)
                                       .orElse("unknown");
    return HostName.of(fallback);
}
/** Returns the "zookeeper" child element of the given spec, or null if not present. */
private static Element getZooKeeper(Element spec) {
    return XML.getChild(spec, "zookeeper");
}
/** Disallow renderers named "XmlRenderer" or "JsonRenderer" */
private static void validateRendererElement(Element element) {
    String id = element.getAttribute("id");
    boolean reserved = id.equals(xmlRendererId) || id.equals(jsonRendererId);
    if (reserved)
        throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", id));
}
/** Returns whether the given element is a container cluster tag (tag name "container"). */
public static boolean isContainerTag(Element element) {
    return CONTAINER_TAG.equals(element.getTagName());
}
/**
* Validates JVM options and logs a warning or fails deployment (depending on feature flag)
* if anyone of them has invalid syntax or is an option that is unsupported for the running system.
*/
private static class JvmOptions {

    // Shape of an acceptable JVM flag: a leading '-' followed by option characters.
    private static final Pattern validPattern = Pattern.compile("-[a-zA-z0-9=:./,+*-]+");
    // Remote-debugging (jdwp) transport options are never allowed on hosted Vespa.
    private static final Pattern invalidInHostedPattern = Pattern.compile("-Xrunjdwp:transport=.*");

    private final Element nodesElement;   // the <nodes> element options are read from
    private final DeployLogger logger;
    private final boolean legacyOptions;  // true: read the deprecated 'jvm-options' attribute instead of <jvm options="...">
    private final boolean isHosted;       // hosted: invalid options fail deployment; self-hosted: warn only

    public JvmOptions(Element nodesElement, DeployState deployState, boolean legacyOptions) {
        this.nodesElement = nodesElement;
        this.logger = deployState.getDeployLogger();
        this.legacyOptions = legacyOptions;
        this.isHosted = deployState.isHosted();
    }

    // Returns the validated JVM options string; "" when nothing is configured.
    String build() {
        if (legacyOptions)
            return buildLegacyOptions();

        Element jvmElement = XML.getChild(nodesElement, "jvm");
        if (jvmElement == null) return "";
        String jvmOptions = jvmElement.getAttribute(VespaDomBuilder.OPTIONS);
        if (jvmOptions.isEmpty()) return "";
        validateJvmOptions(jvmOptions);
        return jvmOptions;
    }

    // Reads the deprecated 'jvm-options' attribute (may return null) and warns about its removal in Vespa 9.
    String buildLegacyOptions() {
        String jvmOptions = null;
        if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
            jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
            if (! jvmOptions.isEmpty())
                logger.logApplicationPackage(WARNING, "'jvm-options' is deprecated and will be removed in Vespa 9." +
                        " Please merge 'jvm-options' into 'options' or 'gc-options' in 'jvm' element." +
                        " See https:
        }
        validateJvmOptions(jvmOptions);
        return jvmOptions;
    }

    // Splits on single spaces and collects options that do not match validPattern
    // (plus, on hosted, any jdwp transport options). Hosted deployments fail; others warn.
    private void validateJvmOptions(String jvmOptions) {
        if (jvmOptions == null || jvmOptions.isEmpty()) return;

        String[] optionList = jvmOptions.split(" ");
        List<String> invalidOptions = Arrays.stream(optionList)
                .filter(option -> !option.isEmpty())
                .filter(option -> !Pattern.matches(validPattern.pattern(), option))
                .sorted()
                .collect(Collectors.toCollection(ArrayList::new));
        if (isHosted)
            invalidOptions.addAll(Arrays.stream(optionList)
                    .filter(option -> !option.isEmpty())
                    .filter(option -> Pattern.matches(invalidInHostedPattern.pattern(), option))
                    .sorted().toList());

        if (invalidOptions.isEmpty()) return;

        String message = "Invalid or misplaced JVM options in services.xml: " +
                String.join(",", invalidOptions) + "." +
                " See https:
        if (isHosted)
            throw new IllegalArgumentException(message);
        else
            logger.logApplicationPackage(WARNING, message);
    }
}
/**
* Validates JVM GC options and logs a warning or fails deployment (depending on feature flag)
* if anyone of them has invalid syntax or is an option that is unsupported for the running system
* (e.g. uses CMS options for hosted Vespa, which uses JDK 17).
*/
private static class JvmGcOptions {

    // Shape of an acceptable -XX GC flag.
    private static final Pattern validPattern = Pattern.compile("-XX:[+-]*[a-zA-z0-9=]+");
    // CMS was removed from the JDK; any CMS-related -XX flag is invalid.
    private static final Pattern invalidCMSPattern = Pattern.compile("-XX:[+-]\\w*CMS[a-zA-z0-9=]+");

    private final DeployState deployState;
    private final String jvmGcOptions;  // GC options from services.xml; null when not specified there
    private final DeployLogger logger;
    private final boolean isHosted;     // hosted: invalid options fail deployment; self-hosted: warn only

    public JvmGcOptions(DeployState deployState, String jvmGcOptions) {
        this.deployState = deployState;
        this.jvmGcOptions = jvmGcOptions;
        this.logger = deployState.getDeployLogger();
        this.isHosted = deployState.isHosted();
    }

    // Returns the effective GC options: services.xml value if present (validated),
    // else the deploy property, else a system-dependent default collector.
    private String build() {
        String options = deployState.getProperties().jvmGCOptions();
        if (jvmGcOptions != null) {
            options = jvmGcOptions;
            String[] optionList = options.split(" ");
            List<String> invalidOptions = Arrays.stream(optionList)
                    .filter(option -> !option.isEmpty())
                    .filter(option -> !Pattern.matches(validPattern.pattern(), option)
                            || Pattern.matches(invalidCMSPattern.pattern(), option)
                            || option.equals("-XX:+UseConcMarkSweepGC"))
                    .sorted()
                    .toList();
            logOrFailInvalidOptions(invalidOptions);
        }

        if (options == null || options.isEmpty())
            options = deployState.isHosted() ? ContainerCluster.PARALLEL_GC : ContainerCluster.G1GC;

        return options;
    }

    // Hosted deployments fail on invalid GC options; self-hosted ones only get a warning.
    private void logOrFailInvalidOptions(List<String> options) {
        if (options.isEmpty()) return;

        String message = "Invalid or misplaced JVM GC options in services.xml: " +
                String.join(",", options) + "." +
                " See https:
        if (isHosted)
            throw new IllegalArgumentException(message);
        else
            logger.logApplicationPackage(WARNING, message);
    }
}
private static Set<Integer> getDataplanePorts(DeployState ds) {
    // Always includes the mTLS port; adds the token port when token support is enabled.
    OptionalInt tokenPort = getTokenDataplanePort(ds);
    int mtlsPort = getMtlsDataplanePort(ds);
    if (tokenPort.isEmpty())
        return Set.of(mtlsPort);
    return Set.of(mtlsPort, tokenPort.getAsInt());
}
private static int getMtlsDataplanePort(DeployState ds) {
    // Token-enabled deployments serve mTLS on 8443; otherwise the legacy 4443 port is used.
    if (enableTokenSupport(ds)) return 8443;
    return 4443;
}
private static OptionalInt getTokenDataplanePort(DeployState ds) {
    // Port 8444 carries token-authenticated traffic; absent when token support is disabled.
    if ( ! enableTokenSupport(ds)) return OptionalInt.empty();
    return OptionalInt.of(8444);
}
private static Set<ContainerEndpoint> tokenEndpoints(DeployState deployState) {
    // All declared endpoints that use token authentication.
    return deployState.getEndpoints()
                      .stream()
                      .filter(candidate -> candidate.authMethod() == ApplicationClusterEndpoint.AuthMethod.token)
                      .collect(Collectors.toSet());
}
private static boolean enableTokenSupport(DeployState state) {
    // Token support requires a hosted public system with at least one token endpoint declared.
    if ( ! state.isHosted()) return false;
    if ( ! state.zone().system().isPublic()) return false;
    return ! tokenEndpoints(state).isEmpty();
}
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";
private static final String CONTAINER_TAG = "container";
private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";
private static final int MIN_ZOOKEEPER_NODE_COUNT = 1;
private static final int MAX_ZOOKEEPER_NODE_COUNT = 7;
public enum Networking { disable, enable }
private ApplicationPackage app;
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
protected DeployLogger log;
public static final List<ConfigModelId> configModelIds = List.of(ConfigModelId.fromName(CONTAINER_TAG));
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
super(ContainerModel.class);
this.standaloneBuilder = standaloneBuilder;
this.networking = networking;
this.rpcServerEnabled = !standaloneBuilder;
this.httpServerEnabled = networking == Networking.enable;
}
@Override
public List<ConfigModelId> handlesElements() {
return configModelIds;
}
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
log = modelContext.getDeployLogger();
app = modelContext.getApplicationPackage();
checkVersion(spec);
ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
addClusterContent(cluster, spec, modelContext);
cluster.setMessageBusEnabled(rpcServerEnabled);
cluster.setRpcServerEnabled(rpcServerEnabled);
cluster.setHttpServerEnabled(httpServerEnabled);
model.setCluster(cluster);
}
private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
return new VespaDomBuilder.DomConfigProducerBuilderBase<ApplicationContainerCluster>() {
@Override
protected ApplicationContainerCluster doBuild(DeployState deployState, TreeConfigProducer<AnyConfigProducer> ancestor, Element producerSpec) {
return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
modelContext.getProducerId(), deployState);
}
}.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
addConfiguredComponents(deployState, cluster, spec);
addSecretStore(cluster, spec, deployState);
addProcessing(deployState, spec, cluster, context);
addSearch(deployState, spec, cluster, context);
addDocproc(deployState, spec, cluster);
addDocumentApi(deployState, spec, cluster, context);
cluster.addDefaultHandlersExceptStatus();
addStatusHandlers(cluster, context.getDeployState().isHosted());
addUserHandlers(deployState, cluster, spec, context);
addClients(deployState, spec, cluster);
addHttp(deployState, spec, cluster, context);
addAccessLogs(deployState, cluster, spec);
addNodes(cluster, spec, context);
addModelEvaluationRuntime(cluster);
addModelEvaluation(spec, cluster, context);
addServerProviders(deployState, spec, cluster);
if (!standaloneBuilder) cluster.addAllPlatformBundles();
addDeploymentSpecConfig(cluster, context, deployState.getDeployLogger());
addZooKeeper(cluster, spec);
addParameterStoreValidationHandler(cluster, deployState);
}
private void addParameterStoreValidationHandler(ApplicationContainerCluster cluster, DeployState deployState) {
if ( ! deployState.isHosted()) return;
cluster.addPlatformBundle(PlatformBundles.absoluteBundlePath("jdisc-cloud-aws"));
if (deployState.zone().system().isPublic()) {
BindingPattern bindingPattern = SystemBindingPattern.fromHttpPath("/validate-secret-store");
Handler handler = new Handler(
new ComponentModel("com.yahoo.jdisc.cloud.aws.AwsParameterStoreValidationHandler", null, "jdisc-cloud-aws", null));
handler.addServerBindings(bindingPattern);
cluster.addComponent(handler);
}
}
private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) {
Element zooKeeper = getZooKeeper(spec);
if (zooKeeper == null) return;
Element nodesElement = XML.getChild(spec, "nodes");
boolean isCombined = nodesElement != null && nodesElement.hasAttribute("of");
if (isCombined) {
throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper");
}
long nonRetiredNodes = cluster.getContainers().stream().filter(c -> !c.isRetired()).count();
if (nonRetiredNodes < MIN_ZOOKEEPER_NODE_COUNT || nonRetiredNodes > MAX_ZOOKEEPER_NODE_COUNT || nonRetiredNodes % 2 == 0) {
throw new IllegalArgumentException("Cluster with ZooKeeper needs an odd number of nodes, between " +
MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT +
", have " + nonRetiredNodes + " non-retired");
}
cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade");
cluster.addSimpleComponent("com.yahoo.vespa.curator.CuratorWrapper", null, "zkfacade");
String sessionTimeoutSeconds = zooKeeper.getAttribute("session-timeout-seconds");
if ( ! sessionTimeoutSeconds.isBlank()) {
try {
int timeoutSeconds = Integer.parseInt(sessionTimeoutSeconds);
if (timeoutSeconds <= 0) throw new IllegalArgumentException("must be a positive value");
cluster.setZookeeperSessionTimeoutSeconds(timeoutSeconds);
}
catch (RuntimeException e) {
throw new IllegalArgumentException("invalid zookeeper session-timeout-seconds '" + sessionTimeoutSeconds + "'", e);
}
}
cluster.getContainers().forEach(ContainerModelBuilder::addReconfigurableZooKeeperServerComponents);
}
public static void addReconfigurableZooKeeperServerComponents(Container container) {
container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer", container));
container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.Reconfigurer", container));
container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl", container));
}
private static SimpleComponent zookeeperComponent(String idSpec, Container container) {
String configId = container.getConfigId();
return new SimpleComponent(new ComponentModel(idSpec, null, "zookeeper-server", configId));
}
private void addSecretStore(ApplicationContainerCluster cluster, Element spec, DeployState deployState) {
Element secretStoreElement = XML.getChild(spec, "secret-store");
if (secretStoreElement != null) {
String type = secretStoreElement.getAttribute("type");
if ("cloud".equals(type)) {
addCloudSecretStore(cluster, secretStoreElement, deployState);
} else {
SecretStore secretStore = new SecretStore();
for (Element group : XML.getChildren(secretStoreElement, "group")) {
secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
}
cluster.setSecretStore(secretStore);
}
}
}
private void addCloudSecretStore(ApplicationContainerCluster cluster, Element secretStoreElement, DeployState deployState) {
if ( ! deployState.isHosted()) return;
if ( ! cluster.getZone().system().isPublic())
throw new IllegalArgumentException("Cloud secret store is not supported in non-public system, see the documentation");
CloudSecretStore cloudSecretStore = new CloudSecretStore();
Map<String, TenantSecretStore> secretStoresByName = deployState.getProperties().tenantSecretStores()
.stream()
.collect(Collectors.toMap(
TenantSecretStore::getName,
store -> store
));
Element store = XML.getChild(secretStoreElement, "store");
for (Element group : XML.getChildren(store, "aws-parameter-store")) {
String account = group.getAttribute("account");
String region = group.getAttribute("aws-region");
TenantSecretStore secretStore = secretStoresByName.get(account);
if (secretStore == null)
throw new IllegalArgumentException("No configured secret store named " + account);
if (secretStore.getExternalId().isEmpty())
throw new IllegalArgumentException("No external ID has been set");
cloudSecretStore.addConfig(account, region, secretStore.getAwsId(), secretStore.getRole(), secretStore.getExternalId().get());
}
cluster.addComponent(cloudSecretStore);
}
private void addDeploymentSpecConfig(ApplicationContainerCluster cluster, ConfigModelContext context, DeployLogger deployLogger) {
if ( ! context.getDeployState().isHosted()) return;
DeploymentSpec deploymentSpec = app.getDeploymentSpec();
if (deploymentSpec.isEmpty()) return;
for (var deprecatedElement : deploymentSpec.deprecatedElements()) {
deployLogger.logApplicationPackage(WARNING, deprecatedElement.humanReadableString());
}
addIdentityProvider(cluster,
context.getDeployState().getProperties().configServerSpecs(),
context.getDeployState().getProperties().loadBalancerName(),
context.getDeployState().getProperties().ztsUrl(),
context.getDeployState().getProperties().athenzDnsSuffix(),
context.getDeployState().zone(),
deploymentSpec);
addRotationProperties(cluster, context.getDeployState().getEndpoints());
}
private void addRotationProperties(ApplicationContainerCluster cluster, Set<ContainerEndpoint> endpoints) {
cluster.getContainers().forEach(container -> {
setRotations(container, endpoints, cluster.getName());
container.setProp("activeRotation", "true");
});
}
private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
var rotationsProperty = endpoints.stream()
.filter(endpoint -> endpoint.clusterId().equals(containerClusterName))
.filter(endpoint -> endpoint.scope() == ApplicationClusterEndpoint.Scope.global)
.flatMap(endpoint -> endpoint.names().stream())
.collect(Collectors.toCollection(LinkedHashSet::new));
container.setProp("rotations", String.join(",", rotationsProperty));
}
private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element parent) {
for (Element components : XML.getChildren(parent, "components")) {
addIncludes(components);
addConfiguredComponents(deployState, cluster, components, "component");
}
addConfiguredComponents(deployState, cluster, parent, "component");
}
protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
if (isHostedVespa) {
String name = "status.html";
Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING));
cluster.addComponent(
new FileStatusHandlerComponent(
name + "-status-handler",
statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
SystemBindingPattern.fromHttpPath("/" + name)));
} else {
cluster.addVipHandler();
}
}
private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
addConfiguredComponents(deployState, cluster, spec, "server");
}
protected void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
List<Element> accessLogElements = getAccessLogElements(spec);
if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault()) {
cluster.addAccessLog();
} else {
if (cluster.isHostedVespa()) {
log.logApplicationPackage(WARNING, "Applications are not allowed to override the 'accesslog' element");
} else {
List<AccessLogComponent> components = new ArrayList<>();
for (Element accessLog : accessLogElements) {
AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(accessLogComponent -> {
components.add(accessLogComponent);
cluster.addComponent(accessLogComponent);
});
}
if (components.size() > 0) {
cluster.removeSimpleComponent(VoidRequestLog.class);
cluster.addSimpleComponent(AccessLog.class);
}
}
}
if (cluster.getAllComponents().stream().anyMatch(component -> component instanceof AccessLogComponent))
cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "access"));
}
private List<Element> getAccessLogElements(Element spec) {
return XML.getChildren(spec, "accesslog");
}
protected void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element httpElement = XML.getChild(spec, "http");
if (httpElement != null) {
cluster.setHttp(buildHttp(deployState, cluster, httpElement, context));
}
if (isHostedTenantApplication(context)) {
addHostedImplicitHttpIfNotPresent(deployState, cluster);
addHostedImplicitAccessControlIfNotPresent(deployState, cluster);
addDefaultConnectorHostedFilterBinding(cluster);
addCloudMtlsConnector(deployState, cluster);
addCloudDataPlaneFilter(deployState, cluster);
addCloudTokenSupport(deployState, cluster);
}
}
private static void addCloudDataPlaneFilter(DeployState deployState, ApplicationContainerCluster cluster) {
if (!deployState.isHosted() || !deployState.zone().system().isPublic()) return;
var dataplanePort = getMtlsDataplanePort(deployState);
var secureChain = new HttpFilterChain("cloud-data-plane-secure", HttpFilterChain.Type.SYSTEM);
secureChain.addInnerComponent(new CloudDataPlaneFilter(cluster, deployState));
cluster.getHttp().getFilterChains().add(secureChain);
cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
.filter(c -> c.getListenPort() == dataplanePort).findAny().orElseThrow()
.setDefaultRequestFilterChain(secureChain.getComponentId());
var insecureChain = new HttpFilterChain("cloud-data-plane-insecure", HttpFilterChain.Type.SYSTEM);
insecureChain.addInnerComponent(new Filter(
new ChainedComponentModel(
new BundleInstantiationSpecification(
new ComponentSpecification("com.yahoo.jdisc.http.filter.security.misc.NoopFilter"),
null, new ComponentSpecification("jdisc-security-filters")),
Dependencies.emptyDependencies())));
cluster.getHttp().getFilterChains().add(insecureChain);
var insecureChainComponentSpec = new ComponentSpecification(insecureChain.getComponentId().toString());
FilterBinding insecureBinding =
FilterBinding.create(FilterBinding.Type.REQUEST, insecureChainComponentSpec, VIP_HANDLER_BINDING);
cluster.getHttp().getBindings().add(insecureBinding);
cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
.filter(c -> c.getListenPort() == Defaults.getDefaults().vespaWebServicePort()).findAny().orElseThrow()
.setDefaultRequestFilterChain(insecureChain.getComponentId());
}
protected void addClients(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
if (!deployState.isHosted() || !deployState.zone().system().isPublic()) return;
List<Client> clients;
Element clientsElement = XML.getChild(spec, "clients");
boolean legacyMode = false;
if (clientsElement == null) {
clients = List.of(new Client(
"default", List.of(), getCertificates(app.getFile(Path.fromString("security/clients.pem"))), List.of()));
legacyMode = true;
} else {
clients = XML.getChildren(clientsElement, "client").stream()
.flatMap(elem -> getClient(elem, deployState).stream())
.toList();
boolean atLeastOneClientWithCertificate = clients.stream().anyMatch(client -> !client.certificates().isEmpty());
if (!atLeastOneClientWithCertificate)
throw new IllegalArgumentException("At least one client must require a certificate");
}
List<X509Certificate> operatorAndTesterCertificates = deployState.getProperties().operatorCertificates();
if(!operatorAndTesterCertificates.isEmpty())
clients = Stream.concat(clients.stream(), Stream.of(Client.internalClient(operatorAndTesterCertificates))).toList();
cluster.setClients(legacyMode, clients);
}
private Optional<Client> getClient(Element clientElement, DeployState state) {
String clientId = XML.attribute("id", clientElement).orElseThrow();
if (clientId.startsWith("_"))
throw new IllegalArgumentException("Invalid client id '%s', id cannot start with '_'".formatted(clientId));
List<String> permissions = XML.attribute("permissions", clientElement)
.map(p -> p.split(",")).stream()
.flatMap(Arrays::stream)
.toList();
var certificates = XML.getChildren(clientElement, "certificate").stream()
.flatMap(certElem -> {
var file = app.getFile(Path.fromString(certElem.getAttribute("file")));
if (!file.exists()) {
throw new IllegalArgumentException("Certificate file '%s' for client '%s' does not exist"
.formatted(file.getPath().getRelative(), clientId));
}
return getCertificates(file).stream();
})
.toList();
if (!certificates.isEmpty()) return Optional.of(new Client(clientId, permissions, certificates, List.of()));
var knownTokens = state.getProperties().dataplaneTokens().stream()
.collect(Collectors.toMap(DataplaneToken::tokenId, Function.identity()));
var referencedTokens = XML.getChildren(clientElement, "token").stream()
.map(elem -> {
var tokenId = elem.getAttribute("id");
var token = knownTokens.get(tokenId);
if (token == null)
log.logApplicationPackage(
WARNING, "Token '%s' for client '%s' does not exist".formatted(tokenId, clientId));
return token;
})
.filter(token -> {
if (token == null) return false;
boolean empty = token.versions().isEmpty();
if (empty)
log.logApplicationPackage(
WARNING, "Token '%s' for client '%s' has no active versions"
.formatted(token.tokenId(), clientId));
return !empty;
})
.toList();
if (referencedTokens.isEmpty()) {
log.log(Level.INFO, "Skipping client '%s' as it does not refer to any activate tokens".formatted(clientId));
return Optional.empty();
}
return Optional.of(new Client(clientId, permissions, List.of(), referencedTokens));
}
private List<X509Certificate> getCertificates(ApplicationFile file) {
if (!file.exists()) return List.of();
try {
Reader reader = file.createReader();
String certPem = IOUtils.readAll(reader);
reader.close();
List<X509Certificate> x509Certificates = X509CertificateUtils.certificateListFromPem(certPem);
if (x509Certificates.isEmpty()) {
throw new IllegalArgumentException("File %s does not contain any certificates.".formatted(file.getPath().getRelative()));
}
return x509Certificates;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/** Binds the access-control filter chain (if any) to the default hosted connector. */
private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) {
    Http http = cluster.getHttp();
    http.getAccessControl().ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(http));
}
/**
 * Adds the cloud mTLS data-plane connector to the cluster's Jetty server.
 * Client authentication mode depends on whether an endpoint certificate is available,
 * whether the zone is public, and the cluster's access-control settings.
 */
private void addCloudMtlsConnector(DeployState state, ApplicationContainerCluster cluster) {
// NOTE(review): unchecked Optional.get — assumes an HTTP server has already been set on the cluster; confirm callers guarantee this
JettyHttpServer server = cluster.getHttp().getHttpServer().get();
String serverName = server.getComponentId().getName();
var builder = HostedSslConnectorFactory.builder(serverName, getMtlsDataplanePort(state))
.proxyProtocol(true, state.getProperties().featureFlags().enableProxyProtocolMixedMode())
.tlsCiphersOverride(state.getProperties().tlsCiphersOverride())
.endpointConnectionTtl(state.getProperties().endpointConnectionTtl());
var endpointCert = state.endpointCertificateSecrets().orElse(null);
if (endpointCert != null) {
builder.endpointCertificate(endpointCert);
boolean isPublic = state.zone().system().isPublic();
List<X509Certificate> clientCertificates = getClientCertificates(cluster);
if (isPublic) {
// Public system: client certificates from security/clients.pem are required
if (clientCertificates.isEmpty())
throw new IllegalArgumentException("Client certificate authority security/clients.pem is missing - " +
"see: https:
builder.tlsCaCertificatesPem(X509CertificateUtils.toPem(clientCertificates))
.clientAuth(SslClientAuth.WANT_WITH_ENFORCER);
} else {
// Non-public system: trust the Athenz CA bundle instead of application-provided certs
builder.tlsCaCertificatesPath("/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem");
var needAuth = cluster.getHttp().getAccessControl()
.map(accessControl -> accessControl.clientAuthentication)
.map(clientAuth -> clientAuth == AccessControl.ClientAuthentication.need)
.orElse(false);
builder.clientAuth(needAuth ? SslClientAuth.NEED : SslClientAuth.WANT);
}
} else {
// No endpoint certificate available: fall back to enforced client auth
builder.clientAuth(SslClientAuth.WANT_WITH_ENFORCER);
}
var connectorFactory = builder.build();
cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory));
server.addConnector(connectorFactory);
}
/** Returns all certificates configured on all clients of the given cluster. */
private List<X509Certificate> getClientCertificates(ApplicationContainerCluster cluster) {
    return cluster.getClients().stream()
                  .flatMap(client -> client.certificates().stream())
                  .toList();
}
/** Returns whether this deployment is a tenant application on hosted Vespa. */
private static boolean isHostedTenantApplication(ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    return deployState.isHostedTenantApplication(context.getApplicationType());
}
/** Ensures the cluster has an http setup, an HTTP server, and a connector on the default Vespa port. */
private static void addHostedImplicitHttpIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
    Http http = cluster.getHttp();
    if (http == null) {
        http = new Http(new FilterChains(cluster));
        cluster.setHttp(http);
    }
    JettyHttpServer server = http.getHttpServer().orElse(null);
    if (server == null) {
        server = new JettyHttpServer("DefaultHttpServer", cluster, deployState);
        http.setHttpServer(server);
    }
    int defaultPort = Defaults.getDefaults().vespaWebServicePort();
    boolean hasDefaultConnector = server.getConnectorFactories().stream()
                                        .anyMatch(connector -> connector.getListenPort() == defaultPort);
    if ( ! hasDefaultConnector)
        server.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build());
}
/** Adds implicit access control guarded by the tenant's Athenz domain, unless access control is already configured. */
private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
    Http http = cluster.getHttp();
    if (http.getAccessControl().isPresent()) return;  // explicit access control in services.xml wins
    Optional<AthenzDomain> tenantDomain = deployState.getProperties().athenzDomain();
    if (tenantDomain.isEmpty()) return;               // no domain means no implicit access control
    AccessControl accessControl = new AccessControl.Builder(tenantDomain.get().value())
            .setHandlers(cluster)
            .clientAuthentication(AccessControl.ClientAuthentication.need)
            .build();
    accessControl.configureHttpFilterChains(http);
}
/** Builds the http model from the given element, removing all servers when networking is disabled. */
private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement, ConfigModelContext context) {
    HttpBuilder builder = new HttpBuilder(portBindingOverride(deployState, context));
    Http http = builder.build(deployState, cluster, httpElement);
    if (networking == Networking.disable)
        http.removeAllServers();
    return http;
}
/** Sets the document API on the cluster if a document-api element is present in the spec. */
private void addDocumentApi(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    ContainerDocumentApi documentApi = buildDocumentApi(deployState, cluster, spec, context);
    if (documentApi != null)
        cluster.setDocumentApi(documentApi);
}
/** Sets document processing and derived message-bus parameters on the cluster, if configured. */
private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    ContainerDocproc docproc = buildDocproc(deployState, cluster, spec);
    if (docproc == null) return;
    cluster.setDocproc(docproc);
    ContainerDocproc.Options options = docproc.options;
    cluster.setMbusParams(new ApplicationContainerCluster.MbusParams(options.maxConcurrentFactor,
                                                                     options.documentExpansionFactor,
                                                                     options.containerCoreMemory));
}
/** Configures search on the cluster from a search element, if present: chains, handler and renderers. */
private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    Element search = XML.getChild(spec, "search");
    if (search == null) return;
    addIncludes(search);
    cluster.setSearch(buildSearch(deployState, cluster, search));
    addSearchHandler(deployState, cluster, search, context);
    validateAndAddConfiguredComponents(deployState, cluster, search, "renderer", ContainerModelBuilder::validateRendererElement);
}
/**
 * Configures stateless model evaluation from the model-evaluation element, if present.
 * ONNX model settings (execution mode, thread counts, GPU device) from services.xml are
 * applied to the file-distributed models from the rank profiles; unknown model names are
 * logged and skipped.
 */
private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
if (modelEvaluationElement == null) return;
RankProfileList profiles =
context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;
// Clone so per-deployment settings below do not mutate the shared rank profile models
FileDistributedOnnxModels models = profiles.getOnnxModels().clone();
// NOTE(review): if <onnx> is absent, onnxElement is null and is passed to XML.getChild —
// presumably that returns null and the loop below runs zero times; confirm XML helper semantics
Element onnxElement = XML.getChild(modelEvaluationElement, "onnx");
Element modelsElement = XML.getChild(onnxElement, "models");
for (Element modelElement : XML.getChildren(modelsElement, "model") ) {
OnnxModel onnxModel = models.asMap().get(modelElement.getAttribute("name"));
if (onnxModel == null) {
// Unknown model name: warn and skip rather than failing deployment
String availableModels = String.join(", ", profiles.getOnnxModels().asMap().keySet());
context.getDeployState().getDeployLogger().logApplicationPackage(WARNING,
"Model '" + modelElement.getAttribute("name") + "' not found. Available ONNX " +
"models are: " + availableModels + ". Skipping this configuration.");
continue;
}
onnxModel.setStatelessExecutionMode(getStringValue(modelElement, "execution-mode", null));
onnxModel.setStatelessInterOpThreads(getIntValue(modelElement, "interop-threads", -1));
onnxModel.setStatelessIntraOpThreads(getIntValue(modelElement, "intraop-threads", -1));
Element gpuDeviceElement = XML.getChild(modelElement, "gpu-device");
if (gpuDeviceElement != null) {
int gpuDevice = Integer.parseInt(gpuDeviceElement.getTextContent());
// Only honor the GPU device if at least one container node actually has GPU resources
boolean hasGpu = cluster.getContainers().stream().anyMatch(container -> container.getHostResource() != null &&
!container.getHostResource().realResources().gpuResources().isZero());
onnxModel.setGpuDevice(gpuDevice, hasGpu);
}
cluster.onnxModelCost().registerModel(context.getApplicationPackage().getFile(onnxModel.getFilePath()));
}
cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles, models));
}
/** Returns the text content of the named child element, or the given default if the child is absent. */
private String getStringValue(Element element, String name, String defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return child.getTextContent();
}
/** Returns the text content of the named child element parsed as an int, or the given default if absent. */
private int getIntValue(Element element, String name, int defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return Integer.parseInt(child.getTextContent());
}
/** Adds the model-evaluation runtime bundles and components available to all application clusters. */
protected void addModelEvaluationRuntime(ApplicationContainerCluster cluster) {
    // These bundles are added to all application container clusters, even if they haven't
    // declared 'model-evaluation' in services.xml, because there are many public API packages
    // in the model-evaluation bundle that could be used by customer code.
    for (var bundle : List.of(ContainerModelEvaluation.MODEL_EVALUATION_BUNDLE_FILE,
                              ContainerModelEvaluation.MODEL_INTEGRATION_BUNDLE_FILE,
                              ContainerModelEvaluation.ONNXRUNTIME_BUNDLE_FILE)) {
        cluster.addPlatformBundle(bundle);
    }
    // The ONNX runtime is always available for injection to any component
    cluster.addSimpleComponent(ContainerModelEvaluation.ONNX_RUNTIME_CLASS, null,
                               ContainerModelEvaluation.INTEGRATION_BUNDLE_NAME);
    // Runtime providing utilities such as metrics to embedder implementations
    cluster.addSimpleComponent("ai.vespa.embedding.EmbedderRuntime", null,
                               ContainerModelEvaluation.INTEGRATION_BUNDLE_NAME);
}
/** Configures processing chains on the cluster from a processing element, if present. */
private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    Element processing = XML.getChild(spec, "processing");
    if (processing == null) return;
    cluster.addSearchAndDocprocBundles();
    addIncludes(processing);
    BindingPattern[] bindings =
            serverBindings(deployState, context, processing, ProcessingChains.defaultBindings).toArray(BindingPattern[]::new);
    cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processing), bindings);
    validateAndAddConfiguredComponents(deployState, cluster, processing, "renderer", ContainerModelBuilder::validateRendererElement);
}
/** Builds the container search model: chains, page templates, query profiles and semantic rules. */
private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) {
    SearchChains chains = new DomSearchChainsBuilder().build(deployState, containerCluster, producerSpec);
    ContainerSearch search = new ContainerSearch(deployState, containerCluster, chains);
    applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), search);
    search.setQueryProfiles(deployState.getQueryProfiles());
    search.setSemanticRules(deployState.getSemanticRules());
    return search;
}
/** Validates and applies page templates from the application package to the search model. */
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage, ContainerSearch containerSearch) {
    PageTemplates.validate(applicationPackage);
    var pageTemplates = PageTemplates.create(applicationPackage);
    containerSearch.setPageTemplates(pageTemplates);
}
/** Adds each user-declared handler element in the spec as a component on the cluster. */
private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    var portOverride = portBindingOverride(deployState, context);  // same for all handlers; compute once
    for (Element handlerElement : XML.getChildren(spec, "handler")) {
        var handler = new DomHandlerBuilder(cluster, portOverride).build(deployState, cluster, handlerElement);
        cluster.addComponent(handler);
    }
}
/** Verifies that the container element declares version 1.0; anything else is rejected. */
private void checkVersion(Element spec) {
    String version = spec.getAttribute("version");
    boolean isVersionOne = Version.fromString(version).equals(new Version(1));
    if ( ! isVersionOne)
        throw new IllegalArgumentException("Expected container version to be 1.0, but got " + version);
}
/** Adds nodes to the cluster: a single standalone node, or nodes from the XML spec. */
private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    if (standaloneBuilder) {
        addStandaloneNode(cluster, context.getDeployState());
    } else {
        addNodesFromXml(cluster, spec, context);
    }
}
/** Adds a single "standalone" container node to the cluster. */
private void addStandaloneNode(ApplicationContainerCluster cluster, DeployState deployState) {
    int index = cluster.getContainers().size();
    ApplicationContainer node = new ApplicationContainer(cluster, "standalone", index, deployState);
    cluster.addContainers(List.of(node));
}
/** Validates and returns the effective JVM GC options for this deployment. */
private static String buildJvmGCOptions(ConfigModelContext context, String jvmGCOptions) {
    JvmGcOptions options = new JvmGcOptions(context.getDeployState(), jvmGCOptions);
    return options.build();
}
/** Validates and returns the JVM options declared on the nodes element (legacy or jvm tag form). */
private static String getJvmOptions(Element nodesElement,
                                    DeployState deployState,
                                    boolean legacyOptions) {
    JvmOptions options = new JvmOptions(nodesElement, deployState, legacyOptions);
    return options.build();
}
/** Returns the value of the given attribute, or null if the attribute is not present. */
private static String extractAttribute(Element element, String attrName) {
    if ( ! element.hasAttribute(attrName)) return null;
    return element.getAttribute(attrName);
}
/** Applies JVM options from either the jvm element or the legacy attributes on the nodes element. */
private void extractJvmOptions(List<ApplicationContainer> nodes,
                               ApplicationContainerCluster cluster,
                               Element nodesElement,
                               ConfigModelContext context) {
    Element jvmElement = XML.getChild(nodesElement, "jvm");
    if (jvmElement != null)
        extractJvmTag(nodes, cluster, nodesElement, jvmElement, context);
    else
        extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
}
/**
 * Applies JVM settings from the deprecated attributes directly on the nodes element
 * (jvm-options, jvm-gc-options, allocated-memory), logging deprecation warnings.
 */
private void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element nodesElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, getJvmOptions(nodesElement, context.getDeployState(), true));
// Only fall back to the legacy attribute if no GC options were set via the jvm element
if (cluster.getJvmGCOptions().isEmpty()) {
String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS);
if (jvmGCOptions != null && !jvmGCOptions.isEmpty()) {
DeployLogger logger = context.getDeployState().getDeployLogger();
logger.logApplicationPackage(WARNING, "'jvm-gc-options' is deprecated and will be removed in Vespa 9." +
" Please merge into 'gc-options' in 'jvm' element." +
" See https:
}
cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions));
}
// applyMemoryPercentage returns true when the deprecated attribute was actually set
if (applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME)))
context.getDeployState().getDeployLogger()
.logApplicationPackage(WARNING, "'allocated-memory' is deprecated and will be removed in Vespa 9." +
" Please merge into 'allocated-memory' in 'jvm' element." +
" See https:
}
/** Applies JVM options, allocated memory and GC options declared on the jvm element. */
private void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
                           Element nodesElement, Element jvmElement, ConfigModelContext context) {
    applyNodesTagJvmArgs(nodes, getJvmOptions(nodesElement, context.getDeployState(), false));
    applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
    cluster.setJvmGCOptions(buildJvmGCOptions(context, extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS)));
}
/**
 * Adds nodes to the cluster according to the given containerElement.
 *
 * Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set
 * of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed
 * simultaneously for all active config models.
 */
private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
    Element nodesElement = XML.getChild(containerElement, "nodes");
    if (nodesElement == null) {  // no nodes tag: implicit allocation
        cluster.addContainers(allocateWithoutNodesTag(cluster, context));
        return;
    }
    List<ApplicationContainer> nodes = createNodes(cluster, containerElement, nodesElement, context);
    extractJvmOptions(nodes, cluster, nodesElement, context);
    applyDefaultPreload(nodes, nodesElement);
    Map<String, String> envVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT));
    for (var entry : envVars.entrySet()) {
        for (var node : nodes) {
            node.addEnvironmentVariable(entry.getKey(), entry.getValue());
        }
    }
    if (useCpuSocketAffinity(nodesElement))
        AbstractService.distributeCpuSocketAffinity(nodes);
    cluster.addContainers(nodes);
}
/** Returns the zone endpoint declared in deployment.xml for the given cluster in this instance and zone. */
private ZoneEndpoint zoneEndpoint(ConfigModelContext context, ClusterSpec.Id cluster) {
    var properties = context.properties();
    InstanceName instance = properties.applicationId().instance();
    ZoneId zone = ZoneId.from(properties.zone().environment(), properties.zone().region());
    return context.getApplicationPackage().getDeploymentSpec().zoneEndpoint(instance, zone, cluster);
}
/**
 * Returns the environment variables declared as children of the given element, in declaration order.
 * Returns an empty map if the element is null.
 */
private static Map<String, String> getEnvironmentVariables(Element environmentVariables) {
    Map<String, String> variables = new LinkedHashMap<>();
    if (environmentVariables == null) return variables;
    for (Element variable : XML.getChildren(environmentVariables)) {
        // Identifier validates/normalizes the variable name taken from the element name
        String name = new com.yahoo.text.Identifier(variable.getNodeName()).toString();
        variables.put(name, variable.getTextContent());
    }
    return variables;
}
/** Creates the cluster's nodes according to which allocation style the nodes element declares. */
private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element containerElement,
                                               Element nodesElement, ConfigModelContext context) {
    if (nodesElement.hasAttribute("type")) {
        return createNodesFromNodeType(cluster, nodesElement, context);
    }
    if (nodesElement.hasAttribute("of")) {  // combined cluster: deprecated
        List<ApplicationContainer> containers = createNodesFromContentServiceReference(cluster, nodesElement, context);
        log.logApplicationPackage(WARNING, "Declaring combined cluster with <nodes of=\"...\"> is deprecated without " +
                                           "replacement, and the feature will be removed in Vespa 9. Use separate container and " +
                                           "content clusters instead");
        return containers;
    }
    boolean countGiven = nodesElement.hasAttribute("count");
    boolean manuallyDeployedHosted = cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed();
    if (countGiven || manuallyDeployedHosted)
        return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
    return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
}
/**
 * Parses a memory percentage string like "60%" and applies it to the cluster.
 *
 * @return true if a percentage was given and applied, false if the argument was null or empty
 * @throws IllegalArgumentException if the value is malformed
 */
private static boolean applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
    if (memoryPercentage == null || memoryPercentage.isEmpty()) return false;
    String trimmed = memoryPercentage.trim();
    if ( ! trimmed.endsWith("%"))
        throw new IllegalArgumentException("Missing % sign");
    String digits = trimmed.substring(0, trimmed.length() - 1).trim();
    try {
        cluster.setMemoryPercentage(Integer.parseInt(digits));
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
                                           " must be an integer percentage ending by the '%' sign", e);
    }
    return true;
}
/** Allocate a container cluster without a nodes tag */
private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    HostSystem hostSystem = cluster.hostSystem();
    if ( ! deployState.isHosted())
        return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);
    // Hosted: implicit dedicated allocation — 2 nodes in production, 1 elsewhere
    int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
    deployState.getDeployLogger().logApplicationPackage(Level.INFO, "Using " + nodeCount + " nodes in " + cluster);
    ClusterSpec.Id clusterId = ClusterSpec.Id.from(cluster.getName());
    var hosts = NodesSpecification.dedicated(nodeCount, context)
                                  .provision(hostSystem,
                                             ClusterSpec.Type.container,
                                             clusterId,
                                             zoneEndpoint(context, clusterId),
                                             deployState.getDeployLogger(),
                                             false,
                                             context.clusterInfo().build());
    return createNodesFromHosts(hosts, cluster, deployState);
}
/** Creates a single container node, "container.0", on the given host. */
private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    ApplicationContainer node = new ApplicationContainer(cluster, "container.0", 0, deployState);
    node.setHostResource(host);
    node.initService(deployState);
    return List.of(node);
}
/** Provisions nodes from a count specification, wrapping provisioning errors with the cluster context. */
private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
    try {
        NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
        ClusterSpec.Id clusterId = ClusterSpec.Id.from(cluster.name());
        boolean hasZooKeeper = getZooKeeper(containerElement) != null;
        Map<HostResource, ClusterMembership> hosts =
                nodesSpecification.provision(cluster.getRoot().hostSystem(),
                                             ClusterSpec.Type.container,
                                             clusterId,
                                             zoneEndpoint(context, clusterId),
                                             log,
                                             hasZooKeeper,
                                             context.clusterInfo().build());
        return createNodesFromHosts(hosts, cluster, context.getDeployState());
    }
    catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("In " + cluster, e);
    }
}
/** Allocates nodes of a specific node type (for infrastructure clusters). */
private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
    DeployState deployState = context.getDeployState();
    ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
                                         .vespaVersion(deployState.getWantedNodeVespaVersion())
                                         .dockerImageRepository(deployState.getWantedDockerImageRepo())
                                         .build();
    var hosts = cluster.getRoot().hostSystem().allocateHosts(clusterSpec, Capacity.fromRequiredNodeType(type), log);
    return createNodesFromHosts(hosts, cluster, deployState);
}
/** Provisions container nodes on the hosts of the content cluster referenced by the "of" attribute. */
private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodesSpecification nodeSpecification;
    try {
        nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
    }
    catch (IllegalArgumentException e) {
        throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
    }
    String referenceId = nodesElement.getAttribute("of");
    cluster.setHostClusterId(referenceId);
    var hosts = StorageGroup.provisionHosts(nodeSpecification, referenceId, cluster.getRoot().hostSystem(), context);
    return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
/** Creates one container node per provisioned host, named "container.&lt;index&gt;". */
private List<ApplicationContainer> createNodesFromHosts(Map<HostResource, ClusterMembership> hosts,
                                                        ApplicationContainerCluster cluster,
                                                        DeployState deployState) {
    List<ApplicationContainer> nodes = new ArrayList<>(hosts.size());
    hosts.forEach((host, membership) -> {
        String id = "container." + membership.index();
        ApplicationContainer node = new ApplicationContainer(cluster, id, membership.retired(), membership.index(), deployState);
        node.setHostResource(host);
        node.initService(deployState);
        nodes.add(node);
    });
    return nodes;
}
/** Creates one container node per explicit node element, indexed in declaration order. */
private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
    List<Element> nodeElements = XML.getChildren(nodesElement, "node");
    List<ApplicationContainer> nodes = new ArrayList<>(nodeElements.size());
    for (int index = 0; index < nodeElements.size(); index++) {
        ContainerServiceBuilder builder = new ContainerServiceBuilder("container." + index, index);
        nodes.add(builder.build(deployState, cluster, nodeElements.get(index)));
    }
    return nodes;
}
/** Returns whether the nodes element enables CPU socket affinity (false when the attribute is absent). */
private static boolean useCpuSocketAffinity(Element nodesElement) {
    return nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)
           && Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
}
/** Prepends the given JVM args to every container that has no options assigned yet. */
private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
    containers.stream()
              .filter(container -> container.getAssignedJvmOptions().isEmpty())
              .forEach(container -> container.prependJvmOptions(jvmArgs));
}
/** Applies the preload attribute on the nodes element, if declared, to all containers. */
private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
    if (nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) {
        String preload = nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME);
        containers.forEach(container -> container.setPreLoad(preload));
    }
}
/** Adds the search handler to the cluster, with data-plane port bindings for hosted tenant applications. */
private void addSearchHandler(DeployState deployState, ApplicationContainerCluster cluster, Element searchElement, ConfigModelContext context) {
    List<BindingPattern> defaultBindings = isHostedTenantApplication(context)
            ? SearchHandler.bindingPattern(getDataplanePorts(deployState))
            : List.of(SearchHandler.DEFAULT_BINDING);
    SearchHandler handler = new SearchHandler(cluster,
                                              serverBindings(deployState, context, searchElement, defaultBindings),
                                              ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null));
    cluster.addComponent(handler);
    handler.addComponent(Component.fromClassAndBundle(SearchHandler.EXECUTION_FACTORY, PlatformBundles.SEARCH_AND_DOCPROC_BUNDLE));
}
/** Returns user-declared binding elements, or the given defaults when none are declared. */
private List<BindingPattern> serverBindings(DeployState deployState, ConfigModelContext context, Element searchElement, Collection<BindingPattern> defaultBindings) {
    List<Element> bindingElements = XML.getChildren(searchElement, "binding");
    return bindingElements.isEmpty()
            ? List.copyOf(defaultBindings)
            : toBindingList(deployState, context, bindingElements);
}
/** Converts binding elements to binding patterns, applying data-plane port overrides for hosted tenant apps. */
private List<BindingPattern> toBindingList(DeployState deployState, ConfigModelContext context, List<Element> bindingElements) {
    Set<Integer> portOverride = isHostedTenantApplication(context) ? getDataplanePorts(deployState) : Set.of();
    List<BindingPattern> patterns = new ArrayList<>();
    for (Element bindingElement : bindingElements) {
        String pattern = bindingElement.getTextContent().trim();
        if (pattern.isEmpty()) continue;  // ignore empty binding elements
        patterns.addAll(userBindingPattern(pattern, portOverride));
    }
    return patterns;
}
/** Returns the binding pattern for the path, expanded to one pattern per overridden port when overrides are given. */
private static Collection<UserBindingPattern> userBindingPattern(String path, Set<Integer> portBindingOverride) {
    UserBindingPattern pattern = UserBindingPattern.fromPattern(path);
    if (portBindingOverride.isEmpty()) return Set.of(pattern);
    return portBindingOverride.stream().map(pattern::withOverriddenPort).toList();
}
/** Builds the document API model from the document-api element, or returns null if absent. */
private ContainerDocumentApi buildDocumentApi(DeployState deployState, ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    Element documentApiElement = XML.getChild(spec, "document-api");
    if (documentApiElement == null) return null;
    ContainerDocumentApi.HandlerOptions handlerOptions = DocumentApiOptionsBuilder.build(documentApiElement);
    boolean ignoreUndefinedFields = "true".equals(XML.getValue(XML.getChild(documentApiElement, "ignore-undefined-fields")));
    return new ContainerDocumentApi(cluster, handlerOptions, ignoreUndefinedFields, portBindingOverride(deployState, context));
}
/** Returns the data-plane ports to bind to for hosted tenant applications, otherwise an empty set. */
private Set<Integer> portBindingOverride(DeployState deployState, ConfigModelContext context) {
    if (isHostedTenantApplication(context)) return getDataplanePorts(deployState);
    return Set.of();
}
/** Builds the document processing model from the document-processing element, or returns null if absent. */
private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    Element docprocElement = XML.getChild(spec, "document-processing");
    if (docprocElement == null) return null;
    addIncludes(docprocElement);
    DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
    ContainerDocproc.Options options = DocprocOptionsBuilder.build(docprocElement, deployState.getDeployLogger());
    return new ContainerDocproc(cluster, chains, options, !standaloneBuilder);
}
/** Expands include child elements of the given element; requires an application package to resolve them. */
private void addIncludes(Element parentElement) {
    List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includes.isEmpty()) return;
    if (app == null) {
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
    }
    includes.forEach(include -> addInclude(parentElement, include));
}
/** Imports the child elements of every XML file in the include directory into the parent element. */
private void addInclude(Element parentElement, Element include) {
    String dirName = include.getAttribute(IncludeDirs.DIR);
    app.validateIncludeDir(dirName);
    var ownerDocument = parentElement.getOwnerDocument();
    for (Element includedFile : Xml.allElemsFromPath(app, dirName)) {
        for (Element includedSubElement : XML.getChildren(includedFile)) {
            // importNode(…, true) deep-copies the node into the parent's document
            Node copiedNode = ownerDocument.importNode(includedSubElement, true);
            parentElement.appendChild(copiedNode);
        }
    }
}
/** Adds every child element with the given name as a configured component, resolving model ids first. */
private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
                                            Element parent, String componentName) {
    for (Element componentElement : XML.getChildren(parent, componentName)) {
        ModelIdResolver.resolveModelIds(componentElement, deployState.isHosted());
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
    }
}
/** Validates each child element with the given name using the validator, then adds it as a component. */
private static void validateAndAddConfiguredComponents(DeployState deployState,
                                                       ContainerCluster<? extends Container> cluster,
                                                       Element spec,
                                                       String componentName,
                                                       Consumer<Element> elementValidator) {
    XML.getChildren(spec, componentName).forEach(componentElement -> {
        elementValidator.accept(componentElement);  // throws on invalid elements
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
    });
}
/**
 * Configures the Athenz identity provider on the cluster when the deployment spec declares an
 * Athenz domain. Replaces the default provider component, adds the service identity provider,
 * and exposes the domain/service as container properties.
 *
 * @throws IllegalArgumentException if a domain is declared but the instance has no Athenz service configured
 */
private void addIdentityProvider(ApplicationContainerCluster cluster,
List<ConfigServerSpec> configServerSpecs,
HostName loadBalancerName,
URI ztsUrl,
String athenzDnsSuffix,
Zone zone,
DeploymentSpec spec) {
spec.athenzDomain()
.ifPresent(domain -> {
AthenzService service = spec.athenzService(app.getApplicationId().instance(), zone.environment(), zone.region())
.orElseThrow(() -> new IllegalArgumentException("Missing Athenz service configuration in instance '" +
app.getApplicationId().instance() + "'"));
String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
IdentityProvider identityProvider = new IdentityProvider(domain,
service,
getLoadBalancerName(loadBalancerName, configServerSpecs),
ztsUrl,
zoneDnsSuffix,
zone);
// Remove the default provider before adding ours, so only one identity provider is active
cluster.removeComponent(ComponentId.fromString("com.yahoo.container.jdisc.AthenzIdentityProviderProvider"));
cluster.addComponent(identityProvider);
var serviceIdentityProviderProvider = "com.yahoo.vespa.athenz.identityprovider.client.ServiceIdentityProviderProvider";
cluster.addComponent(new SimpleComponent(new ComponentModel(serviceIdentityProviderProvider, serviceIdentityProviderProvider, "vespa-athenz")));
// Expose identity coordinates to each container as service properties
cluster.getContainers().forEach(container -> {
container.setProp("identity.domain", domain.value());
container.setProp("identity.service", service.value());
});
});
}
/** Returns the given load balancer name, falling back to the first config server's host name, or "unknown". */
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
    if (loadbalancerName != null) return loadbalancerName;
    String fallback = configServerSpecs.stream()
                                      .findFirst()
                                      .map(ConfigServerSpec::getHostName)
                                      .orElse("unknown");
    return HostName.of(fallback);
}
/** Returns the zookeeper child element of the given spec, or null if not present. */
private static Element getZooKeeper(Element spec) {
return XML.getChild(spec, "zookeeper");
}
/** Disallow renderers named "XmlRenderer" or "JsonRenderer" — these ids are reserved for internal use. */
private static void validateRendererElement(Element element) {
    String id = element.getAttribute("id");
    if (id.equals(xmlRendererId) || id.equals(jsonRendererId))
        throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", id));
}
/** Returns whether the given element is a container element. */
public static boolean isContainerTag(Element element) {
    String tagName = element.getTagName();
    return CONTAINER_TAG.equals(tagName);
}
/**
 * Validates JVM options and logs a warning or fails deployment (depending on feature flag)
 * if anyone of them has invalid syntax or is an option that is unsupported for the running system.
 */
private static class JvmOptions {
// NOTE(review): "a-zA-z" is almost certainly a typo for "a-zA-Z" — the ASCII range Z..a also
// admits the characters [ \ ] ^ _ and backtick. Fixing it would tighten validation and could
// reject previously-accepted options (e.g. containing '_'), so it is left as-is and flagged.
private static final Pattern validPattern = Pattern.compile("-[a-zA-z0-9=:./,+*-]+");
// Remote-debug transport options are never allowed on hosted Vespa
private static final Pattern invalidInHostedPattern = Pattern.compile("-Xrunjdwp:transport=.*");
private final Element nodesElement;
private final DeployLogger logger;
// True when reading from the deprecated attribute form on the nodes element
private final boolean legacyOptions;
private final boolean isHosted;
public JvmOptions(Element nodesElement, DeployState deployState, boolean legacyOptions) {
this.nodesElement = nodesElement;
this.logger = deployState.getDeployLogger();
this.legacyOptions = legacyOptions;
this.isHosted = deployState.isHosted();
}
/** Returns the validated JVM options string; empty when nothing is declared. */
String build() {
if (legacyOptions)
return buildLegacyOptions();
Element jvmElement = XML.getChild(nodesElement, "jvm");
if (jvmElement == null) return "";
String jvmOptions = jvmElement.getAttribute(VespaDomBuilder.OPTIONS);
if (jvmOptions.isEmpty()) return "";
validateJvmOptions(jvmOptions);
return jvmOptions;
}
// NOTE(review): unlike build(), this may return null (when the attribute is absent);
// callers apparently tolerate that — confirm before relying on non-null.
String buildLegacyOptions() {
String jvmOptions = null;
if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
if (! jvmOptions.isEmpty())
logger.logApplicationPackage(WARNING, "'jvm-options' is deprecated and will be removed in Vespa 9." +
" Please merge 'jvm-options' into 'options' or 'gc-options' in 'jvm' element." +
" See https:
}
validateJvmOptions(jvmOptions);
return jvmOptions;
}
/**
 * Collects options that fail the syntax pattern (and, on hosted, options matching the
 * hosted-forbidden pattern); throws on hosted, logs a warning otherwise.
 */
private void validateJvmOptions(String jvmOptions) {
if (jvmOptions == null || jvmOptions.isEmpty()) return;
String[] optionList = jvmOptions.split(" ");
List<String> invalidOptions = Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> !Pattern.matches(validPattern.pattern(), option))
.sorted()
.collect(Collectors.toCollection(ArrayList::new));
if (isHosted)
invalidOptions.addAll(Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> Pattern.matches(invalidInHostedPattern.pattern(), option))
.sorted().toList());
if (invalidOptions.isEmpty()) return;
String message = "Invalid or misplaced JVM options in services.xml: " +
String.join(",", invalidOptions) + "." +
" See https:
if (isHosted)
throw new IllegalArgumentException(message);
else
logger.logApplicationPackage(WARNING, message);
}
}
/**
* Validates JVM GC options and logs a warning or fails deployment (depending on feature flag)
* if anyone of them has invalid syntax or is an option that is unsupported for the running system
* (e.g. uses CMS options for hosted Vespa, which uses JDK 17).
*/
private static class JvmGcOptions {
private static final Pattern validPattern = Pattern.compile("-XX:[+-]*[a-zA-z0-9=]+");
private static final Pattern invalidCMSPattern = Pattern.compile("-XX:[+-]\\w*CMS[a-zA-z0-9=]+");
private final DeployState deployState;
private final String jvmGcOptions;
private final DeployLogger logger;
private final boolean isHosted;
public JvmGcOptions(DeployState deployState, String jvmGcOptions) {
this.deployState = deployState;
this.jvmGcOptions = jvmGcOptions;
this.logger = deployState.getDeployLogger();
this.isHosted = deployState.isHosted();
}
private String build() {
String options = deployState.getProperties().jvmGCOptions();
if (jvmGcOptions != null) {
options = jvmGcOptions;
String[] optionList = options.split(" ");
List<String> invalidOptions = Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> !Pattern.matches(validPattern.pattern(), option)
|| Pattern.matches(invalidCMSPattern.pattern(), option)
|| option.equals("-XX:+UseConcMarkSweepGC"))
.sorted()
.toList();
logOrFailInvalidOptions(invalidOptions);
}
if (options == null || options.isEmpty())
options = deployState.isHosted() ? ContainerCluster.PARALLEL_GC : ContainerCluster.G1GC;
return options;
}
private void logOrFailInvalidOptions(List<String> options) {
if (options.isEmpty()) return;
String message = "Invalid or misplaced JVM GC options in services.xml: " +
String.join(",", options) + "." +
" See https:
if (isHosted)
throw new IllegalArgumentException(message);
else
logger.logApplicationPackage(WARNING, message);
}
}
/** Returns all data plane ports: always the mTLS port, plus the token port when present. */
private static Set<Integer> getDataplanePorts(DeployState ds) {
    int mtlsPort = getMtlsDataplanePort(ds);
    OptionalInt tokenPort = getTokenDataplanePort(ds);
    if (tokenPort.isEmpty()) return Set.of(mtlsPort);
    return Set.of(mtlsPort, tokenPort.getAsInt());
}
/** Returns the data plane mTLS port: 8443 when token support is enabled, otherwise 4443. */
private static int getMtlsDataplanePort(DeployState ds) {
    if (enableTokenSupport(ds)) return 8443;
    return 4443;
}
/** Returns the data plane token port (8444), present only when token support is enabled. */
private static OptionalInt getTokenDataplanePort(DeployState ds) {
    if (enableTokenSupport(ds)) return OptionalInt.of(8444);
    return OptionalInt.empty();
}
/** Returns the endpoints of this deployment which use token authentication. */
private static Set<ContainerEndpoint> tokenEndpoints(DeployState deployState) {
    Set<ContainerEndpoint> withToken = new LinkedHashSet<>();
    for (ContainerEndpoint endpoint : deployState.getEndpoints()) {
        if (endpoint.authMethod() == ApplicationClusterEndpoint.AuthMethod.token)
            withToken.add(endpoint);
    }
    return withToken;
}
/** Token support requires a hosted public system with at least one token endpoint. */
private static boolean enableTokenSupport(DeployState state) {
    if ( ! state.isHosted()) return false;
    if ( ! state.zone().system().isPublic()) return false;
    return ! tokenEndpoints(state).isEmpty();
}
} |
Sure. | private void addCloudTokenSupport(DeployState state, ApplicationContainerCluster cluster) {
var server = cluster.getHttp().getHttpServer().get();
if (!enableTokenSupport(state)) return;
Set<String> tokenEndpoints = tokenEndpoints(state).stream()
.map(ContainerEndpoint::names)
.flatMap(Collection::stream)
.collect(Collectors.toSet());
var endpointCert = state.endpointCertificateSecrets().orElseThrow();
int tokenPort = getTokenDataplanePort(state).orElseThrow();
cluster.addSimpleComponent(DataplaneProxyCredentials.class);
cluster.addSimpleComponent(DataplaneProxyService.class);
var dataplaneProxy = new DataplaneProxy(
getMtlsDataplanePort(state),
tokenPort,
endpointCert.certificate(),
endpointCert.key(),
tokenEndpoints);
cluster.addComponent(dataplaneProxy);
var connector = HostedSslConnectorFactory.builder(server.getComponentId().getName()+"-token", tokenPort)
.tokenEndpoint(true)
.proxyProtocol(false, false)
.endpointCertificate(endpointCert)
.remoteAddressHeader("X-Forwarded-For")
.remotePortHeader("X-Forwarded-Port")
.clientAuth(SslClientAuth.NEED)
.build();
server.addConnector(connector);
var tokenChain = new HttpFilterChain("cloud-token-data-plane-secure", HttpFilterChain.Type.SYSTEM);
var tokenFilter = new CloudTokenDataPlaneFilter(cluster, state);
tokenChain.addInnerComponent(tokenFilter);
cluster.getHttp().getFilterChains().add(tokenChain);
cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
.filter(c -> c.getListenPort() == tokenPort).findAny().orElseThrow()
.setDefaultRequestFilterChain(tokenChain.getComponentId());
class CloudTokenDataPlaneHandler extends Handler implements CloudTokenDataPlaneFilterConfig.Producer {
CloudTokenDataPlaneHandler() {
super(new ComponentModel("com.yahoo.jdisc.http.filter.security.cloud.CloudTokenDataPlaneHandler", null, "jdisc-security-filters", null));
addServerBindings(SystemBindingPattern.fromHttpPortAndPath(Defaults.getDefaults().vespaWebServicePort(), "cloud-token-data-plane-fingerprints"));
}
@Override public void getConfig(Builder builder) { tokenFilter.getConfig(builder); }
}
cluster.addComponent(new CloudTokenDataPlaneHandler());
} | addServerBindings(SystemBindingPattern.fromHttpPortAndPath(Defaults.getDefaults().vespaWebServicePort(), "cloud-token-data-plane-fingerprints")); | private void addCloudTokenSupport(DeployState state, ApplicationContainerCluster cluster) {
var server = cluster.getHttp().getHttpServer().get();
if (!enableTokenSupport(state)) return;
Set<String> tokenEndpoints = tokenEndpoints(state).stream()
.map(ContainerEndpoint::names)
.flatMap(Collection::stream)
.collect(Collectors.toSet());
var endpointCert = state.endpointCertificateSecrets().orElseThrow();
int tokenPort = getTokenDataplanePort(state).orElseThrow();
cluster.addSimpleComponent(DataplaneProxyCredentials.class);
cluster.addSimpleComponent(DataplaneProxyService.class);
var dataplaneProxy = new DataplaneProxy(
getMtlsDataplanePort(state),
tokenPort,
endpointCert.certificate(),
endpointCert.key(),
tokenEndpoints);
cluster.addComponent(dataplaneProxy);
var connector = HostedSslConnectorFactory.builder(server.getComponentId().getName()+"-token", tokenPort)
.tokenEndpoint(true)
.proxyProtocol(false, false)
.endpointCertificate(endpointCert)
.remoteAddressHeader("X-Forwarded-For")
.remotePortHeader("X-Forwarded-Port")
.clientAuth(SslClientAuth.NEED)
.build();
server.addConnector(connector);
var tokenChain = new HttpFilterChain("cloud-token-data-plane-secure", HttpFilterChain.Type.SYSTEM);
var tokenFilter = new CloudTokenDataPlaneFilter(cluster, state);
tokenChain.addInnerComponent(tokenFilter);
cluster.getHttp().getFilterChains().add(tokenChain);
cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
.filter(c -> c.getListenPort() == tokenPort).findAny().orElseThrow()
.setDefaultRequestFilterChain(tokenChain.getComponentId());
class CloudTokenDataPlaneHandler extends Handler implements CloudTokenDataPlaneFilterConfig.Producer {
CloudTokenDataPlaneHandler() {
super(new ComponentModel("com.yahoo.jdisc.http.filter.security.cloud.CloudTokenDataPlaneHandler", null, "jdisc-security-filters", null));
addServerBindings(SystemBindingPattern.fromHttpPortAndPath(Defaults.getDefaults().vespaWebServicePort(), "/data-plane-tokens/v1"));
}
@Override public void getConfig(Builder builder) { tokenFilter.getConfig(builder); }
}
cluster.addComponent(new CloudTokenDataPlaneHandler());
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
// Load balancer status file served by hosted containers; location overridable via environment
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";

private static final String CONTAINER_TAG = "container";
private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";

// ZooKeeper clusters require an odd node count within these bounds (enforced in addZooKeeper)
private static final int MIN_ZOOKEEPER_NODE_COUNT = 1;
private static final int MAX_ZOOKEEPER_NODE_COUNT = 7;

/** Whether the container's network layer (http server) is enabled */
public enum Networking { disable, enable }

private ApplicationPackage app;
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
protected DeployLogger log;

public static final List<ConfigModelId> configModelIds = List.of(ConfigModelId.fromName(CONTAINER_TAG));

// Renderer ids reserved for the built-in renderers (see validateRendererElement)
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
/**
 * @param standaloneBuilder whether this builds a standalone container
 * @param networking whether the network layer (http server) is enabled
 */
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
    super(ContainerModel.class);
    this.standaloneBuilder = standaloneBuilder;
    this.networking = networking;
    // The RPC server is disabled in standalone mode
    this.rpcServerEnabled = !standaloneBuilder;
    this.httpServerEnabled = networking == Networking.enable;
}
/** Returns the config model ids (the 'container' tag) this builder handles. */
@Override
public List<ConfigModelId> handlesElements() {
    return configModelIds;
}
/** Builds the application container cluster from the 'container' element and attaches it to the model. */
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
    log = modelContext.getDeployLogger();
    app = modelContext.getApplicationPackage();
    checkVersion(spec);
    ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
    addClusterContent(cluster, spec, modelContext);
    // Message bus and the RPC server share the same enablement flag
    cluster.setMessageBusEnabled(rpcServerEnabled);
    cluster.setRpcServerEnabled(rpcServerEnabled);
    cluster.setHttpServerEnabled(httpServerEnabled);
    model.setCluster(cluster);
}
/** Creates the cluster via an anonymous dom builder so standard producer wiring applies. */
private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
    return new VespaDomBuilder.DomConfigProducerBuilderBase<ApplicationContainerCluster>() {
        @Override
        protected ApplicationContainerCluster doBuild(DeployState deployState, TreeConfigProducer<AnyConfigProducer> ancestor, Element producerSpec) {
            // The producer id is used as both cluster name and config id here
            return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
            modelContext.getProducerId(), deployState);
        }
    }.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
/**
 * Populates the cluster from the 'container' element.
 * NOTE(review): ordering below appears significant — e.g. addClients runs before addHttp,
 * and addHttp (via addCloudMtlsConnector) reads the clients set on the cluster; preserve order.
 */
private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
    addConfiguredComponents(deployState, cluster, spec);
    addSecretStore(cluster, spec, deployState);
    addProcessing(deployState, spec, cluster, context);
    addSearch(deployState, spec, cluster, context);
    addDocproc(deployState, spec, cluster);
    addDocumentApi(deployState, spec, cluster, context);
    cluster.addDefaultHandlersExceptStatus();
    addStatusHandlers(cluster, context.getDeployState().isHosted());
    addUserHandlers(deployState, cluster, spec, context);
    addClients(deployState, spec, cluster);
    addHttp(deployState, spec, cluster, context);
    addAccessLogs(deployState, cluster, spec);
    addNodes(cluster, spec, context);
    addModelEvaluationRuntime(cluster);
    addModelEvaluation(spec, cluster, context);
    addServerProviders(deployState, spec, cluster);
    // Standalone containers get platform bundles from the installation instead
    if (!standaloneBuilder) cluster.addAllPlatformBundles();
    addDeploymentSpecConfig(cluster, context, deployState.getDeployLogger());
    addZooKeeper(cluster, spec);
    addParameterStoreValidationHandler(cluster, deployState);
}
/** In hosted systems, adds the AWS parameter store bundle; public systems also get the validation handler. */
private void addParameterStoreValidationHandler(ApplicationContainerCluster cluster, DeployState deployState) {
    if ( ! deployState.isHosted()) return;
    cluster.addPlatformBundle(PlatformBundles.absoluteBundlePath("jdisc-cloud-aws"));
    if ( ! deployState.zone().system().isPublic()) return;

    Handler handler = new Handler(
            new ComponentModel("com.yahoo.jdisc.cloud.aws.AwsParameterStoreValidationHandler", null, "jdisc-cloud-aws", null));
    handler.addServerBindings(SystemBindingPattern.fromHttpPath("/validate-secret-store"));
    cluster.addComponent(handler);
}
/** Configures a ZooKeeper cluster on the container nodes when a zookeeper element is present. */
private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) {
    Element zooKeeper = getZooKeeper(spec);
    if (zooKeeper == null) return;

    // A 'nodes of' (combined) cluster shares nodes with content and cannot host ZooKeeper
    Element nodesElement = XML.getChild(spec, "nodes");
    boolean isCombined = nodesElement != null && nodesElement.hasAttribute("of");
    if (isCombined) {
        throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper");
    }
    // ZooKeeper quorum requires an odd node count within the allowed bounds
    long nonRetiredNodes = cluster.getContainers().stream().filter(c -> !c.isRetired()).count();
    if (nonRetiredNodes < MIN_ZOOKEEPER_NODE_COUNT || nonRetiredNodes > MAX_ZOOKEEPER_NODE_COUNT || nonRetiredNodes % 2 == 0) {
        throw new IllegalArgumentException("Cluster with ZooKeeper needs an odd number of nodes, between " +
        MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT +
        ", have " + nonRetiredNodes + " non-retired");
    }
    cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade");
    cluster.addSimpleComponent("com.yahoo.vespa.curator.CuratorWrapper", null, "zkfacade");

    // Optional session timeout override; must be a positive integer
    String sessionTimeoutSeconds = zooKeeper.getAttribute("session-timeout-seconds");
    if ( ! sessionTimeoutSeconds.isBlank()) {
        try {
            int timeoutSeconds = Integer.parseInt(sessionTimeoutSeconds);
            if (timeoutSeconds <= 0) throw new IllegalArgumentException("must be a positive value");
            cluster.setZookeeperSessionTimeoutSeconds(timeoutSeconds);
        }
        // Catches both NumberFormatException and the IllegalArgumentException above,
        // rewrapping with the offending value in the message
        catch (RuntimeException e) {
            throw new IllegalArgumentException("invalid zookeeper session-timeout-seconds '" + sessionTimeoutSeconds + "'", e);
        }
    }
    cluster.getContainers().forEach(ContainerModelBuilder::addReconfigurableZooKeeperServerComponents);
}
/** Adds the components required to run a reconfigurable ZooKeeper server on the given container. */
public static void addReconfigurableZooKeeperServerComponents(Container container) {
    for (String className : List.of("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer",
                                    "com.yahoo.vespa.zookeeper.Reconfigurer",
                                    "com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl")) {
        container.addComponent(zookeeperComponent(className, container));
    }
}
/** Creates a component in the zookeeper-server bundle, configured with the container's config id. */
private static SimpleComponent zookeeperComponent(String idSpec, Container container) {
    ComponentModel model = new ComponentModel(idSpec, null, "zookeeper-server", container.getConfigId());
    return new SimpleComponent(model);
}
/** Reads the optional 'secret-store' element: either a cloud secret store or plain grouped stores. */
private void addSecretStore(ApplicationContainerCluster cluster, Element spec, DeployState deployState) {
    Element secretStoreElement = XML.getChild(spec, "secret-store");
    if (secretStoreElement == null) return;

    if ("cloud".equals(secretStoreElement.getAttribute("type"))) {
        addCloudSecretStore(cluster, secretStoreElement, deployState);
        return;
    }
    SecretStore secretStore = new SecretStore();
    for (Element group : XML.getChildren(secretStoreElement, "group"))
        secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
    cluster.setSecretStore(secretStore);
}
/**
 * Configures a cloud (AWS Parameter Store backed) secret store. Only supported in hosted
 * public systems; every referenced account must be declared among the tenant's secret stores
 * and have an external ID set.
 */
private void addCloudSecretStore(ApplicationContainerCluster cluster, Element secretStoreElement, DeployState deployState) {
    if ( ! deployState.isHosted()) return;
    if ( ! cluster.getZone().system().isPublic())
        throw new IllegalArgumentException("Cloud secret store is not supported in non-public system, see the documentation");
    CloudSecretStore cloudSecretStore = new CloudSecretStore();
    Map<String, TenantSecretStore> secretStoresByName = deployState.getProperties().tenantSecretStores()
            .stream()
            .collect(Collectors.toMap(TenantSecretStore::getName, Function.identity()));
    Element store = XML.getChild(secretStoreElement, "store");
    for (Element group : XML.getChildren(store, "aws-parameter-store")) {
        String account = group.getAttribute("account");
        String region = group.getAttribute("aws-region");
        TenantSecretStore secretStore = secretStoresByName.get(account);
        if (secretStore == null)
            throw new IllegalArgumentException("No configured secret store named " + account);
        if (secretStore.getExternalId().isEmpty())
            // Name the offending store so the failure is actionable
            throw new IllegalArgumentException("No external ID has been set for secret store " + account);
        cloudSecretStore.addConfig(account, region, secretStore.getAwsId(), secretStore.getRole(), secretStore.getExternalId().get());
    }

    cluster.addComponent(cloudSecretStore);
}
/** Applies deployment.xml derived configuration (identity provider, rotations) for hosted applications. */
private void addDeploymentSpecConfig(ApplicationContainerCluster cluster, ConfigModelContext context, DeployLogger deployLogger) {
    if ( ! context.getDeployState().isHosted()) return;
    DeploymentSpec deploymentSpec = app.getDeploymentSpec();
    if (deploymentSpec.isEmpty()) return;

    // Surface use of deprecated deployment.xml elements to the deployer
    for (var deprecatedElement : deploymentSpec.deprecatedElements()) {
        deployLogger.logApplicationPackage(WARNING, deprecatedElement.humanReadableString());
    }

    addIdentityProvider(cluster,
    context.getDeployState().getProperties().configServerSpecs(),
    context.getDeployState().getProperties().loadBalancerName(),
    context.getDeployState().getProperties().ztsUrl(),
    context.getDeployState().getProperties().athenzDnsSuffix(),
    context.getDeployState().zone(),
    deploymentSpec);
    addRotationProperties(cluster, context.getDeployState().getEndpoints());
}
/** Marks every container with its global rotation names and the active-rotation property. */
private void addRotationProperties(ApplicationContainerCluster cluster, Set<ContainerEndpoint> endpoints) {
    for (Container container : cluster.getContainers()) {
        setRotations(container, endpoints, cluster.getName());
        container.setProp("activeRotation", "true");
    }
}
/** Sets the "rotations" property to the comma-joined global endpoint names of this cluster. */
private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
    Set<String> rotationNames = new LinkedHashSet<>();
    for (ContainerEndpoint endpoint : endpoints) {
        if ( ! endpoint.clusterId().equals(containerClusterName)) continue;
        if (endpoint.scope() != ApplicationClusterEndpoint.Scope.global) continue;
        rotationNames.addAll(endpoint.names());
    }
    container.setProp("rotations", String.join(",", rotationNames));
}
/** Adds 'component' children of the given element, including those nested in 'components' groups. */
private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element parent) {
    for (Element components : XML.getChildren(parent, "components")) {
        // 'components' groups may pull in content via include directives
        addIncludes(components);
        addConfiguredComponents(deployState, cluster, components, "component");
    }
    addConfiguredComponents(deployState, cluster, parent, "component");
}
/** Hosted containers serve a file-backed status page; self-hosted containers get the VIP handler. */
protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
    if ( ! isHostedVespa) {
        cluster.addVipHandler();
        return;
    }
    String name = "status.html";
    // The load balancer status file location may be overridden through the environment
    String statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING))
                                .orElse(HOSTED_VESPA_STATUS_FILE);
    cluster.addComponent(new FileStatusHandlerComponent(name + "-status-handler",
                                                        statusFile,
                                                        SystemBindingPattern.fromHttpPath("/" + name)));
}
/** Adds user-configured 'server' components from services.xml. */
private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    addConfiguredComponents(deployState, cluster, spec, "server");
}
/**
 * Configures access logging: the default access log when none is specified (and default
 * logging is enabled), otherwise the user-specified 'accesslog' elements — except in hosted
 * Vespa where overrides are disallowed. A connection log is added whenever any access log
 * component ends up on the cluster.
 */
protected void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    List<Element> accessLogElements = getAccessLogElements(spec);

    if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault()) {
        cluster.addAccessLog();
    } else {
        if (cluster.isHostedVespa()) {
            log.logApplicationPackage(WARNING, "Applications are not allowed to override the 'accesslog' element");
        } else {
            List<AccessLogComponent> components = new ArrayList<>();
            for (Element accessLog : accessLogElements) {
                AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(accessLogComponent -> {
                    components.add(accessLogComponent);
                    cluster.addComponent(accessLogComponent);
                });
            }
            // Idiomatic emptiness check (was components.size() > 0)
            if ( ! components.isEmpty()) {
                cluster.removeSimpleComponent(VoidRequestLog.class);
                cluster.addSimpleComponent(AccessLog.class);
            }
        }
    }

    // Add connection log if at least one access log component is configured
    if (cluster.getAllComponents().stream().anyMatch(component -> component instanceof AccessLogComponent))
        cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "access"));
}
/** Returns all 'accesslog' children of the given element. */
private List<Element> getAccessLogElements(Element spec) {
    return XML.getChildren(spec, "accesslog");
}
/** Builds the http setup from the 'http' element, then applies hosted-specific http wiring. */
protected void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    Element httpElement = XML.getChild(spec, "http");
    if (httpElement != null) {
        cluster.setHttp(buildHttp(deployState, cluster, httpElement, context));
    }
    if (isHostedTenantApplication(context)) {
        // Order matters: the implicit http/server must exist before connectors and filters are added
        addHostedImplicitHttpIfNotPresent(deployState, cluster);
        addHostedImplicitAccessControlIfNotPresent(deployState, cluster);
        addDefaultConnectorHostedFilterBinding(cluster);
        addCloudMtlsConnector(deployState, cluster);
        addCloudDataPlaneFilter(deployState, cluster);
        addCloudTokenSupport(deployState, cluster);
    }
}
/**
 * For hosted public systems: installs the secure data plane filter chain as default on the
 * mTLS data plane connector, and a no-op "insecure" chain as default on the default web
 * service port (which only serves the VIP handler binding).
 */
private static void addCloudDataPlaneFilter(DeployState deployState, ApplicationContainerCluster cluster) {
    if (!deployState.isHosted() || !deployState.zone().system().isPublic()) return;

    // Setup secure filter chain on the data plane port
    var dataplanePort = getMtlsDataplanePort(deployState);
    var secureChain = new HttpFilterChain("cloud-data-plane-secure", HttpFilterChain.Type.SYSTEM);
    secureChain.addInnerComponent(new CloudDataPlaneFilter(cluster, deployState));
    cluster.getHttp().getFilterChains().add(secureChain);
    // Set the filter as default for the data plane connector
    cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
    .filter(c -> c.getListenPort() == dataplanePort).findAny().orElseThrow()
    .setDefaultRequestFilterChain(secureChain.getComponentId());

    // Setup insecure (no-op) filter chain on the default port, bound to the VIP handler only
    var insecureChain = new HttpFilterChain("cloud-data-plane-insecure", HttpFilterChain.Type.SYSTEM);
    insecureChain.addInnerComponent(new Filter(
    new ChainedComponentModel(
    new BundleInstantiationSpecification(
    new ComponentSpecification("com.yahoo.jdisc.http.filter.security.misc.NoopFilter"),
    null, new ComponentSpecification("jdisc-security-filters")),
    Dependencies.emptyDependencies())));
    cluster.getHttp().getFilterChains().add(insecureChain);
    var insecureChainComponentSpec = new ComponentSpecification(insecureChain.getComponentId().toString());
    FilterBinding insecureBinding =
    FilterBinding.create(FilterBinding.Type.REQUEST, insecureChainComponentSpec, VIP_HANDLER_BINDING);
    cluster.getHttp().getBindings().add(insecureBinding);
    cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
    .filter(c -> c.getListenPort() == Defaults.getDefaults().vespaWebServicePort()).findAny().orElseThrow()
    .setDefaultRequestFilterChain(insecureChain.getComponentId());
}
/**
 * Configures data plane clients for hosted public systems. Without a 'clients' element,
 * legacy mode applies: a single default client from security/clients.pem. Operator/tester
 * certificates from deploy properties are always appended as an internal client.
 */
protected void addClients(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    if (!deployState.isHosted() || !deployState.zone().system().isPublic()) return;

    List<Client> clients;
    Element clientsElement = XML.getChild(spec, "clients");
    boolean legacyMode = false;
    if (clientsElement == null) {
        clients = List.of(new Client(
        "default", List.of(), getCertificates(app.getFile(Path.fromString("security/clients.pem"))), List.of()));
        legacyMode = true;
    } else {
        clients = XML.getChildren(clientsElement, "client").stream()
        .flatMap(elem -> getClient(elem, deployState).stream())
        .toList();
        // Token-only setups are not allowed: at least one client must use mTLS certificates
        boolean atLeastOneClientWithCertificate = clients.stream().anyMatch(client -> !client.certificates().isEmpty());
        if (!atLeastOneClientWithCertificate)
            throw new IllegalArgumentException("At least one client must require a certificate");
    }

    List<X509Certificate> operatorAndTesterCertificates = deployState.getProperties().operatorCertificates();
    if(!operatorAndTesterCertificates.isEmpty())
        clients = Stream.concat(clients.stream(), Stream.of(Client.internalClient(operatorAndTesterCertificates))).toList();
    cluster.setClients(legacyMode, clients);
}
/**
 * Builds a data plane client from a 'client' element. A client authenticates either with
 * certificates (from the referenced files) or with tokens (matched against the tokens known
 * from deploy properties). Returns empty when the client refers only to unknown or
 * version-less tokens.
 *
 * @throws IllegalArgumentException if the id starts with '_' or a certificate file is missing
 */
private Optional<Client> getClient(Element clientElement, DeployState state) {
    String clientId = XML.attribute("id", clientElement).orElseThrow();
    if (clientId.startsWith("_"))
        throw new IllegalArgumentException("Invalid client id '%s', id cannot start with '_'".formatted(clientId));
    List<String> permissions = XML.attribute("permissions", clientElement)
            .map(p -> p.split(",")).stream()
            .flatMap(Arrays::stream)
            .toList();

    var certificates = XML.getChildren(clientElement, "certificate").stream()
            .flatMap(certElem -> {
                var file = app.getFile(Path.fromString(certElem.getAttribute("file")));
                if (!file.exists()) {
                    throw new IllegalArgumentException("Certificate file '%s' for client '%s' does not exist"
                            .formatted(file.getPath().getRelative(), clientId));
                }
                return getCertificates(file).stream();
            })
            .toList();
    // Certificates take precedence: a client with certificates ignores any token elements
    if (!certificates.isEmpty()) return Optional.of(new Client(clientId, permissions, certificates, List.of()));

    var knownTokens = state.getProperties().dataplaneTokens().stream()
            .collect(Collectors.toMap(DataplaneToken::tokenId, Function.identity()));

    var referencedTokens = XML.getChildren(clientElement, "token").stream()
            .map(elem -> {
                var tokenId = elem.getAttribute("id");
                var token = knownTokens.get(tokenId);
                if (token == null)
                    log.logApplicationPackage(
                            WARNING, "Token '%s' for client '%s' does not exist".formatted(tokenId, clientId));
                return token;
            })
            .filter(token -> {
                if (token == null) return false;
                boolean empty = token.versions().isEmpty();
                if (empty)
                    log.logApplicationPackage(
                            WARNING, "Token '%s' for client '%s' has no active versions"
                                    .formatted(token.tokenId(), clientId));
                return !empty;
            })
            .toList();
    if (referencedTokens.isEmpty()) {
        // Fixed message typo: was "activate tokens"
        log.log(Level.INFO, "Skipping client '%s' as it does not refer to any active tokens".formatted(clientId));
        return Optional.empty();
    }
    return Optional.of(new Client(clientId, permissions, List.of(), referencedTokens));
}
/**
 * Reads all X.509 certificates from the given PEM file.
 *
 * @return the certificates, or an empty list when the file does not exist
 * @throws IllegalArgumentException if the file exists but contains no certificates
 * @throws RuntimeException if the file cannot be read
 */
private List<X509Certificate> getCertificates(ApplicationFile file) {
    if (!file.exists()) return List.of();
    // try-with-resources: the original leaked the reader if readAll threw before close()
    try (Reader reader = file.createReader()) {
        String certPem = IOUtils.readAll(reader);
        List<X509Certificate> x509Certificates = X509CertificateUtils.certificateListFromPem(certPem);
        if (x509Certificates.isEmpty()) {
            throw new IllegalArgumentException("File %s does not contain any certificates.".formatted(file.getPath().getRelative()));
        }
        return x509Certificates;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
/** Lets a configured access control take over the default hosted connector, if present. */
private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) {
    cluster.getHttp().getAccessControl()
    .ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(cluster.getHttp()));
}
private void addCloudMtlsConnector(DeployState state, ApplicationContainerCluster cluster) {
JettyHttpServer server = cluster.getHttp().getHttpServer().get();
String serverName = server.getComponentId().getName();
var builder = HostedSslConnectorFactory.builder(serverName, getMtlsDataplanePort(state))
.proxyProtocol(true, state.getProperties().featureFlags().enableProxyProtocolMixedMode())
.tlsCiphersOverride(state.getProperties().tlsCiphersOverride())
.endpointConnectionTtl(state.getProperties().endpointConnectionTtl());
var endpointCert = state.endpointCertificateSecrets().orElse(null);
if (endpointCert != null) {
builder.endpointCertificate(endpointCert);
boolean isPublic = state.zone().system().isPublic();
List<X509Certificate> clientCertificates = getClientCertificates(cluster);
if (isPublic) {
if (clientCertificates.isEmpty())
throw new IllegalArgumentException("Client certificate authority security/clients.pem is missing - " +
"see: https:
builder.tlsCaCertificatesPem(X509CertificateUtils.toPem(clientCertificates))
.clientAuth(SslClientAuth.WANT_WITH_ENFORCER);
} else {
builder.tlsCaCertificatesPath("/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem");
var needAuth = cluster.getHttp().getAccessControl()
.map(accessControl -> accessControl.clientAuthentication)
.map(clientAuth -> clientAuth == AccessControl.ClientAuthentication.need)
.orElse(false);
builder.clientAuth(needAuth ? SslClientAuth.NEED : SslClientAuth.WANT);
}
} else {
builder.clientAuth(SslClientAuth.WANT_WITH_ENFORCER);
}
var connectorFactory = builder.build();
cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory));
server.addConnector(connectorFactory);
}
/** Returns all certificates across the cluster's configured clients. */
private List<X509Certificate> getClientCertificates(ApplicationContainerCluster cluster) {
    return cluster.getClients().stream()
                  .flatMap(client -> client.certificates().stream())
                  .toList();
}
/** Returns whether this deployment is a tenant application in a hosted system. */
private static boolean isHostedTenantApplication(ConfigModelContext context) {
    return context.getDeployState().isHostedTenantApplication(context.getApplicationType());
}
/** Ensures hosted clusters always have an http element, a Jetty server and a connector on the default port. */
private static void addHostedImplicitHttpIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
    if (cluster.getHttp() == null)
        cluster.setHttp(new Http(new FilterChains(cluster)));

    JettyHttpServer server = cluster.getHttp().getHttpServer().orElseGet(() -> {
        JettyHttpServer defaultServer = new JettyHttpServer("DefaultHttpServer", cluster, deployState);
        cluster.getHttp().setHttpServer(defaultServer);
        return defaultServer;
    });

    int defaultPort = Defaults.getDefaults().vespaWebServicePort();
    boolean hasDefaultConnector = server.getConnectorFactories().stream()
            .anyMatch(connector -> connector.getListenPort() == defaultPort);
    if ( ! hasDefaultConnector)
        server.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build());
}
/** Sets up access control backed by the tenant's Athenz domain unless already configured. */
private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
    Http http = cluster.getHttp();
    if (http.getAccessControl().isPresent()) return; // access control added explicitly

    deployState.getProperties().athenzDomain().ifPresent(tenantDomain ->
            new AccessControl.Builder(tenantDomain.value())
                    .setHandlers(cluster)
                    .clientAuthentication(AccessControl.ClientAuthentication.need)
                    .build()
                    .configureHttpFilterChains(http));
}
/** Builds the http setup from the 'http' element, removing all servers when networking is disabled. */
private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement, ConfigModelContext context) {
    Http http = new HttpBuilder(portBindingOverride(deployState, context)).build(deployState, cluster, httpElement);

    if (networking == Networking.disable)
        http.removeAllServers();

    return http;
}
/** Builds and attaches the document API to the cluster, when specified. */
private void addDocumentApi(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    ContainerDocumentApi documentApi = buildDocumentApi(deployState, cluster, spec, context);
    if (documentApi != null)
        cluster.setDocumentApi(documentApi);
}
/** Builds and attaches document processing, when specified, including the message bus tuning it implies. */
private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec);
    if (containerDocproc == null) return;
    cluster.setDocproc(containerDocproc);

    // Docproc options also determine the message bus parameters for the cluster
    ContainerDocproc.Options docprocOptions = containerDocproc.options;
    cluster.setMbusParams(new ApplicationContainerCluster.MbusParams(
    docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
}
/** Builds and attaches the search chain setup and handler from the 'search' element, when present. */
private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    Element searchElement = XML.getChild(spec, "search");
    if (searchElement == null) return;

    addIncludes(searchElement);
    cluster.setSearch(buildSearch(deployState, cluster, searchElement));

    addSearchHandler(deployState, cluster, searchElement, context);

    // User-configured renderers may not shadow the built-in xml/json renderer ids
    validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/**
 * Sets up stateless model evaluation on the cluster when the spec contains a
 * 'model-evaluation' element. Per-model ONNX overrides (execution mode, thread counts,
 * GPU device) are read from the optional onnx/models/model children; models named in the
 * spec but absent from the rank profiles are logged and skipped.
 */
private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
if (modelEvaluationElement == null) return;
// Fall back to an empty profile list when no vespa model exists (e.g. standalone container).
RankProfileList profiles =
context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;
// Clone so the per-model overrides below do not mutate the shared rank-profile state.
FileDistributedOnnxModels models = profiles.getOnnxModels().clone();
// NOTE(review): onnxElement may be null when <onnx> is absent; assumes XML.getChild
// tolerates a null parent — confirm against com.yahoo.text.XML.
Element onnxElement = XML.getChild(modelEvaluationElement, "onnx");
Element modelsElement = XML.getChild(onnxElement, "models");
for (Element modelElement : XML.getChildren(modelsElement, "model") ) {
OnnxModel onnxModel = models.asMap().get(modelElement.getAttribute("name"));
if (onnxModel == null) {
// Unknown model name: warn with the set of valid names and skip this element.
String availableModels = String.join(", ", profiles.getOnnxModels().asMap().keySet());
context.getDeployState().getDeployLogger().logApplicationPackage(WARNING,
"Model '" + modelElement.getAttribute("name") + "' not found. Available ONNX " +
"models are: " + availableModels + ". Skipping this configuration.");
continue;
}
onnxModel.setStatelessExecutionMode(getStringValue(modelElement, "execution-mode", null));
onnxModel.setStatelessInterOpThreads(getIntValue(modelElement, "interop-threads", -1));
onnxModel.setStatelessIntraOpThreads(getIntValue(modelElement, "intraop-threads", -1));
Element gpuDeviceElement = XML.getChild(modelElement, "gpu-device");
if (gpuDeviceElement != null) {
int gpuDevice = Integer.parseInt(gpuDeviceElement.getTextContent());
// The device is only honoured when at least one container host actually has GPU resources.
boolean hasGpu = cluster.getContainers().stream().anyMatch(container -> container.getHostResource() != null &&
!container.getHostResource().realResources().gpuResources().isZero());
onnxModel.setGpuDevice(gpuDevice, hasGpu);
}
cluster.onnxModelCost().registerModel(context.getApplicationPackage().getFile(onnxModel.getFilePath()));
}
cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles, models));
}
/** Returns the text content of the named child element, or {@code defaultValue} if absent. */
private String getStringValue(Element element, String name, String defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return child.getTextContent();
}
/** Returns the named child element's text parsed as an int, or {@code defaultValue} if absent. */
private int getIntValue(Element element, String name, int defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return Integer.parseInt(child.getTextContent());
}
/**
 * Adds the model-evaluation runtime to the cluster. These bundles and components are added to
 * every application container cluster — even without 'model-evaluation' in services.xml —
 * because the model-evaluation bundle exposes many public API packages usable by customer code.
 */
protected void addModelEvaluationRuntime(ApplicationContainerCluster cluster) {
    cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_EVALUATION_BUNDLE_FILE);
    cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_INTEGRATION_BUNDLE_FILE);
    cluster.addPlatformBundle(ContainerModelEvaluation.ONNXRUNTIME_BUNDLE_FILE);
    // The ONNX runtime is always available for injection into any component.
    cluster.addSimpleComponent(ContainerModelEvaluation.ONNX_RUNTIME_CLASS, null,
                               ContainerModelEvaluation.INTEGRATION_BUNDLE_NAME);
    // Runtime providing utilities (such as metrics) to embedder implementations.
    cluster.addSimpleComponent("ai.vespa.embedding.EmbedderRuntime", null,
                               ContainerModelEvaluation.INTEGRATION_BUNDLE_NAME);
}
/** Sets up processing chains on the cluster if the spec contains a 'processing' element. */
private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    Element processing = XML.getChild(spec, "processing");
    if (processing == null) return;
    cluster.addSearchAndDocprocBundles();
    addIncludes(processing);
    BindingPattern[] bindings =
            serverBindings(deployState, context, processing, ProcessingChains.defaultBindings).toArray(BindingPattern[]::new);
    cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processing), bindings);
    validateAndAddConfiguredComponents(deployState, cluster, processing, "renderer", ContainerModelBuilder::validateRendererElement);
}
/** Builds the container search setup: chains, page templates, query profiles and semantic rules. */
private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) {
    SearchChains chains = new DomSearchChainsBuilder().build(deployState, containerCluster, producerSpec);
    ContainerSearch search = new ContainerSearch(deployState, containerCluster, chains);
    applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), search);
    search.setQueryProfiles(deployState.getQueryProfiles());
    search.setSemanticRules(deployState.getSemanticRules());
    return search;
}
/** Validates and applies page templates from the application package to the search setup. */
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage, ContainerSearch containerSearch) {
    PageTemplates.validate(applicationPackage);
    var pageTemplates = PageTemplates.create(applicationPackage);
    containerSearch.setPageTemplates(pageTemplates);
}
/** Adds each user-declared 'handler' element in the spec as a component on the cluster. */
private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    var ports = portBindingOverride(deployState, context);
    for (Element handlerElement : XML.getChildren(spec, "handler")) {
        var handler = new DomHandlerBuilder(cluster, ports).build(deployState, cluster, handlerElement);
        cluster.addComponent(handler);
    }
}
/** Rejects any container element whose version attribute is not 1.0. */
private void checkVersion(Element spec) {
    String version = spec.getAttribute("version");
    if (Version.fromString(version).equals(new Version(1))) return;
    throw new IllegalArgumentException("Expected container version to be 1.0, but got " + version);
}
/** Adds nodes to the cluster: a single standalone node, or nodes declared in the XML spec. */
private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    if ( ! standaloneBuilder) {
        addNodesFromXml(cluster, spec, context);
        return;
    }
    addStandaloneNode(cluster, context.getDeployState());
}
/** Adds a single node named "standalone" to the cluster, indexed after any existing containers. */
private void addStandaloneNode(ApplicationContainerCluster cluster, DeployState deployState) {
    int index = cluster.getContainers().size();
    ApplicationContainer node = new ApplicationContainer(cluster, "standalone", index, deployState);
    cluster.addContainers(List.of(node));
}
/** Validates and resolves JVM GC options via {@link JvmGcOptions}, falling back to platform defaults. */
private static String buildJvmGCOptions(ConfigModelContext context, String jvmGCOptions) {
return new JvmGcOptions(context.getDeployState(), jvmGCOptions).build();
}
/**
 * Validates and returns the JVM options declared under the nodes element.
 * With {@code legacyOptions} set, reads the deprecated 'jvm-options' attribute instead of the 'jvm' child element.
 */
private static String getJvmOptions(Element nodesElement,
DeployState deployState,
boolean legacyOptions) {
return new JvmOptions(nodesElement, deployState, legacyOptions).build();
}
/** Returns the value of the named attribute, or null when the attribute is not present. */
private static String extractAttribute(Element element, String attrName) {
    if ( ! element.hasAttribute(attrName)) return null;
    return element.getAttribute(attrName);
}
/**
 * Applies JVM options to the given nodes, reading the 'jvm' child element when present
 * and otherwise falling back to the deprecated attributes on the nodes tag.
 */
private void extractJvmOptions(List<ApplicationContainer> nodes,
                               ApplicationContainerCluster cluster,
                               Element nodesElement,
                               ConfigModelContext context) {
    Element jvmElement = XML.getChild(nodesElement, "jvm");
    if (jvmElement != null)
        extractJvmTag(nodes, cluster, nodesElement, jvmElement, context);
    else
        extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
}
/**
 * Applies JVM settings declared directly on the (legacy) nodes tag: 'jvm-options',
 * 'jvm-gc-options' and 'allocated-memory'. Deprecation warnings are logged for each
 * legacy attribute actually in use.
 */
private void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element nodesElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, getJvmOptions(nodesElement, context.getDeployState(), true));
// Only apply legacy GC options when none were set from the 'jvm' element path.
if (cluster.getJvmGCOptions().isEmpty()) {
String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS);
if (jvmGCOptions != null && !jvmGCOptions.isEmpty()) {
DeployLogger logger = context.getDeployState().getDeployLogger();
// NOTE(review): the string below appears truncated after "https:" — likely lost a doc URL; restore from upstream.
logger.logApplicationPackage(WARNING, "'jvm-gc-options' is deprecated and will be removed in Vespa 9." +
" Please merge into 'gc-options' in 'jvm' element." +
" See https:
}
cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions));
}
// applyMemoryPercentage returns true only when a value was actually set, triggering the deprecation warning.
// NOTE(review): the warning string below also appears truncated after "https:".
if (applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME)))
context.getDeployState().getDeployLogger()
.logApplicationPackage(WARNING, "'allocated-memory' is deprecated and will be removed in Vespa 9." +
" Please merge into 'allocated-memory' in 'jvm' element." +
" See https:
}
/** Applies JVM settings from the 'jvm' element: options, allocated memory and GC options. */
private void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
                           Element nodesElement, Element jvmElement, ConfigModelContext context) {
    applyNodesTagJvmArgs(nodes, getJvmOptions(nodesElement, context.getDeployState(), false));
    applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
    String gcOptions = extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS);
    cluster.setJvmGCOptions(buildJvmGCOptions(context, gcOptions));
}
/**
 * Add nodes to cluster according to the given containerElement.
 *
 * Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set
 * of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed
 * simultaneously for all active config models.
 */
private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
Element nodesElement = XML.getChild(containerElement, "nodes");
if (nodesElement == null) {
// No nodes tag: allocate the implicit default cluster.
cluster.addContainers(allocateWithoutNodesTag(cluster, context));
} else {
List<ApplicationContainer> nodes = createNodes(cluster, containerElement, nodesElement, context);
extractJvmOptions(nodes, cluster, nodesElement, context);
applyDefaultPreload(nodes, nodesElement);
// Propagate declared environment variables to every node.
var envVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT)).entrySet();
for (var container : nodes) {
for (var entry : envVars) {
container.addEnvironmentVariable(entry.getKey(), entry.getValue());
}
}
if (useCpuSocketAffinity(nodesElement))
AbstractService.distributeCpuSocketAffinity(nodes);
cluster.addContainers(nodes);
}
}
/** Returns the zone endpoint declared by the deployment spec for this instance, zone and cluster. */
private ZoneEndpoint zoneEndpoint(ConfigModelContext context, ClusterSpec.Id cluster) {
    var properties = context.properties();
    InstanceName instance = properties.applicationId().instance();
    ZoneId zone = ZoneId.from(properties.zone().environment(), properties.zone().region());
    return context.getApplicationPackage().getDeploymentSpec().zoneEndpoint(instance, zone, cluster);
}
/**
 * Reads name/value pairs from the children of the environment-variables element,
 * preserving declaration order. Names are validated/normalized as identifiers.
 * Returns an empty map when the element is null.
 */
private static Map<String, String> getEnvironmentVariables(Element environmentVariables) {
    Map<String, String> variables = new LinkedHashMap<>();
    if (environmentVariables == null) return variables;
    for (Element child : XML.getChildren(environmentVariables)) {
        var identifier = new com.yahoo.text.Identifier(child.getNodeName());
        variables.put(identifier.toString(), child.getTextContent());
    }
    return variables;
}
/**
 * Dispatches node creation based on the nodes element's attributes, in priority order:
 * 'type', 'of' (deprecated combined cluster), 'count', hosted manual deployment, explicit node list.
 */
private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element containerElement,
                                               Element nodesElement, ConfigModelContext context) {
    if (nodesElement.hasAttribute("type"))
        return createNodesFromNodeType(cluster, nodesElement, context);
    if (nodesElement.hasAttribute("of")) {
        List<ApplicationContainer> containers = createNodesFromContentServiceReference(cluster, nodesElement, context);
        log.logApplicationPackage(WARNING, "Declaring combined cluster with <nodes of=\"...\"> is deprecated without " +
                                           "replacement, and the feature will be removed in Vespa 9. Use separate container and " +
                                           "content clusters instead");
        return containers;
    }
    if (nodesElement.hasAttribute("count"))
        return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
    if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed())
        return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
    return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
}
/**
 * Parses a percentage string such as "60%" and applies it as the cluster's memory percentage.
 * Returns false when no value is given, true when a value was applied.
 *
 * @throws IllegalArgumentException if the '%' sign is missing or the number does not parse
 */
private static boolean applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
    if (memoryPercentage == null || memoryPercentage.isEmpty()) return false;
    String trimmed = memoryPercentage.trim();
    if ( ! trimmed.endsWith("%"))
        throw new IllegalArgumentException("Missing % sign");
    String digits = trimmed.substring(0, trimmed.length() - 1).trim();
    try {
        cluster.setMemoryPercentage(Integer.parseInt(digits));
    }
    catch (NumberFormatException e) {
        // Only the numeric parse is wrapped; the missing-% error above propagates as-is.
        throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
                                           " must be an integer percentage ending by the '%' sign", e);
    }
    return true;
}
/** Allocate a container cluster without a nodes tag */
private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    HostSystem hostSystem = cluster.hostSystem();
    if ( ! deployState.isHosted())
        return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);
    // Hosted: provision dedicated nodes; two in production for redundancy, one elsewhere.
    int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
    deployState.getDeployLogger().logApplicationPackage(Level.INFO, "Using " + nodeCount + " nodes in " + cluster);
    var nodesSpec = NodesSpecification.dedicated(nodeCount, context);
    ClusterSpec.Id clusterId = ClusterSpec.Id.from(cluster.getName());
    var hosts = nodesSpec.provision(hostSystem,
                                    ClusterSpec.Type.container,
                                    clusterId,
                                    zoneEndpoint(context, clusterId),
                                    deployState.getDeployLogger(),
                                    false,
                                    context.clusterInfo().build());
    return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
/** Creates a one-node cluster placed on the given host (non-hosted, single-node setups). */
private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    ApplicationContainer container = new ApplicationContainer(cluster, "container.0", 0, deployState);
    container.setHostResource(host);
    container.initService(deployState);
    return List.of(container);
}
/**
 * Provisions nodes according to the count/resources declared on the nodes element.
 * IllegalArgumentExceptions are rethrown with the cluster as context.
 */
private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
    try {
        var spec = NodesSpecification.from(new ModelElement(nodesElement), context);
        var clusterId = ClusterSpec.Id.from(cluster.name());
        boolean hasZooKeeper = getZooKeeper(containerElement) != null;
        var hosts = spec.provision(cluster.getRoot().hostSystem(),
                                   ClusterSpec.Type.container,
                                   clusterId,
                                   zoneEndpoint(context, clusterId),
                                   log,
                                   hasZooKeeper,
                                   context.clusterInfo().build());
        return createNodesFromHosts(hosts, cluster, context.getDeployState());
    }
    catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("In " + cluster, e);
    }
}
/** Allocates hosts of the node type named by the 'type' attribute (e.g. for infrastructure clusters). */
private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodeType nodeType = NodeType.valueOf(nodesElement.getAttribute("type"));
    DeployState deployState = context.getDeployState();
    ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
                                         .vespaVersion(deployState.getWantedNodeVespaVersion())
                                         .dockerImageRepository(deployState.getWantedDockerImageRepo())
                                         .build();
    var hosts = cluster.getRoot().hostSystem().allocateHosts(clusterSpec, Capacity.fromRequiredNodeType(nodeType), log);
    return createNodesFromHosts(hosts, cluster, deployState);
}
/** Provisions container nodes co-located with the content cluster named by the 'of' attribute. */
private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodesSpecification spec;
    try {
        spec = NodesSpecification.from(new ModelElement(nodesElement), context);
    }
    catch (IllegalArgumentException e) {
        throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
    }
    String contentClusterId = nodesElement.getAttribute("of");
    cluster.setHostClusterId(contentClusterId);
    var hosts = StorageGroup.provisionHosts(spec, contentClusterId, cluster.getRoot().hostSystem(), context);
    return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
/** Wraps each provisioned host in an ApplicationContainer named "container.&lt;index&gt;". */
private List<ApplicationContainer> createNodesFromHosts(Map<HostResource, ClusterMembership> hosts,
                                                        ApplicationContainerCluster cluster,
                                                        DeployState deployState) {
    List<ApplicationContainer> containers = new ArrayList<>();
    hosts.forEach((host, membership) -> {
        String id = "container." + membership.index();
        ApplicationContainer container =
                new ApplicationContainer(cluster, id, membership.retired(), membership.index(), deployState);
        container.setHostResource(host);
        container.initService(deployState);
        containers.add(container);
    });
    return containers;
}
/** Builds one container per explicit 'node' child element, indexed by declaration order. */
private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
    List<Element> nodeElements = XML.getChildren(nodesElement, "node");
    List<ApplicationContainer> containers = new ArrayList<>();
    for (int index = 0; index < nodeElements.size(); index++) {
        var builder = new ContainerServiceBuilder("container." + index, index);
        containers.add(builder.build(deployState, cluster, nodeElements.get(index)));
    }
    return containers;
}
/** Returns whether the nodes element requests CPU socket affinity; false when the attribute is absent. */
private static boolean useCpuSocketAffinity(Element nodesElement) {
    return nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)
           && Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
}
/** Prepends the given JVM args to each container that has no options assigned yet. */
private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
    containers.stream()
              .filter(container -> container.getAssignedJvmOptions().isEmpty())
              .forEach(container -> container.prependJvmOptions(jvmArgs));
}
/** Applies the nodes element's preload attribute, when present, to every container. */
private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
    if ( ! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
    String preload = nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME);
    containers.forEach(container -> container.setPreLoad(preload));
}
/** Adds the search handler component, binding it to the dataplane ports for hosted tenant applications. */
private void addSearchHandler(DeployState deployState, ApplicationContainerCluster cluster, Element searchElement, ConfigModelContext context) {
    List<BindingPattern> defaultBindings = isHostedTenantApplication(context)
            ? SearchHandler.bindingPattern(getDataplanePorts(deployState))
            : List.<BindingPattern>of(SearchHandler.DEFAULT_BINDING);
    var threadpoolOptions = ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null);
    SearchHandler handler = new SearchHandler(cluster,
                                              serverBindings(deployState, context, searchElement, defaultBindings),
                                              threadpoolOptions);
    cluster.addComponent(handler);
    handler.addComponent(Component.fromClassAndBundle(SearchHandler.EXECUTION_FACTORY, PlatformBundles.SEARCH_AND_DOCPROC_BUNDLE));
}
/** Returns user-declared 'binding' patterns, or a copy of the defaults when none are declared. */
private List<BindingPattern> serverBindings(DeployState deployState, ConfigModelContext context, Element searchElement, Collection<BindingPattern> defaultBindings) {
    List<Element> bindingElements = XML.getChildren(searchElement, "binding");
    return bindingElements.isEmpty() ? List.copyOf(defaultBindings)
                                     : toBindingList(deployState, context, bindingElements);
}
/** Converts non-empty binding elements to patterns, applying dataplane port overrides for hosted tenant apps. */
private List<BindingPattern> toBindingList(DeployState deployState, ConfigModelContext context, List<Element> bindingElements) {
    var portOverride = isHostedTenantApplication(context) ? getDataplanePorts(deployState) : Set.<Integer>of();
    List<BindingPattern> patterns = new ArrayList<>();
    for (Element binding : bindingElements) {
        String pattern = binding.getTextContent().trim();
        if (pattern.isEmpty()) continue;
        patterns.addAll(userBindingPattern(pattern, portOverride));
    }
    return patterns;
}
/** Expands one binding path to a pattern per overridden port, or the single base pattern when no overrides. */
private static Collection<UserBindingPattern> userBindingPattern(String path, Set<Integer> portBindingOverride) {
    UserBindingPattern base = UserBindingPattern.fromPattern(path);
    if (portBindingOverride.isEmpty())
        return Set.of(base);
    List<UserBindingPattern> patterns = new ArrayList<>();
    for (int port : portBindingOverride)
        patterns.add(base.withOverriddenPort(port));
    return patterns;
}
/** Builds the document API from the 'document-api' element, or returns null when absent. */
private ContainerDocumentApi buildDocumentApi(DeployState deployState, ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    Element documentApiElement = XML.getChild(spec, "document-api");
    if (documentApiElement == null) return null;
    ContainerDocumentApi.HandlerOptions handlerOptions = DocumentApiOptionsBuilder.build(documentApiElement);
    Element ignoreUndefinedFields = XML.getChild(documentApiElement, "ignore-undefined-fields");
    boolean ignore = "true".equals(XML.getValue(ignoreUndefinedFields));
    return new ContainerDocumentApi(cluster, handlerOptions, ignore, portBindingOverride(deployState, context));
}
/** Returns the dataplane ports to override bindings with for hosted tenant apps, otherwise an empty set. */
private Set<Integer> portBindingOverride(DeployState deployState, ConfigModelContext context) {
    if ( ! isHostedTenantApplication(context)) return Set.of();
    return getDataplanePorts(deployState);
}
/** Builds document processing from the 'document-processing' element, or returns null when absent. */
private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    Element docprocElement = XML.getChild(spec, "document-processing");
    if (docprocElement == null) return null;
    addIncludes(docprocElement);
    DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
    ContainerDocproc.Options options = DocprocOptionsBuilder.build(docprocElement, deployState.getDeployLogger());
    return new ContainerDocproc(cluster, chains, options, !standaloneBuilder);
}
/**
 * Expands all 'include' children of the given element in place.
 *
 * @throws IllegalArgumentException if includes are used without an application package
 */
private void addIncludes(Element parentElement) {
    List<Element> includeElements = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includeElements.isEmpty()) return;
    if (app == null)
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
    includeElements.forEach(include -> addInclude(parentElement, include));
}
/** Imports every child element of every file in the include directory into the parent element. */
private void addInclude(Element parentElement, Element include) {
    String dirName = include.getAttribute(IncludeDirs.DIR);
    app.validateIncludeDir(dirName);
    for (Element includedFile : Xml.allElemsFromPath(app, dirName)) {
        for (Element child : XML.getChildren(includedFile)) {
            // importNode with deep=true copies the whole subtree into the parent's document.
            parentElement.appendChild(parentElement.getOwnerDocument().importNode(child, true));
        }
    }
}
/** Adds every child element named componentName as a component, resolving model ids first. */
private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
                                            Element parent, String componentName) {
    for (Element componentElement : XML.getChildren(parent, componentName)) {
        ModelIdResolver.resolveModelIds(componentElement, deployState.isHosted());
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
    }
}
/** Validates each child element named componentName with the given validator, then adds it as a component. */
private static void validateAndAddConfiguredComponents(DeployState deployState,
                                                       ContainerCluster<? extends Container> cluster,
                                                       Element spec,
                                                       String componentName,
                                                       Consumer<Element> elementValidator) {
    for (Element componentElement : XML.getChildren(spec, componentName)) {
        elementValidator.accept(componentElement); // throws on invalid elements
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
    }
}
/**
 * Configures the Athenz identity provider on the cluster when the deployment spec declares an
 * Athenz domain: replaces the default provider component, adds the service identity provider,
 * and tags every container with its identity domain and service.
 *
 * @throws IllegalArgumentException if a domain is declared but the instance has no Athenz service
 */
private void addIdentityProvider(ApplicationContainerCluster cluster,
List<ConfigServerSpec> configServerSpecs,
HostName loadBalancerName,
URI ztsUrl,
String athenzDnsSuffix,
Zone zone,
DeploymentSpec spec) {
spec.athenzDomain()
.ifPresent(domain -> {
AthenzService service = spec.athenzService(app.getApplicationId().instance(), zone.environment(), zone.region())
.orElseThrow(() -> new IllegalArgumentException("Missing Athenz service configuration in instance '" +
app.getApplicationId().instance() + "'"));
String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
IdentityProvider identityProvider = new IdentityProvider(domain,
service,
getLoadBalancerName(loadBalancerName, configServerSpecs),
ztsUrl,
zoneDnsSuffix,
zone);
// Remove the default provider before installing the configured one.
cluster.removeComponent(ComponentId.fromString("com.yahoo.container.jdisc.AthenzIdentityProviderProvider"));
cluster.addComponent(identityProvider);
var serviceIdentityProviderProvider = "com.yahoo.vespa.athenz.identityprovider.client.ServiceIdentityProviderProvider";
cluster.addComponent(new SimpleComponent(new ComponentModel(serviceIdentityProviderProvider, serviceIdentityProviderProvider, "vespa-athenz")));
cluster.getContainers().forEach(container -> {
container.setProp("identity.domain", domain.value());
container.setProp("identity.service", service.value());
});
});
}
/** Returns the given load balancer name, or falls back to the first config server's host name ("unknown" if none). */
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
    if (loadbalancerName != null) return loadbalancerName;
    String fallback = configServerSpecs.stream()
                                       .findFirst()
                                       .map(ConfigServerSpec::getHostName)
                                       .orElse("unknown");
    return HostName.of(fallback);
}
/** Returns the 'zookeeper' child of the given element, or null when absent. */
private static Element getZooKeeper(Element spec) {
return XML.getChild(spec, "zookeeper");
}
/** Disallow renderers named "XmlRenderer" or "JsonRenderer" */
private static void validateRendererElement(Element element) {
    String id = element.getAttribute("id");
    if ( ! id.equals(xmlRendererId) && ! id.equals(jsonRendererId)) return;
    throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", id));
}
/** Returns whether the given element is a 'container' element. */
public static boolean isContainerTag(Element element) {
return CONTAINER_TAG.equals(element.getTagName());
}
/**
 * Validates JVM options and logs a warning or fails deployment (depending on feature flag)
 * if anyone of them has invalid syntax or is an option that is unsupported for the running system.
 */
private static class JvmOptions {
// NOTE(review): the range 'a-zA-z' also admits the characters between 'Z' and 'a'
// ([ \ ] ^ _ `). Likely intended 'a-zA-Z'; tightening it may reject options that
// previously passed (e.g. containing '_'), so confirm before changing.
private static final Pattern validPattern = Pattern.compile("-[a-zA-z0-9=:./,+*-]+");
// Remote-debug transport options are never allowed in hosted Vespa.
private static final Pattern invalidInHostedPattern = Pattern.compile("-Xrunjdwp:transport=.*");
private final Element nodesElement;
private final DeployLogger logger;
private final boolean legacyOptions;  // read deprecated 'jvm-options' attribute instead of the 'jvm' element
private final boolean isHosted;       // hosted deployments fail on invalid options; self-hosted only warn
public JvmOptions(Element nodesElement, DeployState deployState, boolean legacyOptions) {
this.nodesElement = nodesElement;
this.logger = deployState.getDeployLogger();
this.legacyOptions = legacyOptions;
this.isHosted = deployState.isHosted();
}
// Returns the validated options string; empty when no 'jvm' element or options attribute exists.
String build() {
if (legacyOptions)
return buildLegacyOptions();
Element jvmElement = XML.getChild(nodesElement, "jvm");
if (jvmElement == null) return "";
String jvmOptions = jvmElement.getAttribute(VespaDomBuilder.OPTIONS);
if (jvmOptions.isEmpty()) return "";
validateJvmOptions(jvmOptions);
return jvmOptions;
}
// Reads the deprecated 'jvm-options' attribute on the nodes tag, warning when it is used.
// NOTE(review): returns null (not "") when the attribute is absent — callers must tolerate null.
String buildLegacyOptions() {
String jvmOptions = null;
if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
if (! jvmOptions.isEmpty())
// NOTE(review): the string below appears truncated after "https:" — likely lost a doc URL; restore from upstream.
logger.logApplicationPackage(WARNING, "'jvm-options' is deprecated and will be removed in Vespa 9." +
" Please merge 'jvm-options' into 'options' or 'gc-options' in 'jvm' element." +
" See https:
}
validateJvmOptions(jvmOptions);
return jvmOptions;
}
// Collects syntactically invalid options (plus hosted-forbidden ones when hosted);
// throws in hosted, logs a warning otherwise.
private void validateJvmOptions(String jvmOptions) {
if (jvmOptions == null || jvmOptions.isEmpty()) return;
String[] optionList = jvmOptions.split(" ");
List<String> invalidOptions = Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> !Pattern.matches(validPattern.pattern(), option))
.sorted()
.collect(Collectors.toCollection(ArrayList::new));
if (isHosted)
invalidOptions.addAll(Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> Pattern.matches(invalidInHostedPattern.pattern(), option))
.sorted().toList());
if (invalidOptions.isEmpty()) return;
// NOTE(review): the string below appears truncated after "https:".
String message = "Invalid or misplaced JVM options in services.xml: " +
String.join(",", invalidOptions) + "." +
" See https:
if (isHosted)
throw new IllegalArgumentException(message);
else
logger.logApplicationPackage(WARNING, message);
}
}
/**
 * Validates JVM GC options and logs a warning or fails deployment (depending on feature flag)
 * if anyone of them has invalid syntax or is an option that is unsupported for the running system
 * (e.g. uses CMS options for hosted Vespa, which uses JDK 17).
 */
private static class JvmGcOptions {
// NOTE(review): 'a-zA-z' also admits [ \ ] ^ _ ` — likely intended 'a-zA-Z'; confirm before tightening.
private static final Pattern validPattern = Pattern.compile("-XX:[+-]*[a-zA-z0-9=]+");
// CMS was removed from modern JDKs; any CMS-specific flag is rejected.
private static final Pattern invalidCMSPattern = Pattern.compile("-XX:[+-]\\w*CMS[a-zA-z0-9=]+");
private final DeployState deployState;
private final String jvmGcOptions;  // user-specified options; may be null, then platform defaults apply
private final DeployLogger logger;
private final boolean isHosted;     // hosted deployments fail on invalid options; self-hosted only warn
public JvmGcOptions(DeployState deployState, String jvmGcOptions) {
this.deployState = deployState;
this.jvmGcOptions = jvmGcOptions;
this.logger = deployState.getDeployLogger();
this.isHosted = deployState.isHosted();
}
// Returns the effective GC options: user-specified (validated) if given, else deployment
// properties, else the platform default (parallel GC hosted, G1 self-hosted).
private String build() {
String options = deployState.getProperties().jvmGCOptions();
if (jvmGcOptions != null) {
options = jvmGcOptions;
String[] optionList = options.split(" ");
List<String> invalidOptions = Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> !Pattern.matches(validPattern.pattern(), option)
|| Pattern.matches(invalidCMSPattern.pattern(), option)
|| option.equals("-XX:+UseConcMarkSweepGC"))
.sorted()
.toList();
logOrFailInvalidOptions(invalidOptions);
}
if (options == null || options.isEmpty())
options = deployState.isHosted() ? ContainerCluster.PARALLEL_GC : ContainerCluster.G1GC;
return options;
}
// Throws in hosted, logs a warning otherwise; no-op when there are no invalid options.
private void logOrFailInvalidOptions(List<String> options) {
if (options.isEmpty()) return;
// NOTE(review): the string below appears truncated after "https:" — likely lost a doc URL; restore from upstream.
String message = "Invalid or misplaced JVM GC options in services.xml: " +
String.join(",", options) + "." +
" See https:
if (isHosted)
throw new IllegalArgumentException(message);
else
logger.logApplicationPackage(WARNING, message);
}
}
/** Returns all dataplane ports: always the mTLS port, plus the token port when token support is enabled. */
private static Set<Integer> getDataplanePorts(DeployState ds) {
    int mtlsPort = getMtlsDataplanePort(ds);
    OptionalInt tokenPort = getTokenDataplanePort(ds);
    if (tokenPort.isEmpty()) return Set.of(mtlsPort);
    return Set.of(mtlsPort, tokenPort.getAsInt());
}
/** Returns the mTLS dataplane port: 8443 when token support is enabled, 4443 otherwise. */
private static int getMtlsDataplanePort(DeployState ds) {
return enableTokenSupport(ds) ? 8443 : 4443;
}
/** Returns the token dataplane port (8444) when token support is enabled, empty otherwise. */
private static OptionalInt getTokenDataplanePort(DeployState ds) {
return enableTokenSupport(ds) ? OptionalInt.of(8444) : OptionalInt.empty();
}
/** Returns the subset of endpoints using token authentication. */
private static Set<ContainerEndpoint> tokenEndpoints(DeployState deployState) {
    return deployState.getEndpoints()
                      .stream()
                      .filter(candidate -> candidate.authMethod() == ApplicationClusterEndpoint.AuthMethod.token)
                      .collect(Collectors.toSet());
}
/** Token support is enabled only for hosted public-system deployments that declare token endpoints. */
private static boolean enableTokenSupport(DeployState state) {
    if ( ! state.isHosted() || ! state.zone().system().isPublic()) return false;
    return ! tokenEndpoints(state).isEmpty();
}
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";
private static final String CONTAINER_TAG = "container";
private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";
private static final int MIN_ZOOKEEPER_NODE_COUNT = 1;
private static final int MAX_ZOOKEEPER_NODE_COUNT = 7;
public enum Networking { disable, enable }
private ApplicationPackage app;
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
protected DeployLogger log;
public static final List<ConfigModelId> configModelIds = List.of(ConfigModelId.fromName(CONTAINER_TAG));
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
super(ContainerModel.class);
this.standaloneBuilder = standaloneBuilder;
this.networking = networking;
this.rpcServerEnabled = !standaloneBuilder;
this.httpServerEnabled = networking == Networking.enable;
}
@Override
public List<ConfigModelId> handlesElements() {
return configModelIds;
}
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
log = modelContext.getDeployLogger();
app = modelContext.getApplicationPackage();
checkVersion(spec);
ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
addClusterContent(cluster, spec, modelContext);
cluster.setMessageBusEnabled(rpcServerEnabled);
cluster.setRpcServerEnabled(rpcServerEnabled);
cluster.setHttpServerEnabled(httpServerEnabled);
model.setCluster(cluster);
}
// Creates the cluster config producer via the DOM builder machinery, so that standard
// producer wiring (parent, config id) is applied consistently.
private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
return new VespaDomBuilder.DomConfigProducerBuilderBase<ApplicationContainerCluster>() {
@Override
protected ApplicationContainerCluster doBuild(DeployState deployState, TreeConfigProducer<AnyConfigProducer> ancestor, Element producerSpec) {
// Producer id is used both as subId and cluster name.
return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
modelContext.getProducerId(), deployState);
}
}.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
/**
 * Populates the cluster from the <container> element. NOTE: the call order below is
 * significant — e.g. clients must be added before http (the mTLS connector reads them),
 * and nodes are added after components so per-node config can be applied.
 */
private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
DeployState deployState = context.getDeployState();
DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
addConfiguredComponents(deployState, cluster, spec);
addSecretStore(cluster, spec, deployState);
addProcessing(deployState, spec, cluster, context);
addSearch(deployState, spec, cluster, context);
addDocproc(deployState, spec, cluster);
addDocumentApi(deployState, spec, cluster, context);
cluster.addDefaultHandlersExceptStatus();
addStatusHandlers(cluster, context.getDeployState().isHosted());
addUserHandlers(deployState, cluster, spec, context);
addClients(deployState, spec, cluster);
addHttp(deployState, spec, cluster, context);
addAccessLogs(deployState, cluster, spec);
addNodes(cluster, spec, context);
addModelEvaluationRuntime(cluster);
addModelEvaluation(spec, cluster, context);
addServerProviders(deployState, spec, cluster);
// Standalone builders manage platform bundles themselves.
if (!standaloneBuilder) cluster.addAllPlatformBundles();
addDeploymentSpecConfig(cluster, context, deployState.getDeployLogger());
addZooKeeper(cluster, spec);
addParameterStoreValidationHandler(cluster, deployState);
}
/**
 * Adds the AWS parameter-store support bundle for hosted deployments, and — in public
 * systems only — a handler for validating secret-store configuration.
 */
private void addParameterStoreValidationHandler(ApplicationContainerCluster cluster, DeployState deployState) {
    if ( ! deployState.isHosted()) return;
    cluster.addPlatformBundle(PlatformBundles.absoluteBundlePath("jdisc-cloud-aws"));
    if ( ! deployState.zone().system().isPublic()) return;

    var validationHandler = new Handler(
            new ComponentModel("com.yahoo.jdisc.cloud.aws.AwsParameterStoreValidationHandler", null, "jdisc-cloud-aws", null));
    validationHandler.addServerBindings(SystemBindingPattern.fromHttpPath("/validate-secret-store"));
    cluster.addComponent(validationHandler);
}
/**
 * Configures an embedded ZooKeeper ensemble when the spec contains a zookeeper element.
 * Rejects combined clusters and node counts that cannot form a healthy ensemble
 * (must be an odd count within [MIN, MAX], counting only non-retired nodes).
 */
private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) {
Element zooKeeper = getZooKeeper(spec);
if (zooKeeper == null) return;
// <nodes of="..."> denotes a combined cluster, which cannot host ZooKeeper.
Element nodesElement = XML.getChild(spec, "nodes");
boolean isCombined = nodesElement != null && nodesElement.hasAttribute("of");
if (isCombined) {
throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper");
}
long nonRetiredNodes = cluster.getContainers().stream().filter(c -> !c.isRetired()).count();
if (nonRetiredNodes < MIN_ZOOKEEPER_NODE_COUNT || nonRetiredNodes > MAX_ZOOKEEPER_NODE_COUNT || nonRetiredNodes % 2 == 0) {
throw new IllegalArgumentException("Cluster with ZooKeeper needs an odd number of nodes, between " +
MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT +
", have " + nonRetiredNodes + " non-retired");
}
cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade");
cluster.addSimpleComponent("com.yahoo.vespa.curator.CuratorWrapper", null, "zkfacade");
// Optional session timeout override; must parse to a positive integer.
String sessionTimeoutSeconds = zooKeeper.getAttribute("session-timeout-seconds");
if ( ! sessionTimeoutSeconds.isBlank()) {
try {
int timeoutSeconds = Integer.parseInt(sessionTimeoutSeconds);
if (timeoutSeconds <= 0) throw new IllegalArgumentException("must be a positive value");
cluster.setZookeeperSessionTimeoutSeconds(timeoutSeconds);
}
catch (RuntimeException e) {
// Catches both NumberFormatException and the positive-value check above.
throw new IllegalArgumentException("invalid zookeeper session-timeout-seconds '" + sessionTimeoutSeconds + "'", e);
}
}
cluster.getContainers().forEach(ContainerModelBuilder::addReconfigurableZooKeeperServerComponents);
}
/** Adds the reconfigurable ZooKeeper server components (server, reconfigurer, admin) to the given container. */
public static void addReconfigurableZooKeeperServerComponents(Container container) {
    for (String className : List.of("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer",
                                    "com.yahoo.vespa.zookeeper.Reconfigurer",
                                    "com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl")) {
        container.addComponent(zookeeperComponent(className, container));
    }
}
/** Creates a zookeeper-server bundle component whose config is scoped to the given container. */
private static SimpleComponent zookeeperComponent(String idSpec, Container container) {
    return new SimpleComponent(new ComponentModel(idSpec, null, "zookeeper-server", container.getConfigId()));
}
/**
 * Configures a secret store from an optional secret-store element: either the cloud
 * variant (type="cloud") or a plain store built from the declared group children.
 */
private void addSecretStore(ApplicationContainerCluster cluster, Element spec, DeployState deployState) {
    Element secretStoreElement = XML.getChild(spec, "secret-store");
    if (secretStoreElement == null) return;

    if ("cloud".equals(secretStoreElement.getAttribute("type"))) {
        addCloudSecretStore(cluster, secretStoreElement, deployState);
        return;
    }

    SecretStore secretStore = new SecretStore();
    for (Element group : XML.getChildren(secretStoreElement, "group")) {
        secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
    }
    cluster.setSecretStore(secretStore);
}
/**
 * Adds a cloud secret store backed by AWS Parameter Store. Only supported in hosted,
 * public systems; every aws-parameter-store entry must reference a tenant secret store
 * that has an external ID configured.
 *
 * Fix: the "no external ID" error now names the offending store, so users with several
 * stores can tell which one is misconfigured.
 */
private void addCloudSecretStore(ApplicationContainerCluster cluster, Element secretStoreElement, DeployState deployState) {
    if ( ! deployState.isHosted()) return;
    if ( ! cluster.getZone().system().isPublic())
        throw new IllegalArgumentException("Cloud secret store is not supported in non-public system, see the documentation");
    CloudSecretStore cloudSecretStore = new CloudSecretStore();
    // Index the tenant's configured secret stores by name for the lookups below.
    Map<String, TenantSecretStore> secretStoresByName = deployState.getProperties().tenantSecretStores()
            .stream()
            .collect(Collectors.toMap(
                    TenantSecretStore::getName,
                    store -> store
            ));
    Element store = XML.getChild(secretStoreElement, "store");
    for (Element group : XML.getChildren(store, "aws-parameter-store")) {
        String account = group.getAttribute("account");
        String region = group.getAttribute("aws-region");
        TenantSecretStore secretStore = secretStoresByName.get(account);
        if (secretStore == null)
            throw new IllegalArgumentException("No configured secret store named " + account);
        if (secretStore.getExternalId().isEmpty())
            throw new IllegalArgumentException("No external ID has been set for secret store " + account);
        cloudSecretStore.addConfig(account, region, secretStore.getAwsId(), secretStore.getRole(), secretStore.getExternalId().get());
    }
    cluster.addComponent(cloudSecretStore);
}
/**
 * Applies deployment.xml-derived configuration for hosted applications: warns about
 * deprecated elements, sets up the Athenz identity provider, and propagates rotations.
 */
private void addDeploymentSpecConfig(ApplicationContainerCluster cluster, ConfigModelContext context, DeployLogger deployLogger) {
if ( ! context.getDeployState().isHosted()) return;
DeploymentSpec deploymentSpec = app.getDeploymentSpec();
if (deploymentSpec.isEmpty()) return;
// Surface deprecations from deployment.xml to the user.
for (var deprecatedElement : deploymentSpec.deprecatedElements()) {
deployLogger.logApplicationPackage(WARNING, deprecatedElement.humanReadableString());
}
addIdentityProvider(cluster,
context.getDeployState().getProperties().configServerSpecs(),
context.getDeployState().getProperties().loadBalancerName(),
context.getDeployState().getProperties().ztsUrl(),
context.getDeployState().getProperties().athenzDnsSuffix(),
context.getDeployState().zone(),
deploymentSpec);
addRotationProperties(cluster, context.getDeployState().getEndpoints());
}
/** Tags every container with the global rotations of its cluster and marks rotation as active. */
private void addRotationProperties(ApplicationContainerCluster cluster, Set<ContainerEndpoint> endpoints) {
    for (Container container : cluster.getContainers()) {
        setRotations(container, endpoints, cluster.getName());
        container.setProp("activeRotation", "true");
    }
}
/**
 * Sets the "rotations" property on the container to a comma-joined list of the names
 * of all global-scope endpoints belonging to the given cluster, in encounter order
 * and without duplicates.
 */
private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
    Set<String> rotationNames = new LinkedHashSet<>();
    for (ContainerEndpoint endpoint : endpoints) {
        if ( ! endpoint.clusterId().equals(containerClusterName)) continue;
        if (endpoint.scope() != ApplicationClusterEndpoint.Scope.global) continue;
        rotationNames.addAll(endpoint.names());
    }
    container.setProp("rotations", String.join(",", rotationNames));
}
/**
 * Adds user-declared components, both those nested in components elements
 * (after resolving includes) and components declared directly under the parent.
 */
private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element parent) {
for (Element components : XML.getChildren(parent, "components")) {
addIncludes(components);
addConfiguredComponents(deployState, cluster, components, "component");
}
addConfiguredComponents(deployState, cluster, parent, "component");
}
/**
 * Adds the status endpoint: hosted Vespa serves a status file (location overridable via
 * the VESPA_LB_STATUS_FILE environment variable), self-hosted clusters get the VIP handler.
 */
protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
    if ( ! isHostedVespa) {
        cluster.addVipHandler();
        return;
    }
    String name = "status.html";
    String statusFile = System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING);
    if (statusFile == null) statusFile = HOSTED_VESPA_STATUS_FILE;
    cluster.addComponent(new FileStatusHandlerComponent(name + "-status-handler",
                                                        statusFile,
                                                        SystemBindingPattern.fromHttpPath("/" + name)));
}
// Adds user-declared server components from the spec.
private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
addConfiguredComponents(deployState, cluster, spec, "server");
}
/**
 * Configures access logging. With no accesslog elements, the default access log is added
 * when enabled for this deployment. Hosted applications may not override access logging
 * (a warning is logged); self-hosted ones get their declared loggers. A connection log is
 * added whenever any access log component ends up on the cluster.
 *
 * Fix: use isEmpty() instead of size() > 0 (idiomatic, and avoids counting the collection).
 */
protected void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    List<Element> accessLogElements = getAccessLogElements(spec);
    if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault()) {
        cluster.addAccessLog();
    } else {
        if (cluster.isHostedVespa()) {
            log.logApplicationPackage(WARNING, "Applications are not allowed to override the 'accesslog' element");
        } else {
            List<AccessLogComponent> components = new ArrayList<>();
            for (Element accessLog : accessLogElements) {
                AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(accessLogComponent -> {
                    components.add(accessLogComponent);
                    cluster.addComponent(accessLogComponent);
                });
            }
            // Replace the default no-op request log once a real access log is configured.
            if ( ! components.isEmpty()) {
                cluster.removeSimpleComponent(VoidRequestLog.class);
                cluster.addSimpleComponent(AccessLog.class);
            }
        }
    }
    if (cluster.getAllComponents().stream().anyMatch(component -> component instanceof AccessLogComponent))
        cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "access"));
}
// Returns all accesslog children of the spec (possibly empty).
private List<Element> getAccessLogElements(Element spec) {
return XML.getChildren(spec, "accesslog");
}
/**
 * Configures the http layer: applies any explicit http element, then — for hosted
 * tenant applications — adds the implicit server, access control, mTLS and token support.
 * NOTE: the order of the hosted additions matters; the cloud filters assume the implicit
 * http/server setup has already been done.
 */
protected void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element httpElement = XML.getChild(spec, "http");
if (httpElement != null) {
cluster.setHttp(buildHttp(deployState, cluster, httpElement, context));
}
if (isHostedTenantApplication(context)) {
addHostedImplicitHttpIfNotPresent(deployState, cluster);
addHostedImplicitAccessControlIfNotPresent(deployState, cluster);
addDefaultConnectorHostedFilterBinding(cluster);
addCloudMtlsConnector(deployState, cluster);
addCloudDataPlaneFilter(deployState, cluster);
addCloudTokenSupport(deployState, cluster);
}
}
/**
 * Sets up the cloud data-plane filter chains (hosted public systems only): a secure chain
 * on the mTLS data-plane connector, and a no-op insecure chain bound to the VIP handler
 * on the default web-service port.
 */
private static void addCloudDataPlaneFilter(DeployState deployState, ApplicationContainerCluster cluster) {
if (!deployState.isHosted() || !deployState.zone().system().isPublic()) return;
var dataplanePort = getMtlsDataplanePort(deployState);
// Secure chain: authorizes mTLS data-plane traffic.
var secureChain = new HttpFilterChain("cloud-data-plane-secure", HttpFilterChain.Type.SYSTEM);
secureChain.addInnerComponent(new CloudDataPlaneFilter(cluster, deployState));
cluster.getHttp().getFilterChains().add(secureChain);
// Make the secure chain the default request filter on the data-plane connector.
cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
.filter(c -> c.getListenPort() == dataplanePort).findAny().orElseThrow()
.setDefaultRequestFilterChain(secureChain.getComponentId());
// Insecure chain: a no-op filter, used for the VIP/status traffic on the default port.
var insecureChain = new HttpFilterChain("cloud-data-plane-insecure", HttpFilterChain.Type.SYSTEM);
insecureChain.addInnerComponent(new Filter(
new ChainedComponentModel(
new BundleInstantiationSpecification(
new ComponentSpecification("com.yahoo.jdisc.http.filter.security.misc.NoopFilter"),
null, new ComponentSpecification("jdisc-security-filters")),
Dependencies.emptyDependencies())));
cluster.getHttp().getFilterChains().add(insecureChain);
var insecureChainComponentSpec = new ComponentSpecification(insecureChain.getComponentId().toString());
FilterBinding insecureBinding =
FilterBinding.create(FilterBinding.Type.REQUEST, insecureChainComponentSpec, VIP_HANDLER_BINDING);
cluster.getHttp().getBindings().add(insecureBinding);
cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
.filter(c -> c.getListenPort() == Defaults.getDefaults().vespaWebServicePort()).findAny().orElseThrow()
.setDefaultRequestFilterChain(insecureChain.getComponentId());
}
/**
 * Configures data-plane clients (hosted public systems only). Without a clients
 * element, a single legacy default client is built from security/clients.pem; otherwise
 * each declared client is parsed, and at least one must use certificate authentication.
 * Operator/tester certificates from the deploy properties are always appended.
 */
protected void addClients(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
if (!deployState.isHosted() || !deployState.zone().system().isPublic()) return;
List<Client> clients;
Element clientsElement = XML.getChild(spec, "clients");
boolean legacyMode = false;
if (clientsElement == null) {
clients = List.of(new Client(
"default", List.of(), getCertificates(app.getFile(Path.fromString("security/clients.pem"))), List.of()));
legacyMode = true;
} else {
clients = XML.getChildren(clientsElement, "client").stream()
.flatMap(elem -> getClient(elem, deployState).stream())
.toList();
boolean atLeastOneClientWithCertificate = clients.stream().anyMatch(client -> !client.certificates().isEmpty());
if (!atLeastOneClientWithCertificate)
throw new IllegalArgumentException("At least one client must require a certificate");
}
List<X509Certificate> operatorAndTesterCertificates = deployState.getProperties().operatorCertificates();
if(!operatorAndTesterCertificates.isEmpty())
clients = Stream.concat(clients.stream(), Stream.of(Client.internalClient(operatorAndTesterCertificates))).toList();
cluster.setClients(legacyMode, clients);
}
/**
 * Parses a single client element into a Client. A client authenticates either with
 * certificates (all referenced files must exist and contain at least one certificate) or
 * with tokens (unknown or version-less tokens are skipped with a warning). Returns empty
 * when the client has no certificates and no usable tokens.
 *
 * @throws IllegalArgumentException if the id starts with '_' or a certificate file is missing
 *
 * Fix: corrected log-message typo "activate tokens" -> "active tokens".
 */
private Optional<Client> getClient(Element clientElement, DeployState state) {
    String clientId = XML.attribute("id", clientElement).orElseThrow();
    // Ids starting with '_' are reserved for internal clients.
    if (clientId.startsWith("_"))
        throw new IllegalArgumentException("Invalid client id '%s', id cannot start with '_'".formatted(clientId));
    List<String> permissions = XML.attribute("permissions", clientElement)
            .map(p -> p.split(",")).stream()
            .flatMap(Arrays::stream)
            .toList();

    var certificates = XML.getChildren(clientElement, "certificate").stream()
            .flatMap(certElem -> {
                var file = app.getFile(Path.fromString(certElem.getAttribute("file")));
                if (!file.exists()) {
                    throw new IllegalArgumentException("Certificate file '%s' for client '%s' does not exist"
                            .formatted(file.getPath().getRelative(), clientId));
                }
                return getCertificates(file).stream();
            })
            .toList();
    // Certificates take precedence over tokens when both are declared.
    if (!certificates.isEmpty()) return Optional.of(new Client(clientId, permissions, certificates, List.of()));

    var knownTokens = state.getProperties().dataplaneTokens().stream()
            .collect(Collectors.toMap(DataplaneToken::tokenId, Function.identity()));

    var referencedTokens = XML.getChildren(clientElement, "token").stream()
            .map(elem -> {
                var tokenId = elem.getAttribute("id");
                var token = knownTokens.get(tokenId);
                if (token == null)
                    log.logApplicationPackage(
                            WARNING, "Token '%s' for client '%s' does not exist".formatted(tokenId, clientId));
                return token;
            })
            .filter(token -> {
                if (token == null) return false;
                boolean empty = token.versions().isEmpty();
                if (empty)
                    log.logApplicationPackage(
                            WARNING, "Token '%s' for client '%s' has no active versions"
                                    .formatted(token.tokenId(), clientId));
                return !empty;
            })
            .toList();
    if (referencedTokens.isEmpty()) {
        log.log(Level.INFO, "Skipping client '%s' as it does not refer to any active tokens".formatted(clientId));
        return Optional.empty();
    }
    return Optional.of(new Client(clientId, permissions, List.of(), referencedTokens));
}
/**
 * Reads all PEM certificates from the given file. Returns an empty list when the file
 * does not exist; throws IllegalArgumentException when it exists but contains none.
 *
 * Fix: use try-with-resources so the reader is closed even when reading or PEM parsing
 * throws (the original leaked the reader on any exception).
 */
private List<X509Certificate> getCertificates(ApplicationFile file) {
    if (!file.exists()) return List.of();
    try (Reader reader = file.createReader()) {
        String certPem = IOUtils.readAll(reader);
        List<X509Certificate> x509Certificates = X509CertificateUtils.certificateListFromPem(certPem);
        if (x509Certificates.isEmpty()) {
            throw new IllegalArgumentException("File %s does not contain any certificates.".formatted(file.getPath().getRelative()));
        }
        return x509Certificates;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
// If access control is configured, apply it to the default hosted connector.
private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) {
cluster.getHttp().getAccessControl()
.ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(cluster.getHttp()));
}
/**
 * Adds the hosted mTLS data-plane connector to the cluster's http server. In public
 * systems client auth is based on the certificates from security/clients.pem; in
 * non-public systems the Athenz CA bundle is used, with NEED/WANT depending on the
 * access-control configuration. Without an endpoint certificate, the enforcer variant
 * of client auth is used.
 * NOTE(review): assumes an http server is always present here (Optional.get without
 * check) — presumably guaranteed by addHostedImplicitHttpIfNotPresent running first; confirm.
 */
private void addCloudMtlsConnector(DeployState state, ApplicationContainerCluster cluster) {
JettyHttpServer server = cluster.getHttp().getHttpServer().get();
String serverName = server.getComponentId().getName();
var builder = HostedSslConnectorFactory.builder(serverName, getMtlsDataplanePort(state))
.proxyProtocol(true, state.getProperties().featureFlags().enableProxyProtocolMixedMode())
.tlsCiphersOverride(state.getProperties().tlsCiphersOverride())
.endpointConnectionTtl(state.getProperties().endpointConnectionTtl());
var endpointCert = state.endpointCertificateSecrets().orElse(null);
if (endpointCert != null) {
builder.endpointCertificate(endpointCert);
boolean isPublic = state.zone().system().isPublic();
List<X509Certificate> clientCertificates = getClientCertificates(cluster);
if (isPublic) {
// Public systems require a client CA from the application package.
if (clientCertificates.isEmpty())
throw new IllegalArgumentException("Client certificate authority security/clients.pem is missing - " +
"see: https:
builder.tlsCaCertificatesPem(X509CertificateUtils.toPem(clientCertificates))
.clientAuth(SslClientAuth.WANT_WITH_ENFORCER);
} else {
builder.tlsCaCertificatesPath("/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem");
var needAuth = cluster.getHttp().getAccessControl()
.map(accessControl -> accessControl.clientAuthentication)
.map(clientAuth -> clientAuth == AccessControl.ClientAuthentication.need)
.orElse(false);
builder.clientAuth(needAuth ? SslClientAuth.NEED : SslClientAuth.WANT);
}
} else {
builder.clientAuth(SslClientAuth.WANT_WITH_ENFORCER);
}
var connectorFactory = builder.build();
cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory));
server.addConnector(connectorFactory);
}
/** Returns an unmodifiable list of all certificates across the cluster's configured clients. */
private List<X509Certificate> getClientCertificates(ApplicationContainerCluster cluster) {
    List<X509Certificate> certificates = new ArrayList<>();
    for (Client client : cluster.getClients()) {
        certificates.addAll(client.certificates());
    }
    return List.copyOf(certificates);
}
// True when building a tenant (non-infrastructure) application in hosted Vespa.
private static boolean isHostedTenantApplication(ConfigModelContext context) {
return context.getDeployState().isHostedTenantApplication(context.getApplicationType());
}
/**
 * Ensures hosted clusters always have an http layer: creates the Http object, a default
 * Jetty server, and a connector on the default web-service port, each only if missing.
 */
private static void addHostedImplicitHttpIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
if (cluster.getHttp() == null) {
cluster.setHttp(new Http(new FilterChains(cluster)));
}
JettyHttpServer httpServer = cluster.getHttp().getHttpServer().orElse(null);
if (httpServer == null) {
httpServer = new JettyHttpServer("DefaultHttpServer", cluster, deployState);
cluster.getHttp().setHttpServer(httpServer);
}
int defaultPort = Defaults.getDefaults().vespaWebServicePort();
boolean defaultConnectorPresent = httpServer.getConnectorFactories().stream().anyMatch(connector -> connector.getListenPort() == defaultPort);
if (!defaultConnectorPresent) {
httpServer.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build());
}
}
/**
 * Adds implicit Athenz-domain access control for hosted applications, unless access
 * control is already configured or the deployment has no Athenz domain.
 */
private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
    Http http = cluster.getHttp();
    if (http.getAccessControl().isPresent()) return; // explicit config wins
    Optional<AthenzDomain> tenantDomain = deployState.getProperties().athenzDomain();
    if (tenantDomain.isEmpty()) return;
    new AccessControl.Builder(tenantDomain.get().value())
            .setHandlers(cluster)
            .clientAuthentication(AccessControl.ClientAuthentication.need)
            .build()
            .configureHttpFilterChains(http);
}
// Builds the Http model from an explicit <http> element; strips all servers when networking is disabled.
private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement, ConfigModelContext context) {
Http http = new HttpBuilder(portBindingOverride(deployState, context)).build(deployState, cluster, httpElement);
if (networking == Networking.disable)
http.removeAllServers();
return http;
}
/** Sets the document API on the cluster when the spec declares it (the builder returns null otherwise). */
private void addDocumentApi(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    ContainerDocumentApi documentApi = buildDocumentApi(deployState, cluster, spec, context);
    if (documentApi != null)
        cluster.setDocumentApi(documentApi);
}
/**
 * Sets the docproc chain on the cluster when the spec declares it, and propagates the
 * docproc options as messagebus parameters.
 */
private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec);
if (containerDocproc == null) return;
cluster.setDocproc(containerDocproc);
ContainerDocproc.Options docprocOptions = containerDocproc.options;
cluster.setMbusParams(new ApplicationContainerCluster.MbusParams(
docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
}
/**
 * Configures search from an optional <search> element: search chains, the search
 * handler, and any user-declared renderers (validated against built-in renderer ids).
 */
private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element searchElement = XML.getChild(spec, "search");
if (searchElement == null) return;
addIncludes(searchElement);
cluster.setSearch(buildSearch(deployState, cluster, searchElement));
addSearchHandler(deployState, cluster, searchElement, context);
validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/**
 * Configures stateless model evaluation from an optional model-evaluation element.
 * Per-model ONNX tuning (execution mode, threads, GPU device) is applied to the models
 * distributed from the rank profiles; unknown model names are warned about and skipped.
 */
private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
if (modelEvaluationElement == null) return;
RankProfileList profiles =
context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;
// Clone so the tuning below does not mutate the shared rank-profile models.
FileDistributedOnnxModels models = profiles.getOnnxModels().clone();
Element onnxElement = XML.getChild(modelEvaluationElement, "onnx");
Element modelsElement = XML.getChild(onnxElement, "models");
for (Element modelElement : XML.getChildren(modelsElement, "model") ) {
OnnxModel onnxModel = models.asMap().get(modelElement.getAttribute("name"));
if (onnxModel == null) {
String availableModels = String.join(", ", profiles.getOnnxModels().asMap().keySet());
context.getDeployState().getDeployLogger().logApplicationPackage(WARNING,
"Model '" + modelElement.getAttribute("name") + "' not found. Available ONNX " +
"models are: " + availableModels + ". Skipping this configuration.");
continue;
}
onnxModel.setStatelessExecutionMode(getStringValue(modelElement, "execution-mode", null));
onnxModel.setStatelessInterOpThreads(getIntValue(modelElement, "interop-threads", -1));
onnxModel.setStatelessIntraOpThreads(getIntValue(modelElement, "intraop-threads", -1));
// GPU device is only honored when some node actually has GPU resources.
Element gpuDeviceElement = XML.getChild(modelElement, "gpu-device");
if (gpuDeviceElement != null) {
int gpuDevice = Integer.parseInt(gpuDeviceElement.getTextContent());
boolean hasGpu = cluster.getContainers().stream().anyMatch(container -> container.getHostResource() != null &&
!container.getHostResource().realResources().gpuResources().isZero());
onnxModel.setGpuDevice(gpuDevice, hasGpu);
}
cluster.onnxModelCost().registerModel(context.getApplicationPackage().getFile(onnxModel.getFilePath()));
}
cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles, models));
}
/** Returns the text content of the named child element, or the given default when the child is absent. */
private String getStringValue(Element element, String name, String defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return child.getTextContent();
}
/** Returns the text content of the named child element parsed as an int, or the given default when absent. */
private int getIntValue(Element element, String name, int defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return Integer.parseInt(child.getTextContent());
}
/**
 * Adds the model-evaluation runtime bundles and the always-injectable runtime components
 * (ONNX runtime and embedder runtime) to every application container cluster.
 */
protected void addModelEvaluationRuntime(ApplicationContainerCluster cluster) {
/* These bundles are added to all application container clusters, even if they haven't
* declared 'model-evaluation' in services.xml, because there are many public API packages
* in the model-evaluation bundle that could be used by customer code. */
cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_EVALUATION_BUNDLE_FILE);
cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_INTEGRATION_BUNDLE_FILE);
cluster.addPlatformBundle(ContainerModelEvaluation.ONNXRUNTIME_BUNDLE_FILE);
/* The ONNX runtime is always available for injection to any component */
cluster.addSimpleComponent(
ContainerModelEvaluation.ONNX_RUNTIME_CLASS, null, ContainerModelEvaluation.INTEGRATION_BUNDLE_NAME);
/* Add runtime providing utilities such as metrics to embedder implementations */
cluster.addSimpleComponent(
"ai.vespa.embedding.EmbedderRuntime", null, ContainerModelEvaluation.INTEGRATION_BUNDLE_NAME);
}
/**
 * Configures processing chains from an optional processing element, including the
 * search/docproc bundles they require and any declared renderers.
 */
private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element processingElement = XML.getChild(spec, "processing");
if (processingElement == null) return;
cluster.addSearchAndDocprocBundles();
addIncludes(processingElement);
cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement),
serverBindings(deployState, context, processingElement, ProcessingChains.defaultBindings).toArray(BindingPattern[]::new));
validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/**
 * Builds the ContainerSearch model: search chains from the spec, plus page templates,
 * query profiles and semantic rules from the application package.
 */
private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) {
SearchChains searchChains = new DomSearchChainsBuilder()
.build(deployState, containerCluster, producerSpec);
ContainerSearch containerSearch = new ContainerSearch(deployState, containerCluster, searchChains);
applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch);
containerSearch.setQueryProfiles(deployState.getQueryProfiles());
containerSearch.setSemanticRules(deployState.getSemanticRules());
return containerSearch;
}
// Validates and installs the application package's page templates on the search component.
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) {
PageTemplates.validate(applicationPackage);
containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
// Adds each user-declared <handler> as a component, applying any port-binding overrides.
private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
for (Element component: XML.getChildren(spec, "handler")) {
cluster.addComponent(
new DomHandlerBuilder(cluster, portBindingOverride(deployState, context)).build(deployState, cluster, component));
}
}
/** Rejects any container version other than 1.0. */
private void checkVersion(Element spec) {
    String version = spec.getAttribute("version");
    boolean isVersionOne = Version.fromString(version).equals(new Version(1));
    if ( ! isVersionOne)
        throw new IllegalArgumentException("Expected container version to be 1.0, but got " + version);
}
/** Adds nodes to the cluster: a single implicit node when standalone, otherwise from services.xml. */
private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    if (standaloneBuilder) {
        addStandaloneNode(cluster, context.getDeployState());
        return;
    }
    addNodesFromXml(cluster, spec, context);
}
// Adds the single implicit container used by standalone container models.
private void addStandaloneNode(ApplicationContainerCluster cluster, DeployState deployState) {
ApplicationContainer container = new ApplicationContainer(cluster, "standalone", cluster.getContainers().size(), deployState);
cluster.addContainers(Collections.singleton(container));
}
// Resolves the effective JVM GC options string for this deployment.
private static String buildJvmGCOptions(ConfigModelContext context, String jvmGCOptions) {
return new JvmGcOptions(context.getDeployState(), jvmGCOptions).build();
}
// Resolves JVM options from the <nodes> element; legacyOptions selects the deprecated attribute form.
private static String getJvmOptions(Element nodesElement,
DeployState deployState,
boolean legacyOptions) {
return new JvmOptions(nodesElement, deployState, legacyOptions).build();
}
/** Returns the attribute's value, or null when absent (DOM itself would return "" for missing attributes). */
private static String extractAttribute(Element element, String attrName) {
    if ( ! element.hasAttribute(attrName)) return null;
    return element.getAttribute(attrName);
}
/**
 * Applies JVM settings to the nodes: the jvm child element takes precedence,
 * falling back to the deprecated attributes directly on the nodes element.
 */
private void extractJvmOptions(List<ApplicationContainer> nodes,
                               ApplicationContainerCluster cluster,
                               Element nodesElement,
                               ConfigModelContext context) {
    Element jvmElement = XML.getChild(nodesElement, "jvm");
    if (jvmElement != null) {
        extractJvmTag(nodes, cluster, nodesElement, jvmElement, context);
    } else {
        extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
    }
}
/**
 * Applies the deprecated JVM attributes declared directly on the nodes element
 * (jvm-gc-options, allocated-memory), logging deprecation warnings when they are used.
 * A gc-options value already set on the cluster (from a jvm element) takes precedence.
 */
private void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element nodesElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, getJvmOptions(nodesElement, context.getDeployState(), true));
if (cluster.getJvmGCOptions().isEmpty()) {
String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS);
if (jvmGCOptions != null && !jvmGCOptions.isEmpty()) {
DeployLogger logger = context.getDeployState().getDeployLogger();
logger.logApplicationPackage(WARNING, "'jvm-gc-options' is deprecated and will be removed in Vespa 9." +
" Please merge into 'gc-options' in 'jvm' element." +
" See https:
}
cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions));
}
// applyMemoryPercentage returns true when the attribute was present and applied.
if (applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME)))
context.getDeployState().getDeployLogger()
.logApplicationPackage(WARNING, "'allocated-memory' is deprecated and will be removed in Vespa 9." +
" Please merge into 'allocated-memory' in 'jvm' element." +
" See https:
}
// Applies JVM settings from a <jvm> element: options, allocated-memory percentage, and GC options.
private void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element nodesElement, Element jvmElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, getJvmOptions(nodesElement, context.getDeployState(), false));
applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
String jvmGCOptions = extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS);
cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions));
}
/**
 * Add nodes to cluster according to the given containerElement.
 *
 * Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set
 * of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed
 * simultaneously for all active config models.
 */
private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
Element nodesElement = XML.getChild(containerElement, "nodes");
if (nodesElement == null) {
// No <nodes>: allocation is decided entirely by the model/context.
cluster.addContainers(allocateWithoutNodesTag(cluster, context));
} else {
List<ApplicationContainer> nodes = createNodes(cluster, containerElement, nodesElement, context);
extractJvmOptions(nodes, cluster, nodesElement, context);
applyDefaultPreload(nodes, nodesElement);
// Propagate any declared environment variables to every node.
var envVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT)).entrySet();
for (var container : nodes) {
for (var entry : envVars) {
container.addEnvironmentVariable(entry.getKey(), entry.getValue());
}
}
if (useCpuSocketAffinity(nodesElement))
AbstractService.distributeCpuSocketAffinity(nodes);
cluster.addContainers(nodes);
}
}
// Looks up the zone endpoint declared in deployment.xml for this instance, zone and cluster.
private ZoneEndpoint zoneEndpoint(ConfigModelContext context, ClusterSpec.Id cluster) {
InstanceName instance = context.properties().applicationId().instance();
ZoneId zone = ZoneId.from(context.properties().zone().environment(),
context.properties().zone().region());
return context.getApplicationPackage().getDeploymentSpec().zoneEndpoint(instance, zone, cluster);
}
/**
 * Extracts environment variables from the environment-variables element as an
 * insertion-ordered map of child-element name to text content.
 * NOTE(review): names are passed through com.yahoo.text.Identifier — presumably for
 * validation/sanitization of the variable name; confirm its semantics.
 */
private static Map<String, String> getEnvironmentVariables(Element environmentVariables) {
var map = new LinkedHashMap<String, String>();
if (environmentVariables != null) {
for (Element var: XML.getChildren(environmentVariables)) {
var name = new com.yahoo.text.Identifier(var.getNodeName());
map.put(name.toString(), var.getTextContent());
}
}
return map;
}
/**
 * Dispatches to the right node-creation strategy based on the attributes of the nodes element:
 * 'type', 'of' (deprecated combined cluster), 'count'/hosted dev, or an explicit node list.
 */
private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element containerElement,
                                               Element nodesElement, ConfigModelContext context) {
    if (nodesElement.hasAttribute("type"))
        return createNodesFromNodeType(cluster, nodesElement, context);
    if (nodesElement.hasAttribute("of")) {
        List<ApplicationContainer> containers = createNodesFromContentServiceReference(cluster, nodesElement, context);
        log.logApplicationPackage(WARNING, "Declaring combined cluster with <nodes of=\"...\"> is deprecated without " +
                                           "replacement, and the feature will be removed in Vespa 9. Use separate container and " +
                                           "content clusters instead");
        return containers;
    }
    if (nodesElement.hasAttribute("count")
        || (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed()))
        return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
    return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
}
/**
 * Parses a percentage string such as "60%" and applies it to the cluster.
 * Returns whether a value was set; throws IllegalArgumentException on malformed input.
 */
private static boolean applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
    if (memoryPercentage == null || memoryPercentage.isEmpty()) return false;
    try {
        String trimmed = memoryPercentage.trim();
        if ( ! trimmed.endsWith("%"))
            throw new IllegalArgumentException("Missing % sign");
        String digits = trimmed.substring(0, trimmed.length() - 1).trim();
        cluster.setMemoryPercentage(Integer.parseInt(digits));
        return true;
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
                                           " must be an integer percentage ending by the '%' sign", e);
    }
}
/** Allocate a container cluster without a nodes tag: dedicated nodes when hosted, a single host otherwise. */
private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    HostSystem hostSystem = cluster.hostSystem();
    if ( ! deployState.isHosted())
        return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);

    // Hosted: 2 nodes in production zones, otherwise 1
    int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
    deployState.getDeployLogger().logApplicationPackage(Level.INFO, "Using " + nodeCount + " nodes in " + cluster);
    var nodesSpec = NodesSpecification.dedicated(nodeCount, context);
    ClusterSpec.Id clusterId = ClusterSpec.Id.from(cluster.getName());
    var hosts = nodesSpec.provision(hostSystem,
                                    ClusterSpec.Type.container,
                                    clusterId,
                                    zoneEndpoint(context, clusterId),
                                    deployState.getDeployLogger(),
                                    false,
                                    context.clusterInfo().build());
    return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
/** Places a single container, "container.0", on the given host. */
private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    ApplicationContainer container = new ApplicationContainer(cluster, "container.0", 0, deployState);
    container.setHostResource(host);
    container.initService(deployState);
    return List.of(container);
}
/** Creates nodes from a counted nodes spec; any spec error is rethrown with the cluster as context. */
private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
    try {
        NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
        ClusterSpec.Id clusterId = ClusterSpec.Id.from(cluster.name());
        boolean hasZooKeeper = getZooKeeper(containerElement) != null;
        Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(),
                                                                                  ClusterSpec.Type.container,
                                                                                  clusterId,
                                                                                  zoneEndpoint(context, clusterId),
                                                                                  log,
                                                                                  hasZooKeeper,
                                                                                  context.clusterInfo().build());
        return createNodesFromHosts(hosts, cluster, context.getDeployState());
    }
    catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("In " + cluster, e);
    }
}
/** Allocates hosts by the required node type given in the 'type' attribute. */
private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
    ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
                                         .vespaVersion(context.getDeployState().getWantedNodeVespaVersion())
                                         .dockerImageRepository(context.getDeployState().getWantedDockerImageRepo())
                                         .build();
    Map<HostResource, ClusterMembership> hosts = cluster.getRoot().hostSystem()
                                                        .allocateHosts(clusterSpec, Capacity.fromRequiredNodeType(type), log);
    return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
/** Allocates container nodes on the hosts of the content cluster referenced by the 'of' attribute (combined cluster). */
private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodesSpecification nodeSpecification;
    try {
        nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
    } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
    }
    String referenceId = nodesElement.getAttribute("of");
    cluster.setHostClusterId(referenceId);

    Map<HostResource, ClusterMembership> hosts = StorageGroup.provisionHosts(nodeSpecification,
                                                                             referenceId,
                                                                             cluster.getRoot().hostSystem(),
                                                                             context);
    return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
/** Wraps each allocated host in an initialized ApplicationContainer named "container.&lt;index&gt;". */
private List<ApplicationContainer> createNodesFromHosts(Map<HostResource, ClusterMembership> hosts,
                                                        ApplicationContainerCluster cluster,
                                                        DeployState deployState) {
    List<ApplicationContainer> nodes = new ArrayList<>(hosts.size());
    hosts.forEach((host, membership) -> {
        ApplicationContainer node = new ApplicationContainer(cluster, "container." + membership.index(),
                                                             membership.retired(), membership.index(), deployState);
        node.setHostResource(host);
        node.initService(deployState);
        nodes.add(node);
    });
    return nodes;
}
/** Builds containers from explicit &lt;node&gt; children, indexed in document order. */
private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
    List<Element> nodeElements = XML.getChildren(nodesElement, "node");
    List<ApplicationContainer> nodes = new ArrayList<>(nodeElements.size());
    for (int index = 0; index < nodeElements.size(); index++)
        nodes.add(new ContainerServiceBuilder("container." + index, index)
                          .build(deployState, cluster, nodeElements.get(index)));
    return nodes;
}
/** Returns whether the cpu-socket-affinity attribute is present and set to true. */
private static boolean useCpuSocketAffinity(Element nodesElement) {
    return nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)
           && Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
}
/** Prepends the nodes-level JVM args to each container that has no per-node options assigned. */
private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
    for (Container node : containers) {
        if (node.getAssignedJvmOptions().isEmpty())
            node.prependJvmOptions(jvmArgs);
    }
}
/** Applies the 'preload' attribute of the nodes element, when present, to all containers. */
private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
    if ( ! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
    String preload = nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME);
    for (Container container : containers)
        container.setPreLoad(preload);
}
/** Registers the search handler, binding to the data plane ports for hosted tenant applications. */
private void addSearchHandler(DeployState deployState, ApplicationContainerCluster cluster, Element searchElement, ConfigModelContext context) {
    List<BindingPattern> bindingPatterns = isHostedTenantApplication(context)
            ? SearchHandler.bindingPattern(getDataplanePorts(deployState))
            : List.<BindingPattern>of(SearchHandler.DEFAULT_BINDING);
    SearchHandler searchHandler = new SearchHandler(cluster,
                                                    serverBindings(deployState, context, searchElement, bindingPatterns),
                                                    ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null));
    cluster.addComponent(searchHandler);

    // Add executor of implicit search chains
    searchHandler.addComponent(Component.fromClassAndBundle(SearchHandler.EXECUTION_FACTORY, PlatformBundles.SEARCH_AND_DOCPROC_BUNDLE));
}
/** Returns the declared &lt;binding&gt; patterns for the search element, or a copy of the given defaults if none are declared. */
private List<BindingPattern> serverBindings(DeployState deployState, ConfigModelContext context, Element searchElement, Collection<BindingPattern> defaultBindings) {
    List<Element> declared = XML.getChildren(searchElement, "binding");
    return declared.isEmpty() ? List.copyOf(defaultBindings)
                              : toBindingList(deployState, context, declared);
}
/** Converts binding elements to patterns, applying data plane port overrides for hosted tenant applications. */
private List<BindingPattern> toBindingList(DeployState deployState, ConfigModelContext context, List<Element> bindingElements) {
    Set<Integer> portOverride = isHostedTenantApplication(context) ? getDataplanePorts(deployState) : Set.<Integer>of();
    List<BindingPattern> result = new ArrayList<>();
    for (Element bindingElement : bindingElements) {
        String pattern = bindingElement.getTextContent().trim();
        if (pattern.isEmpty()) continue; // ignore empty binding elements
        result.addAll(userBindingPattern(pattern, portOverride));
    }
    return result;
}
/** Expands a user binding to one pattern per overridden port, or just the pattern itself when there is no override. */
private static Collection<UserBindingPattern> userBindingPattern(String path, Set<Integer> portBindingOverride) {
    UserBindingPattern pattern = UserBindingPattern.fromPattern(path);
    return portBindingOverride.isEmpty()
            ? Set.of(pattern)
            : portBindingOverride.stream().map(pattern::withOverriddenPort).toList();
}
/** Builds the document-api component when a &lt;document-api&gt; element is present; returns null otherwise. */
private ContainerDocumentApi buildDocumentApi(DeployState deployState, ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    Element documentApiElement = XML.getChild(spec, "document-api");
    if (documentApiElement == null) return null;

    ContainerDocumentApi.HandlerOptions handlerOptions = DocumentApiOptionsBuilder.build(documentApiElement);
    boolean ignoreUndefinedFields = "true".equals(XML.getValue(XML.getChild(documentApiElement, "ignore-undefined-fields")));
    return new ContainerDocumentApi(cluster, handlerOptions, ignoreUndefinedFields,
                                    portBindingOverride(deployState, context));
}
/** Returns the data plane ports to bind to for hosted tenant applications, empty otherwise. */
private Set<Integer> portBindingOverride(DeployState deployState, ConfigModelContext context) {
    if ( ! isHostedTenantApplication(context)) return Set.of();
    return getDataplanePorts(deployState);
}
/** Builds the document-processing component when a &lt;document-processing&gt; element is present; returns null otherwise. */
private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    Element docprocElement = XML.getChild(spec, "document-processing");
    if (docprocElement == null) return null;

    addIncludes(docprocElement);
    DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
    ContainerDocproc.Options options = DocprocOptionsBuilder.build(docprocElement, deployState.getDeployLogger());
    return new ContainerDocproc(cluster, chains, options, !standaloneBuilder);
}
/** Expands all &lt;include&gt; children of the given element; requires an application package to resolve them. */
private void addIncludes(Element parentElement) {
    List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includes.isEmpty()) return;
    if (app == null)
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
    includes.forEach(include -> addInclude(parentElement, include));
}
/** Imports the sub-elements of every file in the include directory into the parent element's document. */
private void addInclude(Element parentElement, Element include) {
    String dirName = include.getAttribute(IncludeDirs.DIR);
    app.validateIncludeDir(dirName);

    for (Element includedFile : Xml.allElemsFromPath(app, dirName)) {
        for (Element includedSubElement : XML.getChildren(includedFile)) {
            // importNode(..., true) performs a deep copy into the parent's owner document
            parentElement.appendChild(parentElement.getOwnerDocument().importNode(includedSubElement, true));
        }
    }
}
/** Adds a component for each child of the given name, resolving model ids first. */
private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
                                            Element parent, String componentName) {
    for (Element componentElement : XML.getChildren(parent, componentName)) {
        ModelIdResolver.resolveModelIds(componentElement, deployState.isHosted());
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
    }
}
/** Validates each matching child element with the given validator, then adds it as a component. */
private static void validateAndAddConfiguredComponents(DeployState deployState,
                                                       ContainerCluster<? extends Container> cluster,
                                                       Element spec,
                                                       String componentName,
                                                       Consumer<Element> elementValidator) {
    for (Element componentElement : XML.getChildren(spec, componentName)) {
        elementValidator.accept(componentElement); // throws exception here if something is wrong
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
    }
}
/**
 * Wires an Athenz identity provider into the cluster when the deployment spec declares an Athenz domain.
 * Requires an Athenz service to be configured for the current instance and zone, replaces the
 * platform-provided provider component, and exposes the identity on each container via properties.
 */
private void addIdentityProvider(ApplicationContainerCluster cluster,
                                 List<ConfigServerSpec> configServerSpecs,
                                 HostName loadBalancerName,
                                 URI ztsUrl,
                                 String athenzDnsSuffix,
                                 Zone zone,
                                 DeploymentSpec spec) {
    spec.athenzDomain()
        .ifPresent(domain -> {
            // A declared domain without a service for this instance/zone is a configuration error
            AthenzService service = spec.athenzService(app.getApplicationId().instance(), zone.environment(), zone.region())
                .orElseThrow(() -> new IllegalArgumentException("Missing Athenz service configuration in instance '" +
                                                                app.getApplicationId().instance() + "'"));
            String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
            IdentityProvider identityProvider = new IdentityProvider(domain,
                                                                     service,
                                                                     getLoadBalancerName(loadBalancerName, configServerSpecs),
                                                                     ztsUrl,
                                                                     zoneDnsSuffix,
                                                                     zone);

            // Replace the default provider with the application-specific one
            cluster.removeComponent(ComponentId.fromString("com.yahoo.container.jdisc.AthenzIdentityProviderProvider"));
            cluster.addComponent(identityProvider);

            var serviceIdentityProviderProvider = "com.yahoo.vespa.athenz.identityprovider.client.ServiceIdentityProviderProvider";
            cluster.addComponent(new SimpleComponent(new ComponentModel(serviceIdentityProviderProvider, serviceIdentityProviderProvider, "vespa-athenz")));

            // Make the identity available to each container as service properties
            cluster.getContainers().forEach(container -> {
                container.setProp("identity.domain", domain.value());
                container.setProp("identity.service", service.value());
            });
        });
}
/** Returns the given load balancer name, falling back to the hostname of the first config server ("unknown" if none). */
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
    if (loadbalancerName != null) return loadbalancerName;
    String fallback = configServerSpecs.stream()
                                       .findFirst()
                                       .map(ConfigServerSpec::getHostName)
                                       .orElse("unknown");
    return HostName.of(fallback);
}
/** Returns the zookeeper child element of the given spec, or null if there is none. */
private static Element getZooKeeper(Element spec) {
    return XML.getChild(spec, "zookeeper");
}
/** Disallow renderers named "XmlRenderer" or "JsonRenderer" — those ids are reserved for the built-in renderers. */
private static void validateRendererElement(Element element) {
    String id = element.getAttribute("id");
    if (id.equals(xmlRendererId) || id.equals(jsonRendererId))
        throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", id));
}
/** Returns whether the given element is a container element. */
public static boolean isContainerTag(Element element) {
    return CONTAINER_TAG.equals(element.getTagName());
}
/**
 * Validates JVM options and logs a warning or fails deployment (depending on feature flag)
 * if anyone of them has invalid syntax or is an option that is unsupported for the running system.
 */
private static class JvmOptions {

    // Accepted shape of a single option.
    // NOTE(review): the range 'A-z' also matches '[', '\', ']', '^', '_' and '`'. This looks like a typo for
    // 'A-Z', but tightening it would reject option strings accepted today (e.g. ones containing '_') —
    // confirm before changing.
    private static final Pattern validPattern = Pattern.compile("-[a-zA-z0-9=:./,+*-]+");
    // Debug-agent option rejected on hosted systems (see validateJvmOptions)
    private static final Pattern invalidInHostedPattern = Pattern.compile("-Xrunjdwp:transport=.*");

    private final Element nodesElement;
    private final DeployLogger logger;
    private final boolean legacyOptions;  // read the deprecated 'jvm-options' attribute instead of the <jvm> element
    private final boolean isHosted;       // hosted: invalid options fail deployment; self-hosted: only a warning

    public JvmOptions(Element nodesElement, DeployState deployState, boolean legacyOptions) {
        this.nodesElement = nodesElement;
        this.logger = deployState.getDeployLogger();
        this.legacyOptions = legacyOptions;
        this.isHosted = deployState.isHosted();
    }

    /** Returns the validated options string from the jvm element's 'options' attribute; empty string if none. */
    String build() {
        if (legacyOptions)
            return buildLegacyOptions();

        Element jvmElement = XML.getChild(nodesElement, "jvm");
        if (jvmElement == null) return "";
        String jvmOptions = jvmElement.getAttribute(VespaDomBuilder.OPTIONS);
        if (jvmOptions.isEmpty()) return "";
        validateJvmOptions(jvmOptions);
        return jvmOptions;
    }

    /** Reads the deprecated 'jvm-options' attribute (warning on use); may return null when the attribute is absent. */
    String buildLegacyOptions() {
        String jvmOptions = null;
        if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
            jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
            if (! jvmOptions.isEmpty())
                logger.logApplicationPackage(WARNING, "'jvm-options' is deprecated and will be removed in Vespa 9." +
                        " Please merge 'jvm-options' into 'options' or 'gc-options' in 'jvm' element." +
                        " See https:
        }

        validateJvmOptions(jvmOptions);

        return jvmOptions;
    }

    /** Collects syntactically invalid options (plus hosted-forbidden ones when hosted) and warns or fails. */
    private void validateJvmOptions(String jvmOptions) {
        if (jvmOptions == null || jvmOptions.isEmpty()) return;

        String[] optionList = jvmOptions.split(" ");
        List<String> invalidOptions = Arrays.stream(optionList)
                .filter(option -> !option.isEmpty())
                .filter(option -> !Pattern.matches(validPattern.pattern(), option))
                .sorted()
                .collect(Collectors.toCollection(ArrayList::new));
        if (isHosted)
            // Debug-agent transport options are additionally rejected on hosted systems
            invalidOptions.addAll(Arrays.stream(optionList)
                    .filter(option -> !option.isEmpty())
                    .filter(option -> Pattern.matches(invalidInHostedPattern.pattern(), option))
                    .sorted().toList());

        if (invalidOptions.isEmpty()) return;

        String message = "Invalid or misplaced JVM options in services.xml: " +
                String.join(",", invalidOptions) + "." +
                " See https:
        if (isHosted)
            throw new IllegalArgumentException(message);
        else
            logger.logApplicationPackage(WARNING, message);
    }
}
/**
 * Validates JVM GC options and logs a warning or fails deployment (depending on feature flag)
 * if anyone of them has invalid syntax or is an option that is unsupported for the running system
 * (e.g. uses CMS options for hosted Vespa, which uses JDK 17).
 */
private static class JvmGcOptions {

    // NOTE(review): 'A-z' in these character classes also matches '[', '\', ']', '^', '_' and '`' — likely a typo
    // for 'A-Z', but fixing it would change which options validate; confirm before tightening.
    private static final Pattern validPattern = Pattern.compile("-XX:[+-]*[a-zA-z0-9=]+");
    private static final Pattern invalidCMSPattern = Pattern.compile("-XX:[+-]\\w*CMS[a-zA-z0-9=]+");

    private final DeployState deployState;
    private final String jvmGcOptions;  // options from services.xml; may be null
    private final DeployLogger logger;
    private final boolean isHosted;     // hosted: invalid options fail deployment; self-hosted: only a warning

    public JvmGcOptions(DeployState deployState, String jvmGcOptions) {
        this.deployState = deployState;
        this.jvmGcOptions = jvmGcOptions;
        this.logger = deployState.getDeployLogger();
        this.isHosted = deployState.isHosted();
    }

    /**
     * Returns the effective GC options: the services.xml value when given (after validation),
     * else the deploy properties value, else the platform default (parallel GC hosted, G1 otherwise).
     */
    private String build() {
        String options = deployState.getProperties().jvmGCOptions();
        if (jvmGcOptions != null) {
            options = jvmGcOptions;
            String[] optionList = options.split(" ");
            // An option is invalid if malformed, or if it is a CMS option (unsupported on the running JDK)
            List<String> invalidOptions = Arrays.stream(optionList)
                    .filter(option -> !option.isEmpty())
                    .filter(option -> !Pattern.matches(validPattern.pattern(), option)
                            || Pattern.matches(invalidCMSPattern.pattern(), option)
                            || option.equals("-XX:+UseConcMarkSweepGC"))
                    .sorted()
                    .toList();
            logOrFailInvalidOptions(invalidOptions);
        }

        if (options == null || options.isEmpty())
            options = deployState.isHosted() ? ContainerCluster.PARALLEL_GC : ContainerCluster.G1GC;

        return options;
    }

    private void logOrFailInvalidOptions(List<String> options) {
        if (options.isEmpty()) return;

        String message = "Invalid or misplaced JVM GC options in services.xml: " +
                String.join(",", options) + "." +
                " See https:
        if (isHosted)
            throw new IllegalArgumentException(message);
        else
            logger.logApplicationPackage(WARNING, message);
    }
}
/** Returns the data plane ports: always the mtls port, plus the token port when token support is enabled. */
private static Set<Integer> getDataplanePorts(DeployState ds) {
    int mtlsPort = getMtlsDataplanePort(ds);
    OptionalInt tokenPort = getTokenDataplanePort(ds);
    return tokenPort.isPresent() ? Set.of(mtlsPort, tokenPort.getAsInt())
                                 : Set.of(mtlsPort);
}
/** Returns the mtls data plane port: 8443 when token support is enabled, 4443 otherwise. */
private static int getMtlsDataplanePort(DeployState ds) {
    if (enableTokenSupport(ds)) return 8443;
    return 4443;
}
/** Returns the token data plane port (8444) when token support is enabled, otherwise empty. */
private static OptionalInt getTokenDataplanePort(DeployState ds) {
    if ( ! enableTokenSupport(ds)) return OptionalInt.empty();
    return OptionalInt.of(8444);
}
/** Returns the application endpoints whose auth method is 'token'. */
private static Set<ContainerEndpoint> tokenEndpoints(DeployState deployState) {
    return deployState.getEndpoints()
                      .stream()
                      .filter(endpoint -> ApplicationClusterEndpoint.AuthMethod.token == endpoint.authMethod())
                      .collect(Collectors.toSet());
}
/** Token support is enabled for hosted, public-system deployments that declare at least one token endpoint. */
private static boolean enableTokenSupport(DeployState state) {
    if ( ! state.isHosted() || ! state.zone().system().isPublic()) return false;
    return ! tokenEndpoints(state).isEmpty();
}
} |
```suggestion addServerBindings(SystemBindingPattern.fromHttpPortAndPath(Defaults.getDefaults().vespaWebServicePort(), "/data-plane-tokens/v1")); ``` | private void addCloudTokenSupport(DeployState state, ApplicationContainerCluster cluster) {
var server = cluster.getHttp().getHttpServer().get();
if (!enableTokenSupport(state)) return;
Set<String> tokenEndpoints = tokenEndpoints(state).stream()
.map(ContainerEndpoint::names)
.flatMap(Collection::stream)
.collect(Collectors.toSet());
var endpointCert = state.endpointCertificateSecrets().orElseThrow();
int tokenPort = getTokenDataplanePort(state).orElseThrow();
cluster.addSimpleComponent(DataplaneProxyCredentials.class);
cluster.addSimpleComponent(DataplaneProxyService.class);
var dataplaneProxy = new DataplaneProxy(
getMtlsDataplanePort(state),
tokenPort,
endpointCert.certificate(),
endpointCert.key(),
tokenEndpoints);
cluster.addComponent(dataplaneProxy);
var connector = HostedSslConnectorFactory.builder(server.getComponentId().getName()+"-token", tokenPort)
.tokenEndpoint(true)
.proxyProtocol(false, false)
.endpointCertificate(endpointCert)
.remoteAddressHeader("X-Forwarded-For")
.remotePortHeader("X-Forwarded-Port")
.clientAuth(SslClientAuth.NEED)
.build();
server.addConnector(connector);
var tokenChain = new HttpFilterChain("cloud-token-data-plane-secure", HttpFilterChain.Type.SYSTEM);
var tokenFilter = new CloudTokenDataPlaneFilter(cluster, state);
tokenChain.addInnerComponent(tokenFilter);
cluster.getHttp().getFilterChains().add(tokenChain);
cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
.filter(c -> c.getListenPort() == tokenPort).findAny().orElseThrow()
.setDefaultRequestFilterChain(tokenChain.getComponentId());
class CloudTokenDataPlaneHandler extends Handler implements CloudTokenDataPlaneFilterConfig.Producer {
CloudTokenDataPlaneHandler() {
super(new ComponentModel("com.yahoo.jdisc.http.filter.security.cloud.CloudTokenDataPlaneHandler", null, "jdisc-security-filters", null));
addServerBindings(SystemBindingPattern.fromHttpPortAndPath(Defaults.getDefaults().vespaWebServicePort(), "data-plane-tokens/v1"));
}
@Override public void getConfig(Builder builder) { tokenFilter.getConfig(builder); }
}
cluster.addComponent(new CloudTokenDataPlaneHandler());
} | addServerBindings(SystemBindingPattern.fromHttpPortAndPath(Defaults.getDefaults().vespaWebServicePort(), "data-plane-tokens/v1")); | private void addCloudTokenSupport(DeployState state, ApplicationContainerCluster cluster) {
var server = cluster.getHttp().getHttpServer().get();
if (!enableTokenSupport(state)) return;
Set<String> tokenEndpoints = tokenEndpoints(state).stream()
.map(ContainerEndpoint::names)
.flatMap(Collection::stream)
.collect(Collectors.toSet());
var endpointCert = state.endpointCertificateSecrets().orElseThrow();
int tokenPort = getTokenDataplanePort(state).orElseThrow();
cluster.addSimpleComponent(DataplaneProxyCredentials.class);
cluster.addSimpleComponent(DataplaneProxyService.class);
var dataplaneProxy = new DataplaneProxy(
getMtlsDataplanePort(state),
tokenPort,
endpointCert.certificate(),
endpointCert.key(),
tokenEndpoints);
cluster.addComponent(dataplaneProxy);
var connector = HostedSslConnectorFactory.builder(server.getComponentId().getName()+"-token", tokenPort)
.tokenEndpoint(true)
.proxyProtocol(false, false)
.endpointCertificate(endpointCert)
.remoteAddressHeader("X-Forwarded-For")
.remotePortHeader("X-Forwarded-Port")
.clientAuth(SslClientAuth.NEED)
.build();
server.addConnector(connector);
var tokenChain = new HttpFilterChain("cloud-token-data-plane-secure", HttpFilterChain.Type.SYSTEM);
var tokenFilter = new CloudTokenDataPlaneFilter(cluster, state);
tokenChain.addInnerComponent(tokenFilter);
cluster.getHttp().getFilterChains().add(tokenChain);
cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
.filter(c -> c.getListenPort() == tokenPort).findAny().orElseThrow()
.setDefaultRequestFilterChain(tokenChain.getComponentId());
class CloudTokenDataPlaneHandler extends Handler implements CloudTokenDataPlaneFilterConfig.Producer {
CloudTokenDataPlaneHandler() {
super(new ComponentModel("com.yahoo.jdisc.http.filter.security.cloud.CloudTokenDataPlaneHandler", null, "jdisc-security-filters", null));
addServerBindings(SystemBindingPattern.fromHttpPortAndPath(Defaults.getDefaults().vespaWebServicePort(), "/data-plane-tokens/v1"));
}
@Override public void getConfig(Builder builder) { tokenFilter.getConfig(builder); }
}
cluster.addComponent(new CloudTokenDataPlaneHandler());
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";
private static final String CONTAINER_TAG = "container";
private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";
private static final int MIN_ZOOKEEPER_NODE_COUNT = 1;
private static final int MAX_ZOOKEEPER_NODE_COUNT = 7;
public enum Networking { disable, enable }
private ApplicationPackage app;
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
protected DeployLogger log;
public static final List<ConfigModelId> configModelIds = List.of(ConfigModelId.fromName(CONTAINER_TAG));
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
/** Creates a builder; standalone mode disables the RPC server, and networking mode controls the HTTP server. */
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
    super(ContainerModel.class);
    this.standaloneBuilder = standaloneBuilder;
    this.networking = networking;
    // Derived flags, fixed at construction time
    this.rpcServerEnabled = ! standaloneBuilder;
    this.httpServerEnabled = (networking == Networking.enable);
}
/** Returns the config model ids handled by this builder (the container tag). */
@Override
public List<ConfigModelId> handlesElements() {
    return configModelIds;
}
/** Builds the application container cluster from the given spec element and attaches it to the model. */
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
    log = modelContext.getDeployLogger();
    app = modelContext.getApplicationPackage();
    checkVersion(spec);

    ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
    addClusterContent(cluster, spec, modelContext);
    cluster.setMessageBusEnabled(rpcServerEnabled);
    cluster.setRpcServerEnabled(rpcServerEnabled);
    cluster.setHttpServerEnabled(httpServerEnabled);
    model.setCluster(cluster);
}
/** Creates the cluster via a VespaDomBuilder producer builder, using the model context's producer id. */
private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
    return new VespaDomBuilder.DomConfigProducerBuilderBase<ApplicationContainerCluster>() {
        @Override
        protected ApplicationContainerCluster doBuild(DeployState deployState, TreeConfigProducer<AnyConfigProducer> ancestor, Element producerSpec) {
            return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
                                                   modelContext.getProducerId(), deployState);
        }
    }.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
/**
 * Populates the cluster from the services.xml spec: documents, components, secret stores, processing,
 * search, docproc, document-api, handlers, clients, http, access logs, nodes, model evaluation,
 * server providers, deployment-spec-derived config, ZooKeeper and validation handlers.
 * NOTE(review): the call order is preserved as found; some later steps read components added by earlier
 * ones (e.g. default handlers are added before status handlers) — do not reorder casually.
 */
private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
    addConfiguredComponents(deployState, cluster, spec);
    addSecretStore(cluster, spec, deployState);

    addProcessing(deployState, spec, cluster, context);
    addSearch(deployState, spec, cluster, context);
    addDocproc(deployState, spec, cluster);
    addDocumentApi(deployState, spec, cluster, context);

    cluster.addDefaultHandlersExceptStatus();
    addStatusHandlers(cluster, context.getDeployState().isHosted());
    addUserHandlers(deployState, cluster, spec, context);

    addClients(deployState, spec, cluster);
    addHttp(deployState, spec, cluster, context);

    addAccessLogs(deployState, cluster, spec);
    addNodes(cluster, spec, context);

    addModelEvaluationRuntime(cluster);
    addModelEvaluation(spec, cluster, context);

    addServerProviders(deployState, spec, cluster);

    if (!standaloneBuilder) cluster.addAllPlatformBundles();

    addDeploymentSpecConfig(cluster, context, deployState.getDeployLogger());
    addZooKeeper(cluster, spec);

    addParameterStoreValidationHandler(cluster, deployState);
}
/** Adds the AWS parameter store bundle (hosted only) and its validation handler (public zones only). */
private void addParameterStoreValidationHandler(ApplicationContainerCluster cluster, DeployState deployState) {
    if ( ! deployState.isHosted()) return;
    cluster.addPlatformBundle(PlatformBundles.absoluteBundlePath("jdisc-cloud-aws"));
    if ( ! deployState.zone().system().isPublic()) return;

    Handler handler = new Handler(
            new ComponentModel("com.yahoo.jdisc.cloud.aws.AwsParameterStoreValidationHandler", null, "jdisc-cloud-aws", null));
    handler.addServerBindings(SystemBindingPattern.fromHttpPath("/validate-secret-store"));
    cluster.addComponent(handler);
}
/** Configures ZooKeeper on the cluster when a zookeeper element is present, validating topology and options. */
private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) {
    Element zooKeeper = getZooKeeper(spec);
    if (zooKeeper == null) return;

    // Combined clusters (container nodes on content hosts, <nodes of="...">) cannot run ZooKeeper
    Element nodesElement = XML.getChild(spec, "nodes");
    boolean isCombined = nodesElement != null && nodesElement.hasAttribute("of");
    if (isCombined) {
        throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper");
    }
    // Odd number of non-retired nodes required, within [MIN, MAX]
    long nonRetiredNodes = cluster.getContainers().stream().filter(c -> !c.isRetired()).count();
    if (nonRetiredNodes < MIN_ZOOKEEPER_NODE_COUNT || nonRetiredNodes > MAX_ZOOKEEPER_NODE_COUNT || nonRetiredNodes % 2 == 0) {
        throw new IllegalArgumentException("Cluster with ZooKeeper needs an odd number of nodes, between " +
                                           MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT +
                                           ", have " + nonRetiredNodes + " non-retired");
    }
    cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade");
    cluster.addSimpleComponent("com.yahoo.vespa.curator.CuratorWrapper", null, "zkfacade");

    // Optional session timeout override; must be a positive integer
    String sessionTimeoutSeconds = zooKeeper.getAttribute("session-timeout-seconds");
    if ( ! sessionTimeoutSeconds.isBlank()) {
        try {
            int timeoutSeconds = Integer.parseInt(sessionTimeoutSeconds);
            if (timeoutSeconds <= 0) throw new IllegalArgumentException("must be a positive value");
            cluster.setZookeeperSessionTimeoutSeconds(timeoutSeconds);
        }
        catch (RuntimeException e) {
            throw new IllegalArgumentException("invalid zookeeper session-timeout-seconds '" + sessionTimeoutSeconds + "'", e);
        }
    }
    cluster.getContainers().forEach(ContainerModelBuilder::addReconfigurableZooKeeperServerComponents);
}
/** Adds the components required to run a reconfigurable ZooKeeper server on the given container. */
public static void addReconfigurableZooKeeperServerComponents(Container container) {
    for (String className : List.of("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer",
                                    "com.yahoo.vespa.zookeeper.Reconfigurer",
                                    "com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl"))
        container.addComponent(zookeeperComponent(className, container));
}
/** Creates a zookeeper-server bundle component with the container's config id as its config producer. */
private static SimpleComponent zookeeperComponent(String idSpec, Container container) {
    return new SimpleComponent(new ComponentModel(idSpec, null, "zookeeper-server", container.getConfigId()));
}
/** Configures a secret store from a secret-store element: cloud-typed stores, or plain group-based stores. */
private void addSecretStore(ApplicationContainerCluster cluster, Element spec, DeployState deployState) {
    Element secretStoreElement = XML.getChild(spec, "secret-store");
    if (secretStoreElement == null) return;

    if ("cloud".equals(secretStoreElement.getAttribute("type"))) {
        addCloudSecretStore(cluster, secretStoreElement, deployState);
        return;
    }
    SecretStore secretStore = new SecretStore();
    for (Element group : XML.getChildren(secretStoreElement, "group"))
        secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
    cluster.setSecretStore(secretStore);
}
/** Configures the cloud secret store (hosted public systems only) from store/aws-parameter-store entries. */
private void addCloudSecretStore(ApplicationContainerCluster cluster, Element secretStoreElement, DeployState deployState) {
    if ( ! deployState.isHosted()) return;
    if ( ! cluster.getZone().system().isPublic())
        throw new IllegalArgumentException("Cloud secret store is not supported in non-public system, see the documentation");
    CloudSecretStore cloudSecretStore = new CloudSecretStore();
    Map<String, TenantSecretStore> secretStoresByName = deployState.getProperties().tenantSecretStores()
            .stream()
            .collect(Collectors.toMap(
                    TenantSecretStore::getName,
                    store -> store
            ));
    Element store = XML.getChild(secretStoreElement, "store");
    for (Element group : XML.getChildren(store, "aws-parameter-store")) {
        // Each entry must reference a configured tenant secret store that has an external id set
        String account = group.getAttribute("account");
        String region = group.getAttribute("aws-region");
        TenantSecretStore secretStore = secretStoresByName.get(account);
        if (secretStore == null)
            throw new IllegalArgumentException("No configured secret store named " + account);
        if (secretStore.getExternalId().isEmpty())
            throw new IllegalArgumentException("No external ID has been set");
        cloudSecretStore.addConfig(account, region, secretStore.getAwsId(), secretStore.getRole(), secretStore.getExternalId().get());
    }

    cluster.addComponent(cloudSecretStore);
}
/**
 * Applies hosted deployment-spec driven configuration: logs deprecation warnings from the spec,
 * wires up the identity provider, and sets rotation properties on all containers.
 */
private void addDeploymentSpecConfig(ApplicationContainerCluster cluster, ConfigModelContext context, DeployLogger deployLogger) {
    DeployState deployState = context.getDeployState();
    if ( ! deployState.isHosted()) return;
    DeploymentSpec deploymentSpec = app.getDeploymentSpec();
    if (deploymentSpec.isEmpty()) return;
    deploymentSpec.deprecatedElements().forEach(
            deprecatedElement -> deployLogger.logApplicationPackage(WARNING, deprecatedElement.humanReadableString()));
    addIdentityProvider(cluster,
                        deployState.getProperties().configServerSpecs(),
                        deployState.getProperties().loadBalancerName(),
                        deployState.getProperties().ztsUrl(),
                        deployState.getProperties().athenzDnsSuffix(),
                        deployState.zone(),
                        deploymentSpec);
    addRotationProperties(cluster, deployState.getEndpoints());
}
/** Marks every container as having an active rotation and sets its rotation names. */
private void addRotationProperties(ApplicationContainerCluster cluster, Set<ContainerEndpoint> endpoints) {
    for (Container container : cluster.getContainers()) {
        setRotations(container, endpoints, cluster.getName());
        container.setProp("activeRotation", "true");
    }
}
/**
 * Sets the container's "rotations" property to the comma-joined names of all global-scope
 * endpoints belonging to this cluster, preserving encounter order.
 */
private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
    Set<String> rotations = new LinkedHashSet<>();
    for (ContainerEndpoint endpoint : endpoints) {
        if ( ! endpoint.clusterId().equals(containerClusterName)) continue;
        if (endpoint.scope() != ApplicationClusterEndpoint.Scope.global) continue;
        rotations.addAll(endpoint.names());
    }
    container.setProp("rotations", String.join(",", rotations));
}
/**
 * Adds user-configured components declared both inside 'components' wrapper elements
 * (resolving includes first) and directly under the parent element.
 */
private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element parent) {
    XML.getChildren(parent, "components").forEach(components -> {
        addIncludes(components);
        addConfiguredComponents(deployState, cluster, components, "component");
    });
    addConfiguredComponents(deployState, cluster, parent, "component");
}
/**
 * Adds health/status handlers: hosted Vespa serves a status file (path overridable via
 * environment variable), while self-hosted clusters get the standard VIP handler.
 */
protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
    if ( ! isHostedVespa) {
        cluster.addVipHandler();
        return;
    }
    String name = "status.html";
    String statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING))
                                .orElse(HOSTED_VESPA_STATUS_FILE);
    cluster.addComponent(new FileStatusHandlerComponent(name + "-status-handler",
                                                        statusFile,
                                                        SystemBindingPattern.fromHttpPath("/" + name)));
}
/** Registers any 'server' elements under the services spec as configured server components. */
private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    addConfiguredComponents(deployState, cluster, spec, "server");
}
/**
 * Configures access logging for the cluster. With no 'accesslog' elements and default logging
 * enabled, the default access log is added. Hosted Vespa disallows user overrides (warning
 * logged); otherwise each declared access log is built unless disabled. A connection log is
 * added whenever any access log component ends up on the cluster.
 */
protected void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    List<Element> accessLogElements = getAccessLogElements(spec);
    if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault()) {
        cluster.addAccessLog();
    } else {
        if (cluster.isHostedVespa()) {
            log.logApplicationPackage(WARNING, "Applications are not allowed to override the 'accesslog' element");
        } else {
            List<AccessLogComponent> components = new ArrayList<>();
            for (Element accessLog : accessLogElements) {
                AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(accessLogComponent -> {
                    components.add(accessLogComponent);
                    cluster.addComponent(accessLogComponent);
                });
            }
            // Swap the void request log for a real one only if at least one access log was enabled.
            if ( ! components.isEmpty()) {
                cluster.removeSimpleComponent(VoidRequestLog.class);
                cluster.addSimpleComponent(AccessLog.class);
            }
        }
    }
    if (cluster.getAllComponents().stream().anyMatch(component -> component instanceof AccessLogComponent))
        cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "access"));
}
/** Returns all 'accesslog' child elements of the given services spec element. */
private List<Element> getAccessLogElements(Element spec) {
    return XML.getChildren(spec, "accesslog");
}
/**
 * Builds the HTTP configuration from an optional 'http' element, then — for hosted tenant
 * applications — adds the implicit server, access control, hosted filter bindings, mTLS
 * connector, cloud data plane filter, and token support.
 */
protected void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    Element httpElement = XML.getChild(spec, "http");
    if (httpElement != null)
        cluster.setHttp(buildHttp(deployState, cluster, httpElement, context));
    if ( ! isHostedTenantApplication(context)) return;
    addHostedImplicitHttpIfNotPresent(deployState, cluster);
    addHostedImplicitAccessControlIfNotPresent(deployState, cluster);
    addDefaultConnectorHostedFilterBinding(cluster);
    addCloudMtlsConnector(deployState, cluster);
    addCloudDataPlaneFilter(deployState, cluster);
    addCloudTokenSupport(deployState, cluster);
}
/**
 * Sets up the cloud data plane filter chains (hosted public systems only): a 'secure' chain
 * running the CloudDataPlaneFilter as the default request filter on the mTLS data plane
 * connector, and an 'insecure' no-op chain as the default on the regular web service connector,
 * also bound to the VIP handler path.
 */
private static void addCloudDataPlaneFilter(DeployState deployState, ApplicationContainerCluster cluster) {
    if (!deployState.isHosted() || !deployState.zone().system().isPublic()) return;
    var dataplanePort = getMtlsDataplanePort(deployState);
    // Secure chain: enforces data plane access control on the mTLS port.
    var secureChain = new HttpFilterChain("cloud-data-plane-secure", HttpFilterChain.Type.SYSTEM);
    secureChain.addInnerComponent(new CloudDataPlaneFilter(cluster, deployState));
    cluster.getHttp().getFilterChains().add(secureChain);
    cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
            .filter(c -> c.getListenPort() == dataplanePort).findAny().orElseThrow()
            .setDefaultRequestFilterChain(secureChain.getComponentId());
    // Insecure chain: wraps a NoopFilter so requests on the default port pass through unfiltered.
    var insecureChain = new HttpFilterChain("cloud-data-plane-insecure", HttpFilterChain.Type.SYSTEM);
    insecureChain.addInnerComponent(new Filter(
            new ChainedComponentModel(
                    new BundleInstantiationSpecification(
                            new ComponentSpecification("com.yahoo.jdisc.http.filter.security.misc.NoopFilter"),
                            null, new ComponentSpecification("jdisc-security-filters")),
                    Dependencies.emptyDependencies())));
    cluster.getHttp().getFilterChains().add(insecureChain);
    var insecureChainComponentSpec = new ComponentSpecification(insecureChain.getComponentId().toString());
    // Bind the insecure chain to the VIP handler path as well.
    FilterBinding insecureBinding =
            FilterBinding.create(FilterBinding.Type.REQUEST, insecureChainComponentSpec, VIP_HANDLER_BINDING);
    cluster.getHttp().getBindings().add(insecureBinding);
    cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
            .filter(c -> c.getListenPort() == Defaults.getDefaults().vespaWebServicePort()).findAny().orElseThrow()
            .setDefaultRequestFilterChain(insecureChain.getComponentId());
}
/**
 * Configures data plane clients (hosted public systems only). Without a 'clients' element,
 * a single legacy default client is created from security/clients.pem; otherwise each declared
 * client is parsed, and at least one must carry a certificate. Operator/tester certificates,
 * when present, are appended as an internal client.
 */
protected void addClients(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    if ( ! deployState.isHosted() || ! deployState.zone().system().isPublic()) return;
    Element clientsElement = XML.getChild(spec, "clients");
    boolean legacyMode = clientsElement == null;
    List<Client> clients;
    if (legacyMode) {
        var certificates = getCertificates(app.getFile(Path.fromString("security/clients.pem")));
        clients = List.of(new Client("default", List.of(), certificates, List.of()));
    } else {
        clients = XML.getChildren(clientsElement, "client").stream()
                     .flatMap(elem -> getClient(elem, deployState).stream())
                     .toList();
        if (clients.stream().allMatch(client -> client.certificates().isEmpty()))
            throw new IllegalArgumentException("At least one client must require a certificate");
    }
    List<X509Certificate> operatorAndTesterCertificates = deployState.getProperties().operatorCertificates();
    if ( ! operatorAndTesterCertificates.isEmpty())
        clients = Stream.concat(clients.stream(), Stream.of(Client.internalClient(operatorAndTesterCertificates))).toList();
    cluster.setClients(legacyMode, clients);
}
/**
 * Parses a single 'client' element into a {@link Client}, resolving either certificate files
 * or data plane token references. When certificates are present, tokens are ignored. Returns
 * empty when the client only references tokens and none of them are known and active (the
 * client is then skipped with an INFO log message).
 *
 * @throws IllegalArgumentException if the id starts with '_' or a referenced certificate file is missing
 */
private Optional<Client> getClient(Element clientElement, DeployState state) {
    String clientId = XML.attribute("id", clientElement).orElseThrow();
    if (clientId.startsWith("_"))
        throw new IllegalArgumentException("Invalid client id '%s', id cannot start with '_'".formatted(clientId));
    List<String> permissions = XML.attribute("permissions", clientElement)
            .map(p -> p.split(",")).stream()
            .flatMap(Arrays::stream)
            .toList();
    var certificates = XML.getChildren(clientElement, "certificate").stream()
            .flatMap(certElem -> {
                var file = app.getFile(Path.fromString(certElem.getAttribute("file")));
                if (!file.exists()) {
                    throw new IllegalArgumentException("Certificate file '%s' for client '%s' does not exist"
                            .formatted(file.getPath().getRelative(), clientId));
                }
                return getCertificates(file).stream();
            })
            .toList();
    // Certificate-based client: token references (if any) are not consulted.
    if (!certificates.isEmpty()) return Optional.of(new Client(clientId, permissions, certificates, List.of()));
    var knownTokens = state.getProperties().dataplaneTokens().stream()
            .collect(Collectors.toMap(DataplaneToken::tokenId, Function.identity()));
    var referencedTokens = XML.getChildren(clientElement, "token").stream()
            .map(elem -> {
                var tokenId = elem.getAttribute("id");
                var token = knownTokens.get(tokenId);
                if (token == null)
                    log.logApplicationPackage(
                            WARNING, "Token '%s' for client '%s' does not exist".formatted(tokenId, clientId));
                return token;
            })
            .filter(token -> {
                if (token == null) return false;
                boolean empty = token.versions().isEmpty();
                if (empty)
                    log.logApplicationPackage(
                            WARNING, "Token '%s' for client '%s' has no active versions"
                                    .formatted(token.tokenId(), clientId));
                return !empty;
            })
            .toList();
    if (referencedTokens.isEmpty()) {
        // Fixed wording: message previously read "activate tokens".
        log.log(Level.INFO, "Skipping client '%s' as it does not refer to any active tokens".formatted(clientId));
        return Optional.empty();
    }
    return Optional.of(new Client(clientId, permissions, List.of(), referencedTokens));
}
/**
 * Reads all X.509 certificates from the given PEM file. Returns an empty list when the file
 * does not exist; a file that exists but contains no certificates is an error.
 *
 * @throws IllegalArgumentException if the file exists but contains no certificates
 * @throws RuntimeException if reading the file fails
 */
private List<X509Certificate> getCertificates(ApplicationFile file) {
    if (!file.exists()) return List.of();
    // try-with-resources: the original leaked the reader if readAll threw before close().
    try (Reader reader = file.createReader()) {
        String certPem = IOUtils.readAll(reader);
        List<X509Certificate> x509Certificates = X509CertificateUtils.certificateListFromPem(certPem);
        if (x509Certificates.isEmpty()) {
            throw new IllegalArgumentException("File %s does not contain any certificates.".formatted(file.getPath().getRelative()));
        }
        return x509Certificates;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
/** Lets any configured access control wire its filters onto the default hosted connector. */
private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) {
    cluster.getHttp().getAccessControl()
            .ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(cluster.getHttp()));
}
/**
 * Adds the hosted mTLS data plane connector. With an endpoint certificate present, public
 * zones authenticate clients against security/clients.pem while non-public zones use the
 * Athenz CA bundle; without one, the connector falls back to WANT_WITH_ENFORCER client auth.
 */
private void addCloudMtlsConnector(DeployState state, ApplicationContainerCluster cluster) {
    // NOTE(review): assumes an HTTP server is already present (see addHostedImplicitHttpIfNotPresent);
    // .get() would throw NoSuchElementException otherwise — confirm call ordering.
    JettyHttpServer server = cluster.getHttp().getHttpServer().get();
    String serverName = server.getComponentId().getName();
    var builder = HostedSslConnectorFactory.builder(serverName, getMtlsDataplanePort(state))
            .proxyProtocol(true, state.getProperties().featureFlags().enableProxyProtocolMixedMode())
            .tlsCiphersOverride(state.getProperties().tlsCiphersOverride())
            .endpointConnectionTtl(state.getProperties().endpointConnectionTtl());
    var endpointCert = state.endpointCertificateSecrets().orElse(null);
    if (endpointCert != null) {
        builder.endpointCertificate(endpointCert);
        boolean isPublic = state.zone().system().isPublic();
        List<X509Certificate> clientCertificates = getClientCertificates(cluster);
        if (isPublic) {
            // Public zones require a client CA; missing clients.pem fails the deployment.
            if (clientCertificates.isEmpty())
                throw new IllegalArgumentException("Client certificate authority security/clients.pem is missing - " +
                        "see: https:
            builder.tlsCaCertificatesPem(X509CertificateUtils.toPem(clientCertificates))
                    .clientAuth(SslClientAuth.WANT_WITH_ENFORCER);
        } else {
            builder.tlsCaCertificatesPath("/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem");
            // Client auth is mandatory only when access control explicitly demands it.
            var needAuth = cluster.getHttp().getAccessControl()
                    .map(accessControl -> accessControl.clientAuthentication)
                    .map(clientAuth -> clientAuth == AccessControl.ClientAuthentication.need)
                    .orElse(false);
            builder.clientAuth(needAuth ? SslClientAuth.NEED : SslClientAuth.WANT);
        }
    } else {
        builder.clientAuth(SslClientAuth.WANT_WITH_ENFORCER);
    }
    var connectorFactory = builder.build();
    cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory));
    server.addConnector(connectorFactory);
}
/** Returns all certificates from all of the cluster's configured clients, flattened. */
private List<X509Certificate> getClientCertificates(ApplicationContainerCluster cluster) {
    return cluster.getClients().stream()
                  .flatMap(client -> client.certificates().stream())
                  .toList();
}
/** Returns whether this deployment is a hosted tenant application, per the deploy state. */
private static boolean isHostedTenantApplication(ConfigModelContext context) {
    return context.getDeployState().isHostedTenantApplication(context.getApplicationType());
}
/**
 * Ensures the cluster has an HTTP config, an HTTP server, and a connector on the default
 * web service port, creating each one only when missing.
 */
private static void addHostedImplicitHttpIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
    if (cluster.getHttp() == null)
        cluster.setHttp(new Http(new FilterChains(cluster)));
    JettyHttpServer server = cluster.getHttp().getHttpServer().orElse(null);
    if (server == null) {
        server = new JettyHttpServer("DefaultHttpServer", cluster, deployState);
        cluster.getHttp().setHttpServer(server);
    }
    int defaultPort = Defaults.getDefaults().vespaWebServicePort();
    boolean hasDefaultConnector = server.getConnectorFactories().stream()
                                        .anyMatch(connector -> connector.getListenPort() == defaultPort);
    if ( ! hasDefaultConnector)
        server.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build());
}
/**
 * Adds implicit Athenz-domain access control (with required client authentication) when no
 * access control is configured and the deployment has an Athenz domain.
 */
private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
    Http http = cluster.getHttp();
    if (http.getAccessControl().isPresent()) return;
    deployState.getProperties().athenzDomain().ifPresent(tenantDomain ->
            new AccessControl.Builder(tenantDomain.value())
                    .setHandlers(cluster)
                    .clientAuthentication(AccessControl.ClientAuthentication.need)
                    .build()
                    .configureHttpFilterChains(http));
}
/** Builds the Http model from the 'http' element, stripping all servers when networking is disabled. */
private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement, ConfigModelContext context) {
    HttpBuilder builder = new HttpBuilder(portBindingOverride(deployState, context));
    Http http = builder.build(deployState, cluster, httpElement);
    if (networking == Networking.disable)
        http.removeAllServers();
    return http;
}
/** Builds and attaches the document API to the cluster, if declared in the spec. */
private void addDocumentApi(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    ContainerDocumentApi documentApi = buildDocumentApi(deployState, cluster, spec, context);
    if (documentApi != null)
        cluster.setDocumentApi(documentApi);
}
/** Builds and attaches document processing, if declared, and derives the message bus parameters from its options. */
private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    ContainerDocproc docproc = buildDocproc(deployState, cluster, spec);
    if (docproc == null) return;
    cluster.setDocproc(docproc);
    ContainerDocproc.Options options = docproc.options;
    cluster.setMbusParams(new ApplicationContainerCluster.MbusParams(options.maxConcurrentFactor,
                                                                     options.documentExpansionFactor,
                                                                     options.containerCoreMemory));
}
/**
 * Configures search from an optional 'search' element: resolves includes, builds the search
 * chains, adds the search handler, and validates/adds any renderer components.
 */
private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    Element search = XML.getChild(spec, "search");
    if (search == null) return;
    addIncludes(search);
    cluster.setSearch(buildSearch(deployState, cluster, search));
    addSearchHandler(deployState, cluster, search, context);
    validateAndAddConfiguredComponents(deployState, cluster, search, "renderer", ContainerModelBuilder::validateRendererElement);
}
/**
 * Configures stateless model evaluation from the 'model-evaluation' element: takes the
 * file-distributed ONNX models from the rank profiles and applies per-model overrides
 * (execution mode, thread counts, GPU device) declared under onnx/models.
 */
private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
    if (modelEvaluationElement == null) return;
    RankProfileList profiles =
            context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;
    // Cloned before applying the stateless overrides below — presumably to avoid mutating the
    // models as seen by ranking; confirm against FileDistributedOnnxModels.
    FileDistributedOnnxModels models = profiles.getOnnxModels().clone();
    Element onnxElement = XML.getChild(modelEvaluationElement, "onnx");
    Element modelsElement = XML.getChild(onnxElement, "models");
    for (Element modelElement : XML.getChildren(modelsElement, "model") ) {
        OnnxModel onnxModel = models.asMap().get(modelElement.getAttribute("name"));
        if (onnxModel == null) {
            // Unknown model names are logged and skipped rather than failing deployment.
            String availableModels = String.join(", ", profiles.getOnnxModels().asMap().keySet());
            context.getDeployState().getDeployLogger().logApplicationPackage(WARNING,
                    "Model '" + modelElement.getAttribute("name") + "' not found. Available ONNX " +
                    "models are: " + availableModels + ". Skipping this configuration.");
            continue;
        }
        onnxModel.setStatelessExecutionMode(getStringValue(modelElement, "execution-mode", null));
        onnxModel.setStatelessInterOpThreads(getIntValue(modelElement, "interop-threads", -1));
        onnxModel.setStatelessIntraOpThreads(getIntValue(modelElement, "intraop-threads", -1));
        Element gpuDeviceElement = XML.getChild(modelElement, "gpu-device");
        if (gpuDeviceElement != null) {
            int gpuDevice = Integer.parseInt(gpuDeviceElement.getTextContent());
            // The cluster "has a GPU" when any container host provides nonzero GPU resources.
            boolean hasGpu = cluster.getContainers().stream().anyMatch(container -> container.getHostResource() != null &&
                    !container.getHostResource().realResources().gpuResources().isZero());
            onnxModel.setGpuDevice(gpuDevice, hasGpu);
        }
        cluster.onnxModelCost().registerModel(context.getApplicationPackage().getFile(onnxModel.getFilePath()));
    }
    cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles, models));
}
/** Returns the text content of the named child element, or the default when the child is absent. */
private String getStringValue(Element element, String name, String defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return child.getTextContent();
}
/** Returns the int text content of the named child element, or the default when the child is absent. */
private int getIntValue(Element element, String name, int defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return Integer.parseInt(child.getTextContent());
}
protected void addModelEvaluationRuntime(ApplicationContainerCluster cluster) {
    // Always install the model-evaluation bundles: they expose public API packages that
    // customer code may use even without <model-evaluation> in services.xml.
    cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_EVALUATION_BUNDLE_FILE);
    cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_INTEGRATION_BUNDLE_FILE);
    cluster.addPlatformBundle(ContainerModelEvaluation.ONNXRUNTIME_BUNDLE_FILE);
    // The ONNX runtime is always available for injection into any component.
    cluster.addSimpleComponent(ContainerModelEvaluation.ONNX_RUNTIME_CLASS, null,
                               ContainerModelEvaluation.INTEGRATION_BUNDLE_NAME);
    // Runtime utilities (such as metrics) for embedder implementations.
    cluster.addSimpleComponent("ai.vespa.embedding.EmbedderRuntime", null,
                               ContainerModelEvaluation.INTEGRATION_BUNDLE_NAME);
}
/**
 * Configures processing chains from an optional 'processing' element, adding the
 * search-and-docproc bundles and validating any renderer components.
 */
private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    Element processing = XML.getChild(spec, "processing");
    if (processing == null) return;
    cluster.addSearchAndDocprocBundles();
    addIncludes(processing);
    var chains = new DomProcessingBuilder(null).build(deployState, cluster, processing);
    cluster.setProcessingChains(chains,
                                serverBindings(deployState, context, processing, ProcessingChains.defaultBindings).toArray(BindingPattern[]::new));
    validateAndAddConfiguredComponents(deployState, cluster, processing, "renderer", ContainerModelBuilder::validateRendererElement);
}
/** Builds the container search model: search chains plus query profiles, semantic rules and page templates. */
private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) {
    SearchChains chains = new DomSearchChainsBuilder().build(deployState, containerCluster, producerSpec);
    ContainerSearch search = new ContainerSearch(deployState, containerCluster, chains);
    applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), search);
    search.setQueryProfiles(deployState.getQueryProfiles());
    search.setSemanticRules(deployState.getSemanticRules());
    return search;
}
/** Validates and applies page templates from the application package directories. */
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage, ContainerSearch containerSearch) {
    PageTemplates.validate(applicationPackage);
    containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
/** Adds each user-declared 'handler' element as a component on the cluster. */
private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    for (Element handlerElement : XML.getChildren(spec, "handler")) {
        var handler = new DomHandlerBuilder(cluster, portBindingOverride(deployState, context))
                .build(deployState, cluster, handlerElement);
        cluster.addComponent(handler);
    }
}
/** Validates that the services element declares container version 1.x. */
private void checkVersion(Element spec) {
    String version = spec.getAttribute("version");
    if (Version.fromString(version).equals(new Version(1))) return;
    throw new IllegalArgumentException("Expected container version to be 1.0, but got " + version);
}
/** Adds nodes to the cluster: a single standalone node in standalone mode, otherwise per the XML spec. */
private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    if (standaloneBuilder) {
        addStandaloneNode(cluster, context.getDeployState());
    } else {
        addNodesFromXml(cluster, spec, context);
    }
}
/** Adds a single container named "standalone", indexed after the existing containers. */
private void addStandaloneNode(ApplicationContainerCluster cluster, DeployState deployState) {
    int index = cluster.getContainers().size();
    cluster.addContainers(Collections.singleton(new ApplicationContainer(cluster, "standalone", index, deployState)));
}
/** Builds the effective JVM GC options string via JvmGcOptions using the current deploy state. */
private static String buildJvmGCOptions(ConfigModelContext context, String jvmGCOptions) {
    return new JvmGcOptions(context.getDeployState(), jvmGCOptions).build();
}
/** Builds the JVM options string from the nodes element; legacyOptions selects the deprecated attribute form. */
private static String getJvmOptions(Element nodesElement, DeployState deployState, boolean legacyOptions) {
    return new JvmOptions(nodesElement, deployState, legacyOptions).build();
}
/** Returns the value of the named attribute, or null when the attribute is not present. */
private static String extractAttribute(Element element, String attrName) {
    if ( ! element.hasAttribute(attrName)) return null;
    return element.getAttribute(attrName);
}
/** Applies JVM settings from a 'jvm' child element when present, else from the legacy nodes-tag attributes. */
private void extractJvmOptions(List<ApplicationContainer> nodes,
                               ApplicationContainerCluster cluster,
                               Element nodesElement,
                               ConfigModelContext context) {
    Element jvmElement = XML.getChild(nodesElement, "jvm");
    if (jvmElement != null)
        extractJvmTag(nodes, cluster, nodesElement, jvmElement, context);
    else
        extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
}
/**
 * Handles JVM settings given directly on the legacy nodes tag (no 'jvm' child element),
 * logging deprecation warnings for the 'jvm-gc-options' and 'allocated-memory' attributes.
 */
private void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
                                          Element nodesElement, ConfigModelContext context) {
    applyNodesTagJvmArgs(nodes, getJvmOptions(nodesElement, context.getDeployState(), true));
    // Only fall back to the legacy attribute when no GC options are set on the cluster already.
    if (cluster.getJvmGCOptions().isEmpty()) {
        String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS);
        if (jvmGCOptions != null && !jvmGCOptions.isEmpty()) {
            DeployLogger logger = context.getDeployState().getDeployLogger();
            logger.logApplicationPackage(WARNING, "'jvm-gc-options' is deprecated and will be removed in Vespa 9." +
                    " Please merge into 'gc-options' in 'jvm' element." +
                    " See https:
        }
        cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions));
    }
    if (applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME)))
        context.getDeployState().getDeployLogger()
                .logApplicationPackage(WARNING, "'allocated-memory' is deprecated and will be removed in Vespa 9." +
                        " Please merge into 'allocated-memory' in 'jvm' element." +
                        " See https:
}
/** Applies JVM args, memory percentage and GC options declared on the 'jvm' element. */
private void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
                           Element nodesElement, Element jvmElement, ConfigModelContext context) {
    applyNodesTagJvmArgs(nodes, getJvmOptions(nodesElement, context.getDeployState(), false));
    applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
    String gcOptions = extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS);
    cluster.setJvmGCOptions(buildJvmGCOptions(context, gcOptions));
}
/**
 * Add nodes to cluster according to the given containerElement.
 *
 * Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set
 * of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed
 * simultaneously for all active config models.
 */
private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
    Element nodesElement = XML.getChild(containerElement, "nodes");
    if (nodesElement == null) {
        // No nodes tag at all: fall back to implicit allocation.
        cluster.addContainers(allocateWithoutNodesTag(cluster, context));
    } else {
        List<ApplicationContainer> nodes = createNodes(cluster, containerElement, nodesElement, context);
        extractJvmOptions(nodes, cluster, nodesElement, context);
        applyDefaultPreload(nodes, nodesElement);
        // Propagate declared environment variables to every node.
        var envVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT)).entrySet();
        for (var container : nodes) {
            for (var entry : envVars) {
                container.addEnvironmentVariable(entry.getKey(), entry.getValue());
            }
        }
        if (useCpuSocketAffinity(nodesElement))
            AbstractService.distributeCpuSocketAffinity(nodes);
        cluster.addContainers(nodes);
    }
}
/** Looks up the zone endpoint declared in the deployment spec for this instance, zone and cluster. */
private ZoneEndpoint zoneEndpoint(ConfigModelContext context, ClusterSpec.Id cluster) {
    var properties = context.properties();
    InstanceName instance = properties.applicationId().instance();
    ZoneId zone = ZoneId.from(properties.zone().environment(), properties.zone().region());
    return context.getApplicationPackage().getDeploymentSpec().zoneEndpoint(instance, zone, cluster);
}
/**
 * Extracts child elements of the given (possibly null) environment-variables element into an
 * insertion-ordered map of validated identifier name to text content.
 */
private static Map<String, String> getEnvironmentVariables(Element environmentVariables) {
    Map<String, String> env = new LinkedHashMap<>();
    if (environmentVariables == null) return env;
    for (Element child : XML.getChildren(environmentVariables)) {
        var name = new com.yahoo.text.Identifier(child.getNodeName());
        env.put(name.toString(), child.getTextContent());
    }
    return env;
}
/**
 * Creates the cluster's nodes based on the nodes element's attributes. Dispatch order matters:
 * explicit 'type' first, then 'of' (deprecated combined cluster), then 'count', then the hosted
 * manual-deployment default, and finally an explicit node list.
 */
private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element containerElement,
                                               Element nodesElement, ConfigModelContext context) {
    if (nodesElement.hasAttribute("type"))
        return createNodesFromNodeType(cluster, nodesElement, context);
    if (nodesElement.hasAttribute("of")) {
        List<ApplicationContainer> nodes = createNodesFromContentServiceReference(cluster, nodesElement, context);
        log.logApplicationPackage(WARNING, "Declaring combined cluster with <nodes of=\"...\"> is deprecated without " +
                                           "replacement, and the feature will be removed in Vespa 9. Use separate container and " +
                                           "content clusters instead");
        return nodes;
    }
    if (nodesElement.hasAttribute("count"))
        return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
    if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed())
        return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
    return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
}
/**
 * Parses a memory percentage string of the form "NN%" and applies it to the cluster.
 * Returns false when the value is null or empty (nothing applied), true when applied.
 *
 * @throws IllegalArgumentException if the '%' sign is missing or the number does not parse
 */
private static boolean applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
    if (memoryPercentage == null || memoryPercentage.isEmpty()) return false;
    String trimmed = memoryPercentage.trim();
    if ( ! trimmed.endsWith("%"))
        throw new IllegalArgumentException("Missing % sign");
    String digits = trimmed.substring(0, trimmed.length() - 1).trim();
    try {
        cluster.setMemoryPercentage(Integer.parseInt(digits));
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
                                           " must be an integer percentage ending by the '%' sign", e);
    }
    return true;
}
/**
 * Allocate a container cluster without a nodes tag. Hosted zones provision a dedicated cluster
 * (2 nodes in production, 1 elsewhere); self-hosted deployments get a single container on the
 * single-node host spec.
 */
private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    HostSystem hostSystem = cluster.hostSystem();
    if (deployState.isHosted()) {
        int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
        deployState.getDeployLogger().logApplicationPackage(Level.INFO, "Using " + nodeCount + " nodes in " + cluster);
        var nodesSpec = NodesSpecification.dedicated(nodeCount, context);
        ClusterSpec.Id clusterId = ClusterSpec.Id.from(cluster.getName());
        var hosts = nodesSpec.provision(hostSystem,
                                        ClusterSpec.Type.container,
                                        clusterId,
                                        zoneEndpoint(context, clusterId),
                                        deployState.getDeployLogger(),
                                        // presumably the 'requires ZooKeeper' flag, cf. createNodesFromNodeCount — confirm
                                        false,
                                        context.clusterInfo().build());
        return createNodesFromHosts(hosts, cluster, context.getDeployState());
    }
    else {
        return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);
    }
}
/** Creates a one-node cluster ("container.0") placed on the given host. */
private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    ApplicationContainer container = new ApplicationContainer(cluster, "container.0", 0, deployState);
    container.setHostResource(host);
    container.initService(deployState);
    return List.of(container);
}
/**
 * Provisions nodes from a counted nodes spec (or the hosted manual-deployment default).
 * Provisioning errors are rethrown with the cluster as context.
 */
private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
    try {
        var nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
        var clusterId = ClusterSpec.Id.from(cluster.name());
        Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(),
                                                                                  ClusterSpec.Type.container,
                                                                                  clusterId,
                                                                                  zoneEndpoint(context, clusterId),
                                                                                  log,
                                                                                  // whether these nodes also run an embedded ZooKeeper
                                                                                  getZooKeeper(containerElement) != null,
                                                                                  context.clusterInfo().build());
        return createNodesFromHosts(hosts, cluster, context.getDeployState());
    }
    catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("In " + cluster, e);
    }
}
/** Allocates all hosts of the given node type (infrastructure-style allocation) to this cluster. */
private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
    ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
            .vespaVersion(context.getDeployState().getWantedNodeVespaVersion())
            .dockerImageRepository(context.getDeployState().getWantedDockerImageRepo())
            .build();
    Map<HostResource, ClusterMembership> hosts =
            cluster.getRoot().hostSystem().allocateHosts(clusterSpec,
                                                         Capacity.fromRequiredNodeType(type), log);
    return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
/**
 * Provisions this container cluster on the hosts of the content cluster referenced by the
 * deprecated 'of' attribute (combined cluster).
 */
private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodesSpecification nodeSpecification;
    try {
        nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
    } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
    }
    String referenceId = nodesElement.getAttribute("of");
    cluster.setHostClusterId(referenceId);
    Map<HostResource, ClusterMembership> hosts =
            StorageGroup.provisionHosts(nodeSpecification,
                                        referenceId,
                                        cluster.getRoot().hostSystem(),
                                        context);
    return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
/** Materializes provisioned hosts as initialized containers named "container.&lt;index&gt;". */
private List<ApplicationContainer> createNodesFromHosts(Map<HostResource, ClusterMembership> hosts,
                                                        ApplicationContainerCluster cluster,
                                                        DeployState deployState) {
    List<ApplicationContainer> nodes = new ArrayList<>();
    hosts.forEach((host, membership) -> {
        ApplicationContainer node = new ApplicationContainer(cluster, "container." + membership.index(),
                                                             membership.retired(), membership.index(), deployState);
        node.setHostResource(host);
        node.initService(deployState);
        nodes.add(node);
    });
    return nodes;
}
/** Builds one container per explicit 'node' child element, indexed in document order. */
private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
    List<Element> nodeElements = XML.getChildren(nodesElement, "node");
    List<ApplicationContainer> nodes = new ArrayList<>(nodeElements.size());
    for (int index = 0; index < nodeElements.size(); index++)
        nodes.add(new ContainerServiceBuilder("container." + index, index)
                          .build(deployState, cluster, nodeElements.get(index)));
    return nodes;
}
/** Returns whether the nodes element enables CPU socket affinity (absent means false). */
private static boolean useCpuSocketAffinity(Element nodesElement) {
    return nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)
           && Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
}
/** Prepends the given JVM args to every container that has no JVM options assigned yet. */
private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
    containers.stream()
              .filter(container -> container.getAssignedJvmOptions().isEmpty())
              .forEach(container -> container.prependJvmOptions(jvmArgs));
}
/** Propagates the preload attribute from the nodes element, if present, to every container. */
private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
    if (nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) {
        String preload = nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME);
        for (Container container : containers)
            container.setPreLoad(preload);
    }
}
/** Adds the search handler (and its execution factory) to the cluster. */
private void addSearchHandler(DeployState deployState, ApplicationContainerCluster cluster, Element searchElement, ConfigModelContext context) {
    // Hosted tenant applications bind the handler on the data plane ports instead of the default binding
    List<BindingPattern> defaultBindings = isHostedTenantApplication(context)
            ? SearchHandler.bindingPattern(getDataplanePorts(deployState))
            : List.of(SearchHandler.DEFAULT_BINDING);
    SearchHandler searchHandler = new SearchHandler(cluster,
                                                    serverBindings(deployState, context, searchElement, defaultBindings),
                                                    ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null));
    cluster.addComponent(searchHandler);
    searchHandler.addComponent(Component.fromClassAndBundle(SearchHandler.EXECUTION_FACTORY, PlatformBundles.SEARCH_AND_DOCPROC_BUNDLE));
}
/** Returns user-specified &lt;binding&gt; patterns, or a copy of the defaults when none are given. */
private List<BindingPattern> serverBindings(DeployState deployState, ConfigModelContext context, Element searchElement, Collection<BindingPattern> defaultBindings) {
    List<Element> bindingElements = XML.getChildren(searchElement, "binding");
    return bindingElements.isEmpty()
            ? List.copyOf(defaultBindings)
            : toBindingList(deployState, context, bindingElements);
}
/** Converts &lt;binding&gt; elements to binding patterns, applying data plane port overrides for hosted tenant apps. */
private List<BindingPattern> toBindingList(DeployState deployState, ConfigModelContext context, List<Element> bindingElements) {
    Set<Integer> portOverride = isHostedTenantApplication(context) ? getDataplanePorts(deployState) : Set.of();
    List<BindingPattern> patterns = new ArrayList<>();
    for (Element bindingElement : bindingElements) {
        String pattern = bindingElement.getTextContent().trim();
        if (pattern.isEmpty()) continue; // empty <binding> elements are ignored
        patterns.addAll(userBindingPattern(pattern, portOverride));
    }
    return patterns;
}
/** Expands one binding path into one pattern per overridden port, or a single pattern when no override applies. */
private static Collection<UserBindingPattern> userBindingPattern(String path, Set<Integer> portBindingOverride) {
    UserBindingPattern base = UserBindingPattern.fromPattern(path);
    if (portBindingOverride.isEmpty()) return Set.of(base);
    List<UserBindingPattern> withPorts = new ArrayList<>();
    for (Integer port : portBindingOverride)
        withPorts.add(base.withOverriddenPort(port));
    return withPorts;
}
/** Builds the document-api model from the &lt;document-api&gt; element, or returns null when absent. */
private ContainerDocumentApi buildDocumentApi(DeployState deployState, ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    Element documentApiElement = XML.getChild(spec, "document-api");
    if (documentApiElement == null) return null;

    boolean ignoreUndefinedFields = "true".equals(XML.getValue(XML.getChild(documentApiElement, "ignore-undefined-fields")));
    return new ContainerDocumentApi(cluster,
                                    DocumentApiOptionsBuilder.build(documentApiElement),
                                    ignoreUndefinedFields,
                                    portBindingOverride(deployState, context));
}
/** Returns the data plane ports for hosted tenant applications, otherwise an empty set. */
private Set<Integer> portBindingOverride(DeployState deployState, ConfigModelContext context) {
    if (isHostedTenantApplication(context)) return getDataplanePorts(deployState);
    return Set.of();
}
/** Builds the docproc model from the &lt;document-processing&gt; element, or returns null when absent. */
private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    Element docprocElement = XML.getChild(spec, "document-processing");
    if (docprocElement == null) return null;

    addIncludes(docprocElement); // inline any <include> directives before building chains
    return new ContainerDocproc(cluster,
                                new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement),
                                DocprocOptionsBuilder.build(docprocElement, deployState.getDeployLogger()),
                                !standaloneBuilder);
}
/** Inlines the content referenced by any &lt;include&gt; children of the given element. */
private void addIncludes(Element parentElement) {
    List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includes.isEmpty()) return;
    // Includes are resolved against the application package, so one must be present
    if (app == null)
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
    includes.forEach(include -> addInclude(parentElement, include));
}
/** Imports every child element of every file in the include directory into the parent element. */
private void addInclude(Element parentElement, Element include) {
    String dirName = include.getAttribute(IncludeDirs.DIR);
    app.validateIncludeDir(dirName); // throws if the directory is not part of the application package
    for (Element includedFile : Xml.allElemsFromPath(app, dirName)) {
        for (Element child : XML.getChildren(includedFile)) {
            // importNode(deep=true) copies the node into the parent's document before appending
            parentElement.appendChild(parentElement.getOwnerDocument().importNode(child, true));
        }
    }
}
/** Adds one component to the cluster per child element with the given name, resolving model ids first. */
private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
                                            Element parent, String componentName) {
    XML.getChildren(parent, componentName).forEach(componentElement -> {
        ModelIdResolver.resolveModelIds(componentElement, deployState.isHosted());
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
    });
}
/** Like addConfiguredComponents, but runs each element through the given validator (which throws on invalid input) first. */
private static void validateAndAddConfiguredComponents(DeployState deployState,
                                                       ContainerCluster<? extends Container> cluster,
                                                       Element spec,
                                                       String componentName,
                                                       Consumer<Element> elementValidator) {
    XML.getChildren(spec, componentName).forEach(componentElement -> {
        elementValidator.accept(componentElement);
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
    });
}
/**
 * Adds the Athenz identity provider component to the cluster when the deployment spec
 * declares an Athenz domain. Also replaces the platform-provided identity provider
 * component and propagates the identity to each container via service properties.
 *
 * No-op when the deployment spec has no Athenz domain.
 */
private void addIdentityProvider(ApplicationContainerCluster cluster,
                                 List<ConfigServerSpec> configServerSpecs,
                                 HostName loadBalancerName,
                                 URI ztsUrl,
                                 String athenzDnsSuffix,
                                 Zone zone,
                                 DeploymentSpec spec) {
    spec.athenzDomain()
        .ifPresent(domain -> {
            // An Athenz domain without a matching service for this instance/zone is a configuration error
            AthenzService service = spec.athenzService(app.getApplicationId().instance(), zone.environment(), zone.region())
                    .orElseThrow(() -> new IllegalArgumentException("Missing Athenz service configuration in instance '" +
                                                                    app.getApplicationId().instance() + "'"));
            String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
            IdentityProvider identityProvider = new IdentityProvider(domain,
                                                                     service,
                                                                     getLoadBalancerName(loadBalancerName, configServerSpecs),
                                                                     ztsUrl,
                                                                     zoneDnsSuffix,
                                                                     zone);
            // Replace the default platform provider with the application-specific one
            cluster.removeComponent(ComponentId.fromString("com.yahoo.container.jdisc.AthenzIdentityProviderProvider"));
            cluster.addComponent(identityProvider);

            var serviceIdentityProviderProvider = "com.yahoo.vespa.athenz.identityprovider.client.ServiceIdentityProviderProvider";
            cluster.addComponent(new SimpleComponent(new ComponentModel(serviceIdentityProviderProvider, serviceIdentityProviderProvider, "vespa-athenz")));

            // Each container service needs the identity as properties (read by the node agent / service wrapper)
            cluster.getContainers().forEach(container -> {
                container.setProp("identity.domain", domain.value());
                container.setProp("identity.service", service.value());
            });
        });
}
/** Returns the given load balancer name, falling back to the first config server host name (or "unknown"). */
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
    if (loadbalancerName != null) return loadbalancerName;
    String fallback = configServerSpecs.stream()
                                       .findFirst()
                                       .map(ConfigServerSpec::getHostName)
                                       .orElse("unknown");
    return HostName.of(fallback);
}
/** Returns the &lt;zookeeper&gt; child of the given services element, or null if not present. */
private static Element getZooKeeper(Element spec) {
    return XML.getChild(spec, "zookeeper");
}
/** Disallow renderers named "XmlRenderer" or "JsonRenderer" — those ids are taken by the built-in renderers. */
private static void validateRendererElement(Element element) {
    String id = element.getAttribute("id");
    if (xmlRendererId.equals(id) || jsonRendererId.equals(id))
        throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", id));
}
/** Returns whether the given element is a &lt;container&gt; element. */
public static boolean isContainerTag(Element element) {
    String tagName = element.getTagName();
    return CONTAINER_TAG.equals(tagName);
}
/**
* Validates JVM options and logs a warning or fails deployment (depending on feature flag)
* if anyone of them has invalid syntax or is an option that is unsupported for the running system.
*/
private static class JvmOptions {
private static final Pattern validPattern = Pattern.compile("-[a-zA-z0-9=:./,+*-]+");
private static final Pattern invalidInHostedPattern = Pattern.compile("-Xrunjdwp:transport=.*");
private final Element nodesElement;
private final DeployLogger logger;
private final boolean legacyOptions;
private final boolean isHosted;
public JvmOptions(Element nodesElement, DeployState deployState, boolean legacyOptions) {
this.nodesElement = nodesElement;
this.logger = deployState.getDeployLogger();
this.legacyOptions = legacyOptions;
this.isHosted = deployState.isHosted();
}
String build() {
if (legacyOptions)
return buildLegacyOptions();
Element jvmElement = XML.getChild(nodesElement, "jvm");
if (jvmElement == null) return "";
String jvmOptions = jvmElement.getAttribute(VespaDomBuilder.OPTIONS);
if (jvmOptions.isEmpty()) return "";
validateJvmOptions(jvmOptions);
return jvmOptions;
}
String buildLegacyOptions() {
String jvmOptions = null;
if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
if (! jvmOptions.isEmpty())
logger.logApplicationPackage(WARNING, "'jvm-options' is deprecated and will be removed in Vespa 9." +
" Please merge 'jvm-options' into 'options' or 'gc-options' in 'jvm' element." +
" See https:
}
validateJvmOptions(jvmOptions);
return jvmOptions;
}
private void validateJvmOptions(String jvmOptions) {
if (jvmOptions == null || jvmOptions.isEmpty()) return;
String[] optionList = jvmOptions.split(" ");
List<String> invalidOptions = Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> !Pattern.matches(validPattern.pattern(), option))
.sorted()
.collect(Collectors.toCollection(ArrayList::new));
if (isHosted)
invalidOptions.addAll(Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> Pattern.matches(invalidInHostedPattern.pattern(), option))
.sorted().toList());
if (invalidOptions.isEmpty()) return;
String message = "Invalid or misplaced JVM options in services.xml: " +
String.join(",", invalidOptions) + "." +
" See https:
if (isHosted)
throw new IllegalArgumentException(message);
else
logger.logApplicationPackage(WARNING, message);
}
}
/**
* Validates JVM GC options and logs a warning or fails deployment (depending on feature flag)
* if anyone of them has invalid syntax or is an option that is unsupported for the running system
* (e.g. uses CMS options for hosted Vespa, which uses JDK 17).
*/
private static class JvmGcOptions {
private static final Pattern validPattern = Pattern.compile("-XX:[+-]*[a-zA-z0-9=]+");
private static final Pattern invalidCMSPattern = Pattern.compile("-XX:[+-]\\w*CMS[a-zA-z0-9=]+");
private final DeployState deployState;
private final String jvmGcOptions;
private final DeployLogger logger;
private final boolean isHosted;
public JvmGcOptions(DeployState deployState, String jvmGcOptions) {
this.deployState = deployState;
this.jvmGcOptions = jvmGcOptions;
this.logger = deployState.getDeployLogger();
this.isHosted = deployState.isHosted();
}
private String build() {
String options = deployState.getProperties().jvmGCOptions();
if (jvmGcOptions != null) {
options = jvmGcOptions;
String[] optionList = options.split(" ");
List<String> invalidOptions = Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> !Pattern.matches(validPattern.pattern(), option)
|| Pattern.matches(invalidCMSPattern.pattern(), option)
|| option.equals("-XX:+UseConcMarkSweepGC"))
.sorted()
.toList();
logOrFailInvalidOptions(invalidOptions);
}
if (options == null || options.isEmpty())
options = deployState.isHosted() ? ContainerCluster.PARALLEL_GC : ContainerCluster.G1GC;
return options;
}
private void logOrFailInvalidOptions(List<String> options) {
if (options.isEmpty()) return;
String message = "Invalid or misplaced JVM GC options in services.xml: " +
String.join(",", options) + "." +
" See https:
if (isHosted)
throw new IllegalArgumentException(message);
else
logger.logApplicationPackage(WARNING, message);
}
}
/** Returns the data plane ports: always the mTLS port, plus the token port when token support is on. */
private static Set<Integer> getDataplanePorts(DeployState ds) {
    int mtlsPort = getMtlsDataplanePort(ds);
    OptionalInt tokenPort = getTokenDataplanePort(ds);
    if (tokenPort.isEmpty()) return Set.of(mtlsPort);
    return Set.of(mtlsPort, tokenPort.getAsInt());
}
/** Returns the mTLS data plane port: 8443 with token support, 4443 otherwise. */
private static int getMtlsDataplanePort(DeployState ds) {
    if (enableTokenSupport(ds)) return 8443;
    return 4443;
}
/** Returns the token data plane port (8444) when token support is enabled, otherwise empty. */
private static OptionalInt getTokenDataplanePort(DeployState ds) {
    if ( ! enableTokenSupport(ds)) return OptionalInt.empty();
    return OptionalInt.of(8444);
}
/** Returns the subset of endpoints that use token authentication. */
private static Set<ContainerEndpoint> tokenEndpoints(DeployState deployState) {
    Set<ContainerEndpoint> result = new HashSet<>();
    for (ContainerEndpoint endpoint : deployState.getEndpoints()) {
        if (endpoint.authMethod() == ApplicationClusterEndpoint.AuthMethod.token)
            result.add(endpoint);
    }
    return result;
}
/** Token support is enabled only for hosted public-system deployments with at least one token endpoint. */
private static boolean enableTokenSupport(DeployState state) {
    if ( ! state.isHosted()) return false;
    if ( ! state.zone().system().isPublic()) return false;
    return ! tokenEndpoints(state).isEmpty();
}
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
// Default status file served by the hosted load balancer status handler
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
// Environment variable that may override the status file location
private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";
private static final String CONTAINER_TAG = "container";
private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";
// ZooKeeper ensembles must have an odd node count within [MIN, MAX]
private static final int MIN_ZOOKEEPER_NODE_COUNT = 1;
private static final int MAX_ZOOKEEPER_NODE_COUNT = 7;
// Whether the built containers should open network ports
public enum Networking { disable, enable }
private ApplicationPackage app;
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
protected DeployLogger log;
// This builder handles the <container> (cluster) tag
public static final List<ConfigModelId> configModelIds = List.of(ConfigModelId.fromName(CONTAINER_TAG));
// Ids of the built-in renderers; user components must not reuse them (see validateRendererElement)
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
/**
 * @param standaloneBuilder whether this builds a standalone container (outside a config server deployment)
 * @param networking        whether the built containers should open network ports
 */
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
    super(ContainerModel.class);
    this.standaloneBuilder = standaloneBuilder;
    this.networking = networking;
    // The message bus RPC server is only relevant for deployed (non-standalone) containers
    this.rpcServerEnabled = !standaloneBuilder;
    this.httpServerEnabled = networking == Networking.enable;
}
/** Returns the model ids (the 'container' tag) this builder handles. */
@Override
public List<ConfigModelId> handlesElements() {
    return configModelIds;
}
/** Builds the container model: creates the cluster, populates it, and attaches it to the model. */
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
    log = modelContext.getDeployLogger();
    app = modelContext.getApplicationPackage();

    checkVersion(spec);
    ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
    addClusterContent(cluster, spec, modelContext);
    cluster.setMessageBusEnabled(rpcServerEnabled);
    cluster.setRpcServerEnabled(rpcServerEnabled);
    cluster.setHttpServerEnabled(httpServerEnabled);
    model.setCluster(cluster);
}
/**
 * Creates the container cluster via an anonymous dom builder so the cluster is wired
 * into the config producer tree under the parent producer.
 */
private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
    return new VespaDomBuilder.DomConfigProducerBuilderBase<ApplicationContainerCluster>() {
        @Override
        protected ApplicationContainerCluster doBuild(DeployState deployState, TreeConfigProducer<AnyConfigProducer> ancestor, Element producerSpec) {
            // The producer id is used both as subId and as the cluster name
            return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
                                                   modelContext.getProducerId(), deployState);
        }
    }.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
}
/**
 * Populates the cluster from the services.xml spec. The call order below is significant:
 * e.g. handlers must exist before http/bindings are configured, and nodes are added after
 * the components they must host.
 */
private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);

    addConfiguredComponents(deployState, cluster, spec);
    addSecretStore(cluster, spec, deployState);

    // Feature subsystems (processing, search, docproc, document-api)
    addProcessing(deployState, spec, cluster, context);
    addSearch(deployState, spec, cluster, context);
    addDocproc(deployState, spec, cluster);
    addDocumentApi(deployState, spec, cluster, context);  // NOTE: Must be done after addSearch

    cluster.addDefaultHandlersExceptStatus();
    addStatusHandlers(cluster, context.getDeployState().isHosted());
    addUserHandlers(deployState, cluster, spec, context);

    // Http/connector setup (clients must be added before the mTLS connector reads them)
    addClients(deployState, spec, cluster);
    addHttp(deployState, spec, cluster, context);

    addAccessLogs(deployState, cluster, spec);
    addNodes(cluster, spec, context);

    addModelEvaluationRuntime(cluster);
    addModelEvaluation(spec, cluster, context);  // NOTE: Must be done after addNodes

    addServerProviders(deployState, spec, cluster);

    if (!standaloneBuilder) cluster.addAllPlatformBundles();

    // Must be done after nodes:
    addDeploymentSpecConfig(cluster, context, deployState.getDeployLogger());
    addZooKeeper(cluster, spec);

    addParameterStoreValidationHandler(cluster, deployState);
}
/** Adds the AWS parameter store validation handler for hosted public deployments. */
private void addParameterStoreValidationHandler(ApplicationContainerCluster cluster, DeployState deployState) {
    if ( ! deployState.isHosted()) return;
    // The bundle is always included in hosted; the handler itself only in public systems
    cluster.addPlatformBundle(PlatformBundles.absoluteBundlePath("jdisc-cloud-aws"));
    if ( ! deployState.zone().system().isPublic()) return;

    Handler handler = new Handler(
            new ComponentModel("com.yahoo.jdisc.cloud.aws.AwsParameterStoreValidationHandler", null, "jdisc-cloud-aws", null));
    handler.addServerBindings(SystemBindingPattern.fromHttpPath("/validate-secret-store"));
    cluster.addComponent(handler);
}
/** Configures an embedded ZooKeeper ensemble on the cluster when a &lt;zookeeper&gt; element is present. */
private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) {
    Element zooKeeper = getZooKeeper(spec);
    if (zooKeeper == null) return;

    // An 'of' attribute on <nodes> means this is a combined cluster, which cannot host ZooKeeper
    Element nodesElement = XML.getChild(spec, "nodes");
    if (nodesElement != null && nodesElement.hasAttribute("of"))
        throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper");

    long nonRetiredNodes = cluster.getContainers().stream().filter(c -> !c.isRetired()).count();
    boolean validEnsembleSize = nonRetiredNodes >= MIN_ZOOKEEPER_NODE_COUNT
                                && nonRetiredNodes <= MAX_ZOOKEEPER_NODE_COUNT
                                && nonRetiredNodes % 2 != 0;
    if ( ! validEnsembleSize)
        throw new IllegalArgumentException("Cluster with ZooKeeper needs an odd number of nodes, between " +
                                           MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT +
                                           ", have " + nonRetiredNodes + " non-retired");

    cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade");
    cluster.addSimpleComponent("com.yahoo.vespa.curator.CuratorWrapper", null, "zkfacade");

    String sessionTimeoutSeconds = zooKeeper.getAttribute("session-timeout-seconds");
    if ( ! sessionTimeoutSeconds.isBlank()) {
        try {
            int timeoutSeconds = Integer.parseInt(sessionTimeoutSeconds);
            if (timeoutSeconds <= 0) throw new IllegalArgumentException("must be a positive value");
            cluster.setZookeeperSessionTimeoutSeconds(timeoutSeconds);
        }
        catch (RuntimeException e) {
            // Covers both NumberFormatException and the non-positive-value case above
            throw new IllegalArgumentException("invalid zookeeper session-timeout-seconds '" + sessionTimeoutSeconds + "'", e);
        }
    }

    cluster.getContainers().forEach(ContainerModelBuilder::addReconfigurableZooKeeperServerComponents);
}
/** Adds the reconfigurable ZooKeeper server components to the given container. */
public static void addReconfigurableZooKeeperServerComponents(Container container) {
    for (String className : List.of("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer",
                                    "com.yahoo.vespa.zookeeper.Reconfigurer",
                                    "com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl"))
        container.addComponent(zookeeperComponent(className, container));
}
/** Creates a zookeeper-server bundle component scoped to the given container's config id. */
private static SimpleComponent zookeeperComponent(String idSpec, Container container) {
    return new SimpleComponent(new ComponentModel(idSpec, null, "zookeeper-server", container.getConfigId()));
}
/** Configures a secret store from the &lt;secret-store&gt; element: cloud-backed or plain group-based. */
private void addSecretStore(ApplicationContainerCluster cluster, Element spec, DeployState deployState) {
    Element secretStoreElement = XML.getChild(spec, "secret-store");
    if (secretStoreElement == null) return;

    if ("cloud".equals(secretStoreElement.getAttribute("type"))) {
        addCloudSecretStore(cluster, secretStoreElement, deployState);
        return;
    }
    SecretStore secretStore = new SecretStore();
    for (Element group : XML.getChildren(secretStoreElement, "group"))
        secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
    cluster.setSecretStore(secretStore);
}
/** Configures a cloud (AWS parameter store backed) secret store; public hosted systems only. */
private void addCloudSecretStore(ApplicationContainerCluster cluster, Element secretStoreElement, DeployState deployState) {
    if ( ! deployState.isHosted()) return;
    if ( ! cluster.getZone().system().isPublic())
        throw new IllegalArgumentException("Cloud secret store is not supported in non-public system, see the documentation");

    // Index the tenant-declared stores by name for lookup below
    Map<String, TenantSecretStore> secretStoresByName =
            deployState.getProperties().tenantSecretStores().stream()
                       .collect(Collectors.toMap(TenantSecretStore::getName, Function.identity()));

    CloudSecretStore cloudSecretStore = new CloudSecretStore();
    Element store = XML.getChild(secretStoreElement, "store");
    for (Element group : XML.getChildren(store, "aws-parameter-store")) {
        String account = group.getAttribute("account");
        TenantSecretStore secretStore = secretStoresByName.get(account);
        if (secretStore == null)
            throw new IllegalArgumentException("No configured secret store named " + account);
        if (secretStore.getExternalId().isEmpty())
            throw new IllegalArgumentException("No external ID has been set");
        cloudSecretStore.addConfig(account, group.getAttribute("aws-region"), secretStore.getAwsId(),
                                   secretStore.getRole(), secretStore.getExternalId().get());
    }

    cluster.addComponent(cloudSecretStore);
}
/** Applies deployment.xml-derived configuration (Athenz identity, rotations) for hosted deployments. */
private void addDeploymentSpecConfig(ApplicationContainerCluster cluster, ConfigModelContext context, DeployLogger deployLogger) {
    DeployState deployState = context.getDeployState();
    if ( ! deployState.isHosted()) return;
    DeploymentSpec deploymentSpec = app.getDeploymentSpec();
    if (deploymentSpec.isEmpty()) return;

    // Surface deprecated deployment.xml constructs to the user
    deploymentSpec.deprecatedElements().forEach(
            element -> deployLogger.logApplicationPackage(WARNING, element.humanReadableString()));

    addIdentityProvider(cluster,
                        deployState.getProperties().configServerSpecs(),
                        deployState.getProperties().loadBalancerName(),
                        deployState.getProperties().ztsUrl(),
                        deployState.getProperties().athenzDnsSuffix(),
                        deployState.zone(),
                        deploymentSpec);
    addRotationProperties(cluster, deployState.getEndpoints());
}
/** Sets rotation-related service properties on every container in the cluster. */
private void addRotationProperties(ApplicationContainerCluster cluster, Set<ContainerEndpoint> endpoints) {
    for (Container container : cluster.getContainers()) {
        setRotations(container, endpoints, cluster.getName());
        container.setProp("activeRotation", "true");
    }
}
/** Sets the 'rotations' property to the global endpoint names of this cluster, comma-separated. */
private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
    Set<String> rotations = new LinkedHashSet<>(); // preserve endpoint iteration order
    for (ContainerEndpoint endpoint : endpoints) {
        if ( ! endpoint.clusterId().equals(containerClusterName)) continue;
        if (endpoint.scope() != ApplicationClusterEndpoint.Scope.global) continue;
        rotations.addAll(endpoint.names());
    }
    container.setProp("rotations", String.join(",", rotations));
}
/** Adds &lt;component&gt; children of the given element and of any &lt;components&gt; groups within it. */
private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element parent) {
    for (Element componentsElement : XML.getChildren(parent, "components")) {
        addIncludes(componentsElement); // resolve <include> before reading components
        addConfiguredComponents(deployState, cluster, componentsElement, "component");
    }
    addConfiguredComponents(deployState, cluster, parent, "component");
}
/** Adds the status handler: a file-backed one in hosted Vespa, a VIP handler otherwise. */
protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
    if ( ! isHostedVespa) {
        cluster.addVipHandler();
        return;
    }
    String name = "status.html";
    // The environment variable, when set, overrides the default status file location
    String statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING))
                                .orElse(HOSTED_VESPA_STATUS_FILE);
    cluster.addComponent(
            new FileStatusHandlerComponent(
                    name + "-status-handler",
                    statusFile,
                    SystemBindingPattern.fromHttpPath("/" + name)));
}
/** Adds any user-configured &lt;server&gt; components to the cluster. */
private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    addConfiguredComponents(deployState, cluster, spec, "server");
}
/**
 * Configures access logging: the default access log when none is configured and defaults are
 * enabled, otherwise the user-specified ones (not allowed in hosted Vespa). A connection log
 * is added whenever any access log component ends up on the cluster.
 */
protected void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    List<Element> accessLogElements = getAccessLogElements(spec);

    if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault()) {
        cluster.addAccessLog();
    } else {
        if (cluster.isHostedVespa()) {
            // Hosted controls access logging itself; user overrides are ignored with a warning
            log.logApplicationPackage(WARNING, "Applications are not allowed to override the 'accesslog' element");
        } else {
            List<AccessLogComponent> components = new ArrayList<>();
            for (Element accessLog : accessLogElements) {
                AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(accessLogComponent -> {
                    components.add(accessLogComponent);
                    cluster.addComponent(accessLogComponent);
                });
            }
            if ( ! components.isEmpty()) { // idiomatic emptiness check (was components.size() > 0)
                cluster.removeSimpleComponent(VoidRequestLog.class);
                cluster.addSimpleComponent(AccessLog.class);
            }
        }
    }

    // Any access log (default or user-configured) implies a connection log as well
    if (cluster.getAllComponents().stream().anyMatch(component -> component instanceof AccessLogComponent))
        cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "access"));
}
/** Returns all &lt;accesslog&gt; children of the given services element. */
private List<Element> getAccessLogElements(Element spec) {
    return XML.getChildren(spec, "accesslog");
}
/** Configures http from the &lt;http&gt; element, plus the implicit hosted http/connector/filter setup. */
protected void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    Element httpElement = XML.getChild(spec, "http");
    if (httpElement != null)
        cluster.setHttp(buildHttp(deployState, cluster, httpElement, context));

    if ( ! isHostedTenantApplication(context)) return;
    // Hosted tenant applications always get http, access control, and cloud connectors
    addHostedImplicitHttpIfNotPresent(deployState, cluster);
    addHostedImplicitAccessControlIfNotPresent(deployState, cluster);
    addDefaultConnectorHostedFilterBinding(cluster);
    addCloudMtlsConnector(deployState, cluster);
    addCloudDataPlaneFilter(deployState, cluster);
    addCloudTokenSupport(deployState, cluster);
}
/**
 * Installs the cloud data plane filter chains (hosted public systems only): a secure chain
 * as the default request filter on the mTLS data plane connector, and a no-op "insecure"
 * chain as the default on the internal web service port.
 */
private static void addCloudDataPlaneFilter(DeployState deployState, ApplicationContainerCluster cluster) {
    if (!deployState.isHosted() || !deployState.zone().system().isPublic()) return;

    var dataplanePort = getMtlsDataplanePort(deployState);
    var secureChain = new HttpFilterChain("cloud-data-plane-secure", HttpFilterChain.Type.SYSTEM);
    secureChain.addInnerComponent(new CloudDataPlaneFilter(cluster, deployState));
    cluster.getHttp().getFilterChains().add(secureChain);
    // Attach the secure chain to the connector listening on the data plane port
    // (orElseThrow: hosted clusters always have an http server with this connector)
    cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
            .filter(c -> c.getListenPort() == dataplanePort).findAny().orElseThrow()
            .setDefaultRequestFilterChain(secureChain.getComponentId());

    // The insecure chain is a no-op filter, bound to the VIP handler and the default port
    var insecureChain = new HttpFilterChain("cloud-data-plane-insecure", HttpFilterChain.Type.SYSTEM);
    insecureChain.addInnerComponent(new Filter(
            new ChainedComponentModel(
                    new BundleInstantiationSpecification(
                            new ComponentSpecification("com.yahoo.jdisc.http.filter.security.misc.NoopFilter"),
                            null, new ComponentSpecification("jdisc-security-filters")),
                    Dependencies.emptyDependencies())));
    cluster.getHttp().getFilterChains().add(insecureChain);
    var insecureChainComponentSpec = new ComponentSpecification(insecureChain.getComponentId().toString());
    FilterBinding insecureBinding =
            FilterBinding.create(FilterBinding.Type.REQUEST, insecureChainComponentSpec, VIP_HANDLER_BINDING);
    cluster.getHttp().getBindings().add(insecureBinding);
    cluster.getHttp().getHttpServer().orElseThrow().getConnectorFactories().stream()
            .filter(c -> c.getListenPort() == Defaults.getDefaults().vespaWebServicePort()).findAny().orElseThrow()
            .setDefaultRequestFilterChain(insecureChain.getComponentId());
}
/**
 * Configures data plane clients (hosted public systems only). Without a &lt;clients&gt; element,
 * a single legacy "default" client is read from security/clients.pem; otherwise each declared
 * client is parsed, and at least one must present a certificate.
 */
protected void addClients(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    if ( ! deployState.isHosted() || ! deployState.zone().system().isPublic()) return;

    Element clientsElement = XML.getChild(spec, "clients");
    boolean legacyMode = (clientsElement == null);
    List<Client> clients;
    if (legacyMode) {
        var legacyCertificates = getCertificates(app.getFile(Path.fromString("security/clients.pem")));
        clients = List.of(new Client("default", List.of(), legacyCertificates, List.of()));
    } else {
        clients = XML.getChildren(clientsElement, "client").stream()
                     .flatMap(elem -> getClient(elem, deployState).stream())
                     .toList();
        if (clients.stream().noneMatch(client -> ! client.certificates().isEmpty()))
            throw new IllegalArgumentException("At least one client must require a certificate");
    }

    // Operator/tester certificates get an implicit internal client
    List<X509Certificate> operatorAndTesterCertificates = deployState.getProperties().operatorCertificates();
    if ( ! operatorAndTesterCertificates.isEmpty())
        clients = Stream.concat(clients.stream(), Stream.of(Client.internalClient(operatorAndTesterCertificates))).toList();
    cluster.setClients(legacyMode, clients);
}
/**
 * Parses one &lt;client&gt; element into a Client. A client authenticates either by
 * certificate (when any &lt;certificate&gt; children are given) or by token. Returns empty
 * when the client declares only tokens and none of them are known and active.
 *
 * @throws IllegalArgumentException on an id starting with '_' or a missing certificate file
 */
private Optional<Client> getClient(Element clientElement, DeployState state) {
    String clientId = XML.attribute("id", clientElement).orElseThrow();
    // Ids starting with '_' are reserved (e.g. for the internal operator client)
    if (clientId.startsWith("_"))
        throw new IllegalArgumentException("Invalid client id '%s', id cannot start with '_'".formatted(clientId));
    List<String> permissions = XML.attribute("permissions", clientElement)
            .map(p -> p.split(",")).stream()
            .flatMap(Arrays::stream)
            .toList();

    var certificates = XML.getChildren(clientElement, "certificate").stream()
            .flatMap(certElem -> {
                var file = app.getFile(Path.fromString(certElem.getAttribute("file")));
                if (!file.exists()) {
                    throw new IllegalArgumentException("Certificate file '%s' for client '%s' does not exist"
                                                               .formatted(file.getPath().getRelative(), clientId));
                }
                return getCertificates(file).stream();
            })
            .toList();
    // Certificate clients take precedence; tokens are only considered when no certificates are given
    if (!certificates.isEmpty()) return Optional.of(new Client(clientId, permissions, certificates, List.of()));

    var knownTokens = state.getProperties().dataplaneTokens().stream()
            .collect(Collectors.toMap(DataplaneToken::tokenId, Function.identity()));

    // Resolve declared tokens against the known ones, warning about unknown or inactive tokens
    var referencedTokens = XML.getChildren(clientElement, "token").stream()
            .map(elem -> {
                var tokenId = elem.getAttribute("id");
                var token = knownTokens.get(tokenId);
                if (token == null)
                    log.logApplicationPackage(
                            WARNING, "Token '%s' for client '%s' does not exist".formatted(tokenId, clientId));
                return token;
            })
            .filter(token -> {
                if (token == null) return false;
                boolean empty = token.versions().isEmpty();
                if (empty)
                    log.logApplicationPackage(
                            WARNING, "Token '%s' for client '%s' has no active versions"
                                    .formatted(token.tokenId(), clientId));
                return !empty;
            })
            .toList();
    if (referencedTokens.isEmpty()) {
        log.log(Level.INFO, "Skipping client '%s' as it does not refer to any activate tokens".formatted(clientId));
        return Optional.empty();
    }
    return Optional.of(new Client(clientId, permissions, List.of(), referencedTokens));
}
/**
 * Reads all PEM certificates from the given file.
 *
 * @return the certificates, or an empty list when the file does not exist
 * @throws IllegalArgumentException when the file exists but contains no certificates
 * @throws RuntimeException wrapping any IOException from reading the file
 */
private List<X509Certificate> getCertificates(ApplicationFile file) {
    if ( ! file.exists()) return List.of();
    // try-with-resources: the reader is closed even when readAll or PEM parsing throws
    // (previously it leaked on those paths)
    try (Reader reader = file.createReader()) {
        String certPem = IOUtils.readAll(reader);
        List<X509Certificate> x509Certificates = X509CertificateUtils.certificateListFromPem(certPem);
        if (x509Certificates.isEmpty())
            throw new IllegalArgumentException("File %s does not contain any certificates.".formatted(file.getPath().getRelative()));
        return x509Certificates;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
/** Lets the access control configuration, when present, configure the default hosted connector. */
private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) {
    var http = cluster.getHttp();
    http.getAccessControl().ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(http));
}
private void addCloudMtlsConnector(DeployState state, ApplicationContainerCluster cluster) {
JettyHttpServer server = cluster.getHttp().getHttpServer().get();
String serverName = server.getComponentId().getName();
var builder = HostedSslConnectorFactory.builder(serverName, getMtlsDataplanePort(state))
.proxyProtocol(true, state.getProperties().featureFlags().enableProxyProtocolMixedMode())
.tlsCiphersOverride(state.getProperties().tlsCiphersOverride())
.endpointConnectionTtl(state.getProperties().endpointConnectionTtl());
var endpointCert = state.endpointCertificateSecrets().orElse(null);
if (endpointCert != null) {
builder.endpointCertificate(endpointCert);
boolean isPublic = state.zone().system().isPublic();
List<X509Certificate> clientCertificates = getClientCertificates(cluster);
if (isPublic) {
if (clientCertificates.isEmpty())
throw new IllegalArgumentException("Client certificate authority security/clients.pem is missing - " +
"see: https:
builder.tlsCaCertificatesPem(X509CertificateUtils.toPem(clientCertificates))
.clientAuth(SslClientAuth.WANT_WITH_ENFORCER);
} else {
builder.tlsCaCertificatesPath("/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem");
var needAuth = cluster.getHttp().getAccessControl()
.map(accessControl -> accessControl.clientAuthentication)
.map(clientAuth -> clientAuth == AccessControl.ClientAuthentication.need)
.orElse(false);
builder.clientAuth(needAuth ? SslClientAuth.NEED : SslClientAuth.WANT);
}
} else {
builder.clientAuth(SslClientAuth.WANT_WITH_ENFORCER);
}
var connectorFactory = builder.build();
cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory));
server.addConnector(connectorFactory);
}
/** Returns all certificates configured on the cluster's data plane clients. */
private List<X509Certificate> getClientCertificates(ApplicationContainerCluster cluster) {
    List<X509Certificate> certificates = new ArrayList<>();
    for (Client client : cluster.getClients())
        certificates.addAll(client.certificates());
    return List.copyOf(certificates);
}
/** Returns whether this deployment is a tenant application in hosted Vespa. */
private static boolean isHostedTenantApplication(ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    return deployState.isHostedTenantApplication(context.getApplicationType());
}
/** Ensures the cluster has an http config with a server listening on the default Vespa web service port. */
private static void addHostedImplicitHttpIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
    if (cluster.getHttp() == null)
        cluster.setHttp(new Http(new FilterChains(cluster)));
    Http http = cluster.getHttp();
    if (http.getHttpServer().isEmpty())
        http.setHttpServer(new JettyHttpServer("DefaultHttpServer", cluster, deployState));
    JettyHttpServer httpServer = http.getHttpServer().get();
    int defaultPort = Defaults.getDefaults().vespaWebServicePort();
    boolean hasDefaultConnector = httpServer.getConnectorFactories().stream()
                                            .anyMatch(connector -> connector.getListenPort() == defaultPort);
    if ( ! hasDefaultConnector)
        httpServer.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build());
}
/** Sets up Athenz access control filter chains unless the application configured access control itself. */
private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) {
    Http http = cluster.getHttp();
    if (http.getAccessControl().isPresent()) return; // explicitly configured by the application
    deployState.getProperties().athenzDomain().ifPresent(
            tenantDomain -> new AccessControl.Builder(tenantDomain.value())
                                             .setHandlers(cluster)
                                             .clientAuthentication(AccessControl.ClientAuthentication.need)
                                             .build()
                                             .configureHttpFilterChains(http));
}
/** Builds the http config from the services.xml http element, removing all servers if networking is disabled. */
private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement, ConfigModelContext context) {
    HttpBuilder builder = new HttpBuilder(portBindingOverride(deployState, context));
    Http http = builder.build(deployState, cluster, httpElement);
    if (networking == Networking.disable)
        http.removeAllServers();
    return http;
}
/** Adds the document API to the cluster if declared in services.xml. */
private void addDocumentApi(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    ContainerDocumentApi documentApi = buildDocumentApi(deployState, cluster, spec, context);
    if (documentApi != null)
        cluster.setDocumentApi(documentApi);
}
/** Adds document processing (docproc) to the cluster if declared in services.xml. */
private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
    ContainerDocproc docproc = buildDocproc(deployState, cluster, spec);
    if (docproc == null) return;
    cluster.setDocproc(docproc);
    var options = docproc.options;
    cluster.setMbusParams(new ApplicationContainerCluster.MbusParams(options.maxConcurrentFactor,
                                                                     options.documentExpansionFactor,
                                                                     options.containerCoreMemory));
}
/** Adds search (chains, handler and renderers) to the cluster if declared in services.xml. */
private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    Element searchElement = XML.getChild(spec, "search");
    if (searchElement == null) return;
    addIncludes(searchElement);
    cluster.setSearch(buildSearch(deployState, cluster, searchElement));
    addSearchHandler(deployState, cluster, searchElement, context);
    validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/**
 * Configures stateless model evaluation for the cluster if 'model-evaluation' is declared in services.xml,
 * applying per-model ONNX overrides (execution mode, threading, GPU device) from the element.
 */
private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
Element modelEvaluationElement = XML.getChild(spec, "model-evaluation");
if (modelEvaluationElement == null) return;
// Rank profiles come from the full Vespa model when available (may be absent in some build contexts)
RankProfileList profiles =
context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty;
// Clone so per-cluster overrides below do not mutate the shared model list
FileDistributedOnnxModels models = profiles.getOnnxModels().clone();
Element onnxElement = XML.getChild(modelEvaluationElement, "onnx");
Element modelsElement = XML.getChild(onnxElement, "models");
for (Element modelElement : XML.getChildren(modelsElement, "model") ) {
OnnxModel onnxModel = models.asMap().get(modelElement.getAttribute("name"));
if (onnxModel == null) {
// Unknown model name: warn and skip rather than failing deployment
String availableModels = String.join(", ", profiles.getOnnxModels().asMap().keySet());
context.getDeployState().getDeployLogger().logApplicationPackage(WARNING,
"Model '" + modelElement.getAttribute("name") + "' not found. Available ONNX " +
"models are: " + availableModels + ". Skipping this configuration.");
continue;
}
// -1 / null mean "not set" for the optional overrides below
onnxModel.setStatelessExecutionMode(getStringValue(modelElement, "execution-mode", null));
onnxModel.setStatelessInterOpThreads(getIntValue(modelElement, "interop-threads", -1));
onnxModel.setStatelessIntraOpThreads(getIntValue(modelElement, "intraop-threads", -1));
Element gpuDeviceElement = XML.getChild(modelElement, "gpu-device");
if (gpuDeviceElement != null) {
int gpuDevice = Integer.parseInt(gpuDeviceElement.getTextContent());
// GPU is only considered present if some container host actually has GPU resources
boolean hasGpu = cluster.getContainers().stream().anyMatch(container -> container.getHostResource() != null &&
!container.getHostResource().realResources().gpuResources().isZero());
onnxModel.setGpuDevice(gpuDevice, hasGpu);
}
cluster.onnxModelCost().registerModel(context.getApplicationPackage().getFile(onnxModel.getFilePath()));
}
cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles, models));
}
/** Returns the text content of the named child element, or the given default if the child is absent. */
private String getStringValue(Element element, String name, String defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return child.getTextContent();
}
/** Returns the text content of the named child element parsed as an int, or the given default if absent. */
private int getIntValue(Element element, String name, int defaultValue) {
    Element child = XML.getChild(element, name);
    if (child == null) return defaultValue;
    return Integer.parseInt(child.getTextContent());
}
/**
 * Adds the model-evaluation runtime bundles and components to the cluster.
 * These are added unconditionally (even without 'model-evaluation' in services.xml) because the
 * model-evaluation bundle contains many public API packages that customer code may use.
 */
protected void addModelEvaluationRuntime(ApplicationContainerCluster cluster) {
    cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_EVALUATION_BUNDLE_FILE);
    cluster.addPlatformBundle(ContainerModelEvaluation.MODEL_INTEGRATION_BUNDLE_FILE);
    cluster.addPlatformBundle(ContainerModelEvaluation.ONNXRUNTIME_BUNDLE_FILE);
    // The ONNX runtime is always available for injection to any component
    cluster.addSimpleComponent(ContainerModelEvaluation.ONNX_RUNTIME_CLASS,
                               null,
                               ContainerModelEvaluation.INTEGRATION_BUNDLE_NAME);
    // Runtime providing utilities such as metrics to embedder implementations
    cluster.addSimpleComponent("ai.vespa.embedding.EmbedderRuntime",
                               null,
                               ContainerModelEvaluation.INTEGRATION_BUNDLE_NAME);
}
/** Adds processing chains to the cluster if declared in services.xml. */
private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) {
    Element processingElement = XML.getChild(spec, "processing");
    if (processingElement == null) return;
    cluster.addSearchAndDocprocBundles();
    addIncludes(processingElement);
    var chains = new DomProcessingBuilder(null).build(deployState, cluster, processingElement);
    var bindings = serverBindings(deployState, context, processingElement, ProcessingChains.defaultBindings);
    cluster.setProcessingChains(chains, bindings.toArray(BindingPattern[]::new));
    validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
/** Builds the search config (chains, query profiles, semantic rules, page templates) for the cluster. */
private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) {
    SearchChains searchChains = new DomSearchChainsBuilder().build(deployState, containerCluster, producerSpec);
    ContainerSearch search = new ContainerSearch(deployState, containerCluster, searchChains);
    applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), search);
    search.setQueryProfiles(deployState.getQueryProfiles());
    search.setSemanticRules(deployState.getSemanticRules());
    return search;
}
/** Validates and applies page templates from the application package. */
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage, ContainerSearch containerSearch) {
    PageTemplates.validate(applicationPackage);
    containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
/** Adds all user-declared request handlers from services.xml to the cluster. */
private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    var portOverride = portBindingOverride(deployState, context);
    for (Element handlerElement : XML.getChildren(spec, "handler")) {
        var handler = new DomHandlerBuilder(cluster, portOverride).build(deployState, cluster, handlerElement);
        cluster.addComponent(handler);
    }
}
/** Verifies the services.xml container version attribute is 1.0. */
private void checkVersion(Element spec) {
    String version = spec.getAttribute("version");
    if (Version.fromString(version).equals(new Version(1))) return;
    throw new IllegalArgumentException("Expected container version to be 1.0, but got " + version);
}
/** Adds nodes to the cluster: a single standalone node, or nodes derived from services.xml. */
private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    if (standaloneBuilder) {
        addStandaloneNode(cluster, context.getDeployState());
    } else {
        addNodesFromXml(cluster, spec, context);
    }
}
/** Adds a single node named "standalone" to the cluster (standalone container mode). */
private void addStandaloneNode(ApplicationContainerCluster cluster, DeployState deployState) {
    int index = cluster.getContainers().size();
    ApplicationContainer container = new ApplicationContainer(cluster, "standalone", index, deployState);
    cluster.addContainers(Collections.singleton(container));
}
/** Builds and validates the JVM GC options string. */
private static String buildJvmGCOptions(ConfigModelContext context, String jvmGCOptions) {
    return new JvmGcOptions(context.getDeployState(), jvmGCOptions).build();
}
/** Builds and validates the JVM options string from either the legacy nodes attributes or the jvm element. */
private static String getJvmOptions(Element nodesElement, DeployState deployState, boolean legacyOptions) {
    return new JvmOptions(nodesElement, deployState, legacyOptions).build();
}
/** Returns the value of the named attribute, or null if the attribute is not present. */
private static String extractAttribute(Element element, String attrName) {
    if ( ! element.hasAttribute(attrName)) return null;
    return element.getAttribute(attrName);
}
/** Applies JVM options to the nodes, from the jvm element when present, else from legacy nodes attributes. */
private void extractJvmOptions(List<ApplicationContainer> nodes,
                               ApplicationContainerCluster cluster,
                               Element nodesElement,
                               ConfigModelContext context) {
    Element jvmElement = XML.getChild(nodesElement, "jvm");
    if (jvmElement != null)
        extractJvmTag(nodes, cluster, nodesElement, jvmElement, context);
    else
        extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
}
/**
 * Applies JVM options, GC options and allocated-memory from the deprecated attributes on the
 * nodes element itself, logging deprecation warnings where the legacy forms are used.
 * NOTE(review): two string literals below appear truncated in this view ("See https:") — verify against the original file.
 */
private void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
Element nodesElement, ConfigModelContext context) {
applyNodesTagJvmArgs(nodes, getJvmOptions(nodesElement, context.getDeployState(), true));
// Only apply legacy GC options if nothing was set already
if (cluster.getJvmGCOptions().isEmpty()) {
String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS);
if (jvmGCOptions != null && !jvmGCOptions.isEmpty()) {
DeployLogger logger = context.getDeployState().getDeployLogger();
logger.logApplicationPackage(WARNING, "'jvm-gc-options' is deprecated and will be removed in Vespa 9." +
" Please merge into 'gc-options' in 'jvm' element." +
" See https:
}
cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions));
}
// applyMemoryPercentage returns true only when the attribute was actually set, triggering the deprecation warning
if (applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME)))
context.getDeployState().getDeployLogger()
.logApplicationPackage(WARNING, "'allocated-memory' is deprecated and will be removed in Vespa 9." +
" Please merge into 'allocated-memory' in 'jvm' element." +
" See https:
}
/** Applies JVM options, allocated-memory and GC options from the jvm element to the nodes and cluster. */
private void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
                           Element nodesElement, Element jvmElement, ConfigModelContext context) {
    applyNodesTagJvmArgs(nodes, getJvmOptions(nodesElement, context.getDeployState(), false));
    applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
    String gcOptions = extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS);
    cluster.setJvmGCOptions(buildJvmGCOptions(context, gcOptions));
}
/**
 * Add nodes to cluster according to the given containerElement.
 *
 * Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set
 * of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed
 * simultaneously for all active config models.
 */
private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
    Element nodesElement = XML.getChild(containerElement, "nodes");
    if (nodesElement == null) {
        cluster.addContainers(allocateWithoutNodesTag(cluster, context));
        return;
    }
    List<ApplicationContainer> nodes = createNodes(cluster, containerElement, nodesElement, context);
    extractJvmOptions(nodes, cluster, nodesElement, context);
    applyDefaultPreload(nodes, nodesElement);
    var envVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT));
    for (var node : nodes)
        envVars.forEach(node::addEnvironmentVariable);
    if (useCpuSocketAffinity(nodesElement))
        AbstractService.distributeCpuSocketAffinity(nodes);
    cluster.addContainers(nodes);
}
/** Returns the zone endpoint settings from deployment.xml for the given cluster in the current instance and zone. */
private ZoneEndpoint zoneEndpoint(ConfigModelContext context, ClusterSpec.Id cluster) {
    var properties = context.properties();
    InstanceName instance = properties.applicationId().instance();
    ZoneId zone = ZoneId.from(properties.zone().environment(), properties.zone().region());
    return context.getApplicationPackage().getDeploymentSpec().zoneEndpoint(instance, zone, cluster);
}
/** Reads environment variables from the given element into an insertion-ordered map; names are validated as identifiers. */
private static Map<String, String> getEnvironmentVariables(Element environmentVariables) {
    Map<String, String> variables = new LinkedHashMap<>();
    if (environmentVariables == null) return variables;
    for (Element variable : XML.getChildren(environmentVariables)) {
        String name = new com.yahoo.text.Identifier(variable.getNodeName()).toString();
        variables.put(name, variable.getTextContent());
    }
    return variables;
}
/** Creates the cluster's nodes from the nodes element, dispatching on which attribute form is used. */
private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element containerElement,
                                               Element nodesElement, ConfigModelContext context) {
    if (nodesElement.hasAttribute("type")) // hosted node type
        return createNodesFromNodeType(cluster, nodesElement, context);
    if (nodesElement.hasAttribute("of")) { // combined cluster, sharing nodes with a content cluster
        List<ApplicationContainer> containers = createNodesFromContentServiceReference(cluster, nodesElement, context);
        log.logApplicationPackage(WARNING, "Declaring combined cluster with <nodes of=\"...\"> is deprecated without " +
                                           "replacement, and the feature will be removed in Vespa 9. Use separate container and " +
                                           "content clusters instead");
        return containers;
    }
    if (nodesElement.hasAttribute("count")) // hosted node count
        return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
    if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed())
        return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
    return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement); // explicit node list
}
/**
 * Parses a memory percentage string like "60%" and applies it to the cluster.
 *
 * @return true if a percentage was given and applied, false if the string was null or empty
 * @throws IllegalArgumentException if the string is not an integer percentage ending with '%'
 */
private static boolean applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
    if (memoryPercentage == null || memoryPercentage.isEmpty()) return false;
    String trimmed = memoryPercentage.trim();
    try {
        if ( ! trimmed.endsWith("%"))
            throw new IllegalArgumentException("Missing % sign");
        int percentage = Integer.parseInt(trimmed.substring(0, trimmed.length() - 1).trim());
        cluster.setMemoryPercentage(percentage);
        return true;
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
                                           " must be an integer percentage ending by the '%' sign", e);
    }
}
/** Allocate a container cluster without a nodes tag */
private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    HostSystem hostSystem = cluster.hostSystem();
    if ( ! deployState.isHosted())
        return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);
    // Hosted: 2 nodes in production for redundancy, 1 elsewhere
    int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
    deployState.getDeployLogger().logApplicationPackage(Level.INFO, "Using " + nodeCount + " nodes in " + cluster);
    ClusterSpec.Id clusterId = ClusterSpec.Id.from(cluster.getName());
    var hosts = NodesSpecification.dedicated(nodeCount, context)
                                  .provision(hostSystem,
                                             ClusterSpec.Type.container,
                                             clusterId,
                                             zoneEndpoint(context, clusterId),
                                             deployState.getDeployLogger(),
                                             false,
                                             context.clusterInfo().build());
    return createNodesFromHosts(hosts, cluster, deployState);
}
/** Creates a single-node cluster on the given host (non-hosted default). */
private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
    DeployState deployState = context.getDeployState();
    ApplicationContainer node = new ApplicationContainer(cluster, "container.0", 0, deployState);
    node.setHostResource(host);
    node.initService(deployState);
    return List.of(node);
}
/** Provisions nodes for a hosted cluster from a count-based nodes specification. */
private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
    try {
        var nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
        var clusterId = ClusterSpec.Id.from(cluster.name());
        boolean hasZooKeeper = getZooKeeper(containerElement) != null;
        Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(),
                                                                                 ClusterSpec.Type.container,
                                                                                 clusterId,
                                                                                 zoneEndpoint(context, clusterId),
                                                                                 log,
                                                                                 hasZooKeeper,
                                                                                 context.clusterInfo().build());
        return createNodesFromHosts(hosts, cluster, context.getDeployState());
    }
    catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("In " + cluster, e); // add cluster context to the failure
    }
}
/** Allocates all hosts of a given node type (e.g. for infrastructure clusters). */
private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
    DeployState deployState = context.getDeployState();
    ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()))
                                         .vespaVersion(deployState.getWantedNodeVespaVersion())
                                         .dockerImageRepository(deployState.getWantedDockerImageRepo())
                                         .build();
    Map<HostResource, ClusterMembership> hosts =
            cluster.getRoot().hostSystem().allocateHosts(clusterSpec, Capacity.fromRequiredNodeType(type), log);
    return createNodesFromHosts(hosts, cluster, deployState);
}
/** Provisions container nodes on the hosts of the referenced content cluster (combined cluster). */
private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodesSpecification nodeSpecification;
    try {
        nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
    } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
    }
    String referenceId = nodesElement.getAttribute("of");
    cluster.setHostClusterId(referenceId);
    Map<HostResource, ClusterMembership> hosts =
            StorageGroup.provisionHosts(nodeSpecification, referenceId, cluster.getRoot().hostSystem(), context);
    return createNodesFromHosts(hosts, cluster, context.getDeployState());
}
/** Creates and initializes one container per provisioned host, indexed by cluster membership. */
private List<ApplicationContainer> createNodesFromHosts(Map<HostResource, ClusterMembership> hosts,
                                                        ApplicationContainerCluster cluster,
                                                        DeployState deployState) {
    List<ApplicationContainer> nodes = new ArrayList<>();
    hosts.forEach((host, membership) -> {
        ApplicationContainer container = new ApplicationContainer(cluster,
                                                                  "container." + membership.index(),
                                                                  membership.retired(),
                                                                  membership.index(),
                                                                  deployState);
        container.setHostResource(host);
        container.initService(deployState);
        nodes.add(container);
    });
    return nodes;
}
/** Creates one container per explicit node element, indexed in document order. */
private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
    List<ApplicationContainer> nodes = new ArrayList<>();
    List<Element> nodeElements = XML.getChildren(nodesElement, "node");
    for (int index = 0; index < nodeElements.size(); index++) {
        var builder = new ContainerServiceBuilder("container." + index, index);
        nodes.add(builder.build(deployState, cluster, nodeElements.get(index)));
    }
    return nodes;
}
/** Returns whether cpu-socket-affinity is enabled on the nodes element (absent attribute means false). */
private static boolean useCpuSocketAffinity(Element nodesElement) {
    return nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)
           && Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
}
/** Prepends the given JVM args on each container that has no explicitly assigned options. */
private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
    for (ApplicationContainer container : containers) {
        if (container.getAssignedJvmOptions().isEmpty())
            container.prependJvmOptions(jvmArgs);
    }
}
/** Applies the preload attribute from the nodes element to each container, if set. */
private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
    if ( ! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
    String preload = nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME);
    containers.forEach(container -> container.setPreLoad(preload));
}
/** Adds the search handler, with data plane port bindings for hosted tenant applications. */
private void addSearchHandler(DeployState deployState, ApplicationContainerCluster cluster, Element searchElement, ConfigModelContext context) {
    List<BindingPattern> defaultBindings = isHostedTenantApplication(context)
            ? SearchHandler.bindingPattern(getDataplanePorts(deployState))
            : List.of(SearchHandler.DEFAULT_BINDING);
    SearchHandler searchHandler = new SearchHandler(cluster,
                                                    serverBindings(deployState, context, searchElement, defaultBindings),
                                                    ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null));
    cluster.addComponent(searchHandler);
    searchHandler.addComponent(Component.fromClassAndBundle(SearchHandler.EXECUTION_FACTORY, PlatformBundles.SEARCH_AND_DOCPROC_BUNDLE));
}
/** Returns the user-declared bindings from the element, or the given defaults if none are declared. */
private List<BindingPattern> serverBindings(DeployState deployState, ConfigModelContext context, Element searchElement, Collection<BindingPattern> defaultBindings) {
    List<Element> bindingElements = XML.getChildren(searchElement, "binding");
    return bindingElements.isEmpty() ? List.copyOf(defaultBindings)
                                     : toBindingList(deployState, context, bindingElements);
}
/** Converts binding elements to patterns, expanding each over the data plane ports for hosted tenant applications. */
private List<BindingPattern> toBindingList(DeployState deployState, ConfigModelContext context, List<Element> bindingElements) {
    Set<Integer> portOverride = isHostedTenantApplication(context) ? getDataplanePorts(deployState) : Set.of();
    List<BindingPattern> patterns = new ArrayList<>();
    for (Element bindingElement : bindingElements) {
        String pattern = bindingElement.getTextContent().trim();
        if (pattern.isEmpty()) continue; // ignore empty binding elements
        patterns.addAll(userBindingPattern(pattern, portOverride));
    }
    return patterns;
}
/** Returns the binding pattern for the path, one per overridden port, or the plain pattern if no override. */
private static Collection<UserBindingPattern> userBindingPattern(String path, Set<Integer> portBindingOverride) {
    UserBindingPattern base = UserBindingPattern.fromPattern(path);
    if (portBindingOverride.isEmpty()) return Set.of(base);
    List<UserBindingPattern> withPorts = new ArrayList<>();
    for (int port : portBindingOverride)
        withPorts.add(base.withOverriddenPort(port));
    return withPorts;
}
/** Builds the document API config from services.xml, or returns null if 'document-api' is not declared. */
private ContainerDocumentApi buildDocumentApi(DeployState deployState, ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
    Element documentApiElement = XML.getChild(spec, "document-api");
    if (documentApiElement == null) return null;
    var handlerOptions = DocumentApiOptionsBuilder.build(documentApiElement);
    boolean ignoreUndefinedFields = "true".equals(XML.getValue(XML.getChild(documentApiElement, "ignore-undefined-fields")));
    return new ContainerDocumentApi(cluster, handlerOptions, ignoreUndefinedFields, portBindingOverride(deployState, context));
}
/** Returns the data plane ports to bind for hosted tenant applications, or the empty set otherwise. */
private Set<Integer> portBindingOverride(DeployState deployState, ConfigModelContext context) {
    if ( ! isHostedTenantApplication(context)) return Set.of();
    return getDataplanePorts(deployState);
}
/** Builds the docproc config from services.xml, or returns null if 'document-processing' is not declared. */
private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
    Element docprocElement = XML.getChild(spec, "document-processing");
    if (docprocElement == null) return null;
    addIncludes(docprocElement);
    DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
    var options = DocprocOptionsBuilder.build(docprocElement, deployState.getDeployLogger());
    return new ContainerDocproc(cluster, chains, options, !standaloneBuilder);
}
/** Expands all include elements under the parent by importing the included files' elements in place. */
private void addIncludes(Element parentElement) {
    List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includes.isEmpty()) return;
    if (app == null)
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
    includes.forEach(include -> addInclude(parentElement, include));
}
/** Imports all top-level elements of every file in the include's directory into the parent element. */
private void addInclude(Element parentElement, Element include) {
    String dirName = include.getAttribute(IncludeDirs.DIR);
    app.validateIncludeDir(dirName);
    for (Element includedFile : Xml.allElemsFromPath(app, dirName)) {
        for (Element includedSubElement : XML.getChildren(includedFile)) {
            // importNode is required to move nodes between documents; deep copy keeps the whole subtree
            Node copy = parentElement.getOwnerDocument().importNode(includedSubElement, true);
            parentElement.appendChild(copy);
        }
    }
}
/** Adds all components with the given element name to the cluster, resolving model ids first. */
private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
                                            Element parent, String componentName) {
    for (Element componentElement : XML.getChildren(parent, componentName)) {
        ModelIdResolver.resolveModelIds(componentElement, deployState.isHosted());
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
    }
}
/** Adds all components with the given element name to the cluster after running the validator on each. */
private static void validateAndAddConfiguredComponents(DeployState deployState,
                                                       ContainerCluster<? extends Container> cluster,
                                                       Element spec,
                                                       String componentName,
                                                       Consumer<Element> elementValidator) {
    for (Element componentElement : XML.getChildren(spec, componentName)) {
        elementValidator.accept(componentElement); // throws on invalid elements
        cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, componentElement));
    }
}
/**
 * Adds the Athenz identity provider component to the cluster when the deployment spec declares an
 * Athenz domain, replacing the default provider and propagating identity properties to each container.
 *
 * @throws IllegalArgumentException if an Athenz domain is declared but the instance has no Athenz service
 */
private void addIdentityProvider(ApplicationContainerCluster cluster,
List<ConfigServerSpec> configServerSpecs,
HostName loadBalancerName,
URI ztsUrl,
String athenzDnsSuffix,
Zone zone,
DeploymentSpec spec) {
spec.athenzDomain()
.ifPresent(domain -> {
AthenzService service = spec.athenzService(app.getApplicationId().instance(), zone.environment(), zone.region())
.orElseThrow(() -> new IllegalArgumentException("Missing Athenz service configuration in instance '" +
app.getApplicationId().instance() + "'"));
String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
IdentityProvider identityProvider = new IdentityProvider(domain,
service,
getLoadBalancerName(loadBalancerName, configServerSpecs),
ztsUrl,
zoneDnsSuffix,
zone);
// Replace the default provider with the configured one
cluster.removeComponent(ComponentId.fromString("com.yahoo.container.jdisc.AthenzIdentityProviderProvider"));
cluster.addComponent(identityProvider);
var serviceIdentityProviderProvider = "com.yahoo.vespa.athenz.identityprovider.client.ServiceIdentityProviderProvider";
cluster.addComponent(new SimpleComponent(new ComponentModel(serviceIdentityProviderProvider, serviceIdentityProviderProvider, "vespa-athenz")));
// Each container gets its identity as service properties
cluster.getContainers().forEach(container -> {
container.setProp("identity.domain", domain.value());
container.setProp("identity.service", service.value());
});
});
}
/** Returns the given load balancer name, falling back to the first config server's host name, then "unknown". */
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
    if (loadbalancerName != null) return loadbalancerName;
    String fallback = configServerSpecs.stream()
                                       .findFirst()
                                       .map(ConfigServerSpec::getHostName)
                                       .orElse("unknown");
    return HostName.of(fallback);
}
/** Returns the zookeeper element of the given spec, or null if not present. */
private static Element getZooKeeper(Element spec) {
    return XML.getChild(spec, "zookeeper");
}
/** Disallow renderers named "XmlRenderer" or "JsonRenderer" */
private static void validateRendererElement(Element element) {
    String idAttr = element.getAttribute("id");
    if (idAttr.equals(xmlRendererId) || idAttr.equals(jsonRendererId))
        throw new IllegalArgumentException("Renderer id %s is reserved for internal use".formatted(idAttr));
}
/** Returns whether the element is a container (cluster) tag. */
public static boolean isContainerTag(Element element) {
    return CONTAINER_TAG.equals(element.getTagName());
}
/**
* Validates JVM options and logs a warning or fails deployment (depending on feature flag)
* if anyone of them has invalid syntax or is an option that is unsupported for the running system.
*/
private static class JvmOptions {
// NOTE(review): "a-zA-z" is suspicious — the range Z..a also matches [\]^_` ; likely intended "a-zA-Z". Confirm before changing.
private static final Pattern validPattern = Pattern.compile("-[a-zA-z0-9=:./,+*-]+");
// Remote debugging transport is never allowed in hosted Vespa
private static final Pattern invalidInHostedPattern = Pattern.compile("-Xrunjdwp:transport=.*");
private final Element nodesElement;
private final DeployLogger logger;
private final boolean legacyOptions; // true: read deprecated attributes on the nodes element itself
private final boolean isHosted;      // hosted deployments fail on invalid options; self-hosted only warn
public JvmOptions(Element nodesElement, DeployState deployState, boolean legacyOptions) {
this.nodesElement = nodesElement;
this.logger = deployState.getDeployLogger();
this.legacyOptions = legacyOptions;
this.isHosted = deployState.isHosted();
}
/** Returns the validated JVM options string from the jvm element (or legacy attributes); "" when unset. */
String build() {
if (legacyOptions)
return buildLegacyOptions();
Element jvmElement = XML.getChild(nodesElement, "jvm");
if (jvmElement == null) return "";
String jvmOptions = jvmElement.getAttribute(VespaDomBuilder.OPTIONS);
if (jvmOptions.isEmpty()) return "";
validateJvmOptions(jvmOptions);
return jvmOptions;
}
/**
 * Returns the validated JVM options from the deprecated 'jvm-options' attribute; may return null when unset.
 * NOTE(review): the deprecation-warning string literal below appears truncated in this view — verify against the original file.
 */
String buildLegacyOptions() {
String jvmOptions = null;
if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
if (! jvmOptions.isEmpty())
logger.logApplicationPackage(WARNING, "'jvm-options' is deprecated and will be removed in Vespa 9." +
" Please merge 'jvm-options' into 'options' or 'gc-options' in 'jvm' element." +
" See https:
}
validateJvmOptions(jvmOptions);
return jvmOptions;
}
// Collects all options that fail the syntax pattern (plus hosted-forbidden options when hosted),
// then throws in hosted systems or logs a warning otherwise.
private void validateJvmOptions(String jvmOptions) {
if (jvmOptions == null || jvmOptions.isEmpty()) return;
String[] optionList = jvmOptions.split(" ");
List<String> invalidOptions = Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> !Pattern.matches(validPattern.pattern(), option))
.sorted()
.collect(Collectors.toCollection(ArrayList::new));
if (isHosted)
invalidOptions.addAll(Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> Pattern.matches(invalidInHostedPattern.pattern(), option))
.sorted().toList());
if (invalidOptions.isEmpty()) return;
String message = "Invalid or misplaced JVM options in services.xml: " +
String.join(",", invalidOptions) + "." +
" See https:
if (isHosted)
throw new IllegalArgumentException(message);
else
logger.logApplicationPackage(WARNING, message);
}
}
/**
* Validates JVM GC options and logs a warning or fails deployment (depending on feature flag)
* if anyone of them has invalid syntax or is an option that is unsupported for the running system
* (e.g. uses CMS options for hosted Vespa, which uses JDK 17).
*/
private static class JvmGcOptions {
// NOTE(review): "a-zA-z" is suspicious — the range Z..a also matches [\]^_` ; likely intended "a-zA-Z". Confirm before changing.
private static final Pattern validPattern = Pattern.compile("-XX:[+-]*[a-zA-z0-9=]+");
// CMS was removed from the JDK; any CMS option is invalid
private static final Pattern invalidCMSPattern = Pattern.compile("-XX:[+-]\\w*CMS[a-zA-z0-9=]+");
private final DeployState deployState;
private final String jvmGcOptions; // explicitly configured options; null means "use deploy-property default"
private final DeployLogger logger;
private final boolean isHosted;    // hosted deployments fail on invalid options; self-hosted only warn
public JvmGcOptions(DeployState deployState, String jvmGcOptions) {
this.deployState = deployState;
this.jvmGcOptions = jvmGcOptions;
this.logger = deployState.getDeployLogger();
this.isHosted = deployState.isHosted();
}
/** Returns the validated GC options, falling back to deploy properties, then the per-environment default GC. */
private String build() {
String options = deployState.getProperties().jvmGCOptions();
if (jvmGcOptions != null) {
options = jvmGcOptions;
String[] optionList = options.split(" ");
List<String> invalidOptions = Arrays.stream(optionList)
.filter(option -> !option.isEmpty())
.filter(option -> !Pattern.matches(validPattern.pattern(), option)
|| Pattern.matches(invalidCMSPattern.pattern(), option)
|| option.equals("-XX:+UseConcMarkSweepGC"))
.sorted()
.toList();
logOrFailInvalidOptions(invalidOptions);
}
// Default GC: parallel in hosted, G1 elsewhere
if (options == null || options.isEmpty())
options = deployState.isHosted() ? ContainerCluster.PARALLEL_GC : ContainerCluster.G1GC;
return options;
}
// Throws in hosted systems, logs a warning otherwise.
// NOTE(review): the message string literal below appears truncated in this view — verify against the original file.
private void logOrFailInvalidOptions(List<String> options) {
if (options.isEmpty()) return;
String message = "Invalid or misplaced JVM GC options in services.xml: " +
String.join(",", options) + "." +
" See https:
if (isHosted)
throw new IllegalArgumentException(message);
else
logger.logApplicationPackage(WARNING, message);
}
}
private static Set<Integer> getDataplanePorts(DeployState ds) {
var tokenPort = getTokenDataplanePort(ds);
var mtlsPort = getMtlsDataplanePort(ds);
return tokenPort.isPresent() ? Set.of(mtlsPort, tokenPort.getAsInt()) : Set.of(mtlsPort);
}
private static int getMtlsDataplanePort(DeployState ds) {
return enableTokenSupport(ds) ? 8443 : 4443;
}
private static OptionalInt getTokenDataplanePort(DeployState ds) {
return enableTokenSupport(ds) ? OptionalInt.of(8444) : OptionalInt.empty();
}
private static Set<ContainerEndpoint> tokenEndpoints(DeployState deployState) {
return deployState.getEndpoints().stream()
.filter(endpoint -> endpoint.authMethod() == ApplicationClusterEndpoint.AuthMethod.token)
.collect(Collectors.toSet());
}
private static boolean enableTokenSupport(DeployState state) {
Set<ContainerEndpoint> tokenEndpoints = tokenEndpoints(state);
return state.isHosted() && state.zone().system().isPublic() && ! tokenEndpoints.isEmpty();
}
} |
Was this just for debugging or should it be kept? | public void process(boolean validate, boolean documentsOnly) {
if ( ! validate) return;
VerificationContext context = new VerificationContext(new MyAdapter(schema));
for (SDField field : schema.allConcreteFields()) {
ScriptExpression script = field.getIndexingScript();
try {
script.verify(context);
MyConverter converter = new MyConverter();
for (StatementExpression exp : script) {
converter.convert(exp);
}
} catch (VerificationException e) {
e.printStackTrace();
fail(schema, field, "For expression '" + e.getExpression() + "': " + Exceptions.toMessageString(e));
}
}
} | e.printStackTrace(); | public void process(boolean validate, boolean documentsOnly) {
if ( ! validate) return;
VerificationContext context = new VerificationContext(new MyAdapter(schema));
for (SDField field : schema.allConcreteFields()) {
ScriptExpression script = field.getIndexingScript();
try {
script.verify(context);
MyConverter converter = new MyConverter();
for (StatementExpression exp : script) {
converter.convert(exp);
}
} catch (VerificationException e) {
fail(schema, field, "For expression '" + e.getExpression() + "': " + Exceptions.toMessageString(e));
}
}
} | class IndexingValidation extends Processor {
IndexingValidation(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
private static class MyConverter extends ExpressionConverter {
final Set<String> outputs = new HashSet<>();
final Set<String> prevNames = new HashSet<>();
@Override
public ExpressionConverter branch() {
MyConverter ret = new MyConverter();
ret.outputs.addAll(outputs);
ret.prevNames.addAll(prevNames);
return ret;
}
@Override
protected boolean shouldConvert(Expression exp) {
if (exp instanceof OutputExpression) {
String fieldName = ((OutputExpression)exp).getFieldName();
if (outputs.contains(fieldName) && !prevNames.contains(fieldName)) {
throw new VerificationException(exp, "Attempting to assign conflicting values to field '" +
fieldName + "'.");
}
outputs.add(fieldName);
prevNames.add(fieldName);
}
if (exp.createdOutputType() != null) {
prevNames.clear();
}
return false;
}
@Override
protected Expression doConvert(Expression exp) {
throw new UnsupportedOperationException();
}
}
private static class MyAdapter implements FieldTypeAdapter {
final Schema schema;
MyAdapter(Schema schema) {
this.schema = schema;
}
@Override
public DataType getInputType(Expression exp, String fieldName) {
SDField field = schema.getDocumentField(fieldName);
if (field == null) {
throw new VerificationException(exp, "Input field '" + fieldName + "' not found.");
}
return field.getDataType();
}
@Override
public void tryOutputType(Expression exp, String fieldName, DataType valueType) {
String fieldDesc;
DataType fieldType;
if (exp instanceof AttributeExpression) {
Attribute attribute = schema.getAttribute(fieldName);
if (attribute == null) {
throw new VerificationException(exp, "Attribute '" + fieldName + "' not found.");
}
fieldDesc = "attribute";
fieldType = attribute.getDataType();
} else if (exp instanceof IndexExpression) {
SDField field = schema.getConcreteField(fieldName);
if (field == null) {
throw new VerificationException(exp, "Index field '" + fieldName + "' not found.");
}
fieldDesc = "index field";
fieldType = field.getDataType();
} else if (exp instanceof SummaryExpression) {
SummaryField field = schema.getSummaryField(fieldName);
if (field == null) {
throw new VerificationException(exp, "Summary field '" + fieldName + "' not found.");
}
fieldDesc = "summary field";
fieldType = field.getDataType();
} else {
throw new UnsupportedOperationException();
}
if ( ! fieldType.isAssignableFrom(valueType) &&
! fieldType.isAssignableFrom(createCompatType(valueType))) {
throw new VerificationException(exp, "Can not assign " + valueType.getName() + " to " + fieldDesc +
" '" + fieldName + "' which is " + fieldType.getName() + ".");
}
}
private static DataType createCompatType(DataType origType) {
if (origType instanceof ArrayDataType) {
return DataType.getArray(createCompatType(((ArrayDataType)origType).getNestedType()));
} else if (origType instanceof MapDataType) {
MapDataType mapType = (MapDataType)origType;
return DataType.getMap(createCompatType(mapType.getKeyType()),
createCompatType(mapType.getValueType()));
} else if (origType instanceof WeightedSetDataType) {
return DataType.getWeightedSet(createCompatType(((WeightedSetDataType)origType).getNestedType()));
} else if (GeoPos.isPos(origType)) {
return DataType.LONG;
} else {
return origType;
}
}
}
} | class IndexingValidation extends Processor {
IndexingValidation(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
private static class MyConverter extends ExpressionConverter {
final Set<String> outputs = new HashSet<>();
final Set<String> prevNames = new HashSet<>();
@Override
public ExpressionConverter branch() {
MyConverter ret = new MyConverter();
ret.outputs.addAll(outputs);
ret.prevNames.addAll(prevNames);
return ret;
}
@Override
protected boolean shouldConvert(Expression exp) {
if (exp instanceof OutputExpression) {
String fieldName = ((OutputExpression)exp).getFieldName();
if (outputs.contains(fieldName) && !prevNames.contains(fieldName)) {
throw new VerificationException(exp, "Attempting to assign conflicting values to field '" +
fieldName + "'.");
}
outputs.add(fieldName);
prevNames.add(fieldName);
}
if (exp.createdOutputType() != null) {
prevNames.clear();
}
return false;
}
@Override
protected Expression doConvert(Expression exp) {
throw new UnsupportedOperationException();
}
}
private static class MyAdapter implements FieldTypeAdapter {
final Schema schema;
MyAdapter(Schema schema) {
this.schema = schema;
}
@Override
public DataType getInputType(Expression exp, String fieldName) {
SDField field = schema.getDocumentField(fieldName);
if (field == null) {
throw new VerificationException(exp, "Input field '" + fieldName + "' not found.");
}
return field.getDataType();
}
@Override
public void tryOutputType(Expression exp, String fieldName, DataType valueType) {
String fieldDesc;
DataType fieldType;
if (exp instanceof AttributeExpression) {
Attribute attribute = schema.getAttribute(fieldName);
if (attribute == null) {
throw new VerificationException(exp, "Attribute '" + fieldName + "' not found.");
}
fieldDesc = "attribute";
fieldType = attribute.getDataType();
} else if (exp instanceof IndexExpression) {
SDField field = schema.getConcreteField(fieldName);
if (field == null) {
throw new VerificationException(exp, "Index field '" + fieldName + "' not found.");
}
fieldDesc = "index field";
fieldType = field.getDataType();
} else if (exp instanceof SummaryExpression) {
SummaryField field = schema.getSummaryField(fieldName);
if (field == null) {
throw new VerificationException(exp, "Summary field '" + fieldName + "' not found.");
}
fieldDesc = "summary field";
fieldType = field.getDataType();
} else {
throw new UnsupportedOperationException();
}
if ( ! fieldType.isAssignableFrom(valueType) &&
! fieldType.isAssignableFrom(createCompatType(valueType))) {
throw new VerificationException(exp, "Can not assign " + valueType.getName() + " to " + fieldDesc +
" '" + fieldName + "' which is " + fieldType.getName() + ".");
}
}
private static DataType createCompatType(DataType origType) {
if (origType instanceof ArrayDataType) {
return DataType.getArray(createCompatType(((ArrayDataType)origType).getNestedType()));
} else if (origType instanceof MapDataType) {
MapDataType mapType = (MapDataType)origType;
return DataType.getMap(createCompatType(mapType.getKeyType()),
createCompatType(mapType.getValueType()));
} else if (origType instanceof WeightedSetDataType) {
return DataType.getWeightedSet(createCompatType(((WeightedSetDataType)origType).getNestedType()));
} else if (GeoPos.isPos(origType)) {
return DataType.LONG;
} else {
return origType;
}
}
}
} |
We should have an "assert false" (or similar) on the next line to avoid this test to wrongfully pass if no exception is thrown. | void testNoInputInDerivedField() throws ParseException {
try {
var schema = """
schema test {
document test {
field field1 type int {
}
}
field derived1 type int {
indexing: attribute
}
}
""";
ApplicationBuilder.createFromString(schema);
}
catch (IllegalArgumentException e) {
assertEquals("For schema 'test', field 'derived1': For expression '{ attribute derived1; }': " +
"Expected any input, but no input is specified",
Exceptions.toMessageString(e));
}
} | ApplicationBuilder.createFromString(schema); | void testNoInputInDerivedField() throws ParseException {
try {
var schema = """
schema test {
document test {
field field1 type int {
}
}
field derived1 type int {
indexing: attribute
}
}
""";
ApplicationBuilder.createFromString(schema);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("For schema 'test', field 'derived1': For expression '{ attribute derived1; }': " +
"Expected any input, but no input is specified",
Exceptions.toMessageString(e));
}
} | class IndexingInputsTestCase {
@Test
void requireThatExtraFieldInputExtraFieldThrows() throws IOException, ParseException {
assertBuildFails("src/test/examples/indexing_extra_field_input_extra_field.sd",
"For schema 'indexing_extra_field_input_extra_field', field 'bar': Indexing script refers " +
"to field 'bar' which is neither a field in document type " +
"'indexing_extra_field_input_extra_field' nor a mutable attribute");
}
@Test
void requireThatExtraFieldInputImplicitThrows() throws IOException, ParseException {
assertBuildFails("src/test/examples/indexing_extra_field_input_implicit.sd",
"For schema 'indexing_extra_field_input_implicit', field 'foo': " +
"For expression '{ tokenize normalize stem:\"BEST\" | index foo; }': Expected string input, but no input is specified");
}
@Test
void requireThatExtraFieldInputNullThrows() throws IOException, ParseException {
assertBuildFails("src/test/examples/indexing_extra_field_input_null.sd",
"For schema 'indexing_extra_field_input_null', field 'foo': Indexing script refers to field " +
"'foo' which is neither a field in document type 'indexing_extra_field_input_null' nor a mutable attribute");
}
@Test
void requireThatExtraFieldInputSelfThrows() throws IOException, ParseException {
assertBuildFails("src/test/examples/indexing_extra_field_input_self.sd",
"For schema 'indexing_extra_field_input_self', field 'foo': Indexing script refers to field " +
"'foo' which is neither a field in document type 'indexing_extra_field_input_self' nor a mutable attribute");
}
@Test
void testPlainInputInDerivedField() throws ParseException {
var schema = """
schema test {
document test {
field field1 type int {
}
}
field derived1 type int {
indexing: input field1 | attribute
}
}
""";
ApplicationBuilder.createFromString(schema);
}
@Test
void testWrappedInputInDerivedField() throws ParseException {
var schema = """
schema test {
document test {
field field1 type int {
}
}
field derived1 type int {
indexing: if (input field1 == 0) { 0 } else { 1 } | attribute
}
}
""";
ApplicationBuilder.createFromString(schema);
}
@Test
} | class IndexingInputsTestCase {
@Test
void requireThatExtraFieldInputExtraFieldThrows() throws ParseException {
try {
var schema = """
search indexing_extra_field_input_extra_field {
document indexing_extra_field_input_extra_field {
}
field foo type string {
}
field bar type string {
indexing: input bar | index
}
}
""";
ApplicationBuilder.createFromString(schema);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("For schema 'indexing_extra_field_input_extra_field', field 'bar': Indexing script refers " +
"to field 'bar' which is neither a field in document type " +
"'indexing_extra_field_input_extra_field' nor a mutable attribute",
Exceptions.toMessageString(e));
}
}
@Test
void requireThatExtraFieldInputImplicitThrows() throws ParseException {
try {
var schema = """
search indexing_extra_field_input_implicit {
document indexing_extra_field_input_implicit {
}
field foo type string {
indexing: index
}
}
""";
ApplicationBuilder.createFromString(schema);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("For schema 'indexing_extra_field_input_implicit', field 'foo': " +
"For expression '{ tokenize normalize stem:\"BEST\" | index foo; }': Expected string input, but no input is specified",
Exceptions.toMessageString(e));
}
}
@Test
void requireThatExtraFieldInputNullThrows() throws ParseException {
try {
var schema = """
search indexing_extra_field_input_null {
document indexing_extra_field_input_null {
}
field foo type string {
indexing: input foo | index
}
}
""";
ApplicationBuilder.createFromString(schema);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("For schema 'indexing_extra_field_input_null', field 'foo': Indexing script refers to field " +
"'foo' which is neither a field in document type 'indexing_extra_field_input_null' nor a mutable attribute",
Exceptions.toMessageString(e));
}
}
@Test
void requireThatExtraFieldInputSelfThrows() throws ParseException {
try {
var schema = """
search indexing_extra_field_input_self {
document indexing_extra_field_input_self {
}
field foo type string {
indexing: input foo | index
}
}
""";
ApplicationBuilder.createFromString(schema);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("For schema 'indexing_extra_field_input_self', field 'foo': Indexing script refers to field " +
"'foo' which is neither a field in document type 'indexing_extra_field_input_self' nor a mutable attribute",
Exceptions.toMessageString(e));
}
}
@Test
void testPlainInputInDerivedField() throws ParseException {
var schema = """
schema test {
document test {
field field1 type int {
}
}
field derived1 type int {
indexing: input field1 | attribute
}
}
""";
ApplicationBuilder.createFromString(schema);
}
@Test
void testWrappedInputInDerivedField() throws ParseException {
var schema = """
schema test {
document test {
field field1 type int {
}
}
field derived1 type int {
indexing: if (input field1 == 0) { 0 } else { 1 } | attribute
}
}
""";
ApplicationBuilder.createFromString(schema);
}
@Test
} |
Fixed - thanks. | public void process(boolean validate, boolean documentsOnly) {
if ( ! validate) return;
VerificationContext context = new VerificationContext(new MyAdapter(schema));
for (SDField field : schema.allConcreteFields()) {
ScriptExpression script = field.getIndexingScript();
try {
script.verify(context);
MyConverter converter = new MyConverter();
for (StatementExpression exp : script) {
converter.convert(exp);
}
} catch (VerificationException e) {
e.printStackTrace();
fail(schema, field, "For expression '" + e.getExpression() + "': " + Exceptions.toMessageString(e));
}
}
} | e.printStackTrace(); | public void process(boolean validate, boolean documentsOnly) {
if ( ! validate) return;
VerificationContext context = new VerificationContext(new MyAdapter(schema));
for (SDField field : schema.allConcreteFields()) {
ScriptExpression script = field.getIndexingScript();
try {
script.verify(context);
MyConverter converter = new MyConverter();
for (StatementExpression exp : script) {
converter.convert(exp);
}
} catch (VerificationException e) {
fail(schema, field, "For expression '" + e.getExpression() + "': " + Exceptions.toMessageString(e));
}
}
} | class IndexingValidation extends Processor {
IndexingValidation(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
private static class MyConverter extends ExpressionConverter {
final Set<String> outputs = new HashSet<>();
final Set<String> prevNames = new HashSet<>();
@Override
public ExpressionConverter branch() {
MyConverter ret = new MyConverter();
ret.outputs.addAll(outputs);
ret.prevNames.addAll(prevNames);
return ret;
}
@Override
protected boolean shouldConvert(Expression exp) {
if (exp instanceof OutputExpression) {
String fieldName = ((OutputExpression)exp).getFieldName();
if (outputs.contains(fieldName) && !prevNames.contains(fieldName)) {
throw new VerificationException(exp, "Attempting to assign conflicting values to field '" +
fieldName + "'.");
}
outputs.add(fieldName);
prevNames.add(fieldName);
}
if (exp.createdOutputType() != null) {
prevNames.clear();
}
return false;
}
@Override
protected Expression doConvert(Expression exp) {
throw new UnsupportedOperationException();
}
}
private static class MyAdapter implements FieldTypeAdapter {
final Schema schema;
MyAdapter(Schema schema) {
this.schema = schema;
}
@Override
public DataType getInputType(Expression exp, String fieldName) {
SDField field = schema.getDocumentField(fieldName);
if (field == null) {
throw new VerificationException(exp, "Input field '" + fieldName + "' not found.");
}
return field.getDataType();
}
@Override
public void tryOutputType(Expression exp, String fieldName, DataType valueType) {
String fieldDesc;
DataType fieldType;
if (exp instanceof AttributeExpression) {
Attribute attribute = schema.getAttribute(fieldName);
if (attribute == null) {
throw new VerificationException(exp, "Attribute '" + fieldName + "' not found.");
}
fieldDesc = "attribute";
fieldType = attribute.getDataType();
} else if (exp instanceof IndexExpression) {
SDField field = schema.getConcreteField(fieldName);
if (field == null) {
throw new VerificationException(exp, "Index field '" + fieldName + "' not found.");
}
fieldDesc = "index field";
fieldType = field.getDataType();
} else if (exp instanceof SummaryExpression) {
SummaryField field = schema.getSummaryField(fieldName);
if (field == null) {
throw new VerificationException(exp, "Summary field '" + fieldName + "' not found.");
}
fieldDesc = "summary field";
fieldType = field.getDataType();
} else {
throw new UnsupportedOperationException();
}
if ( ! fieldType.isAssignableFrom(valueType) &&
! fieldType.isAssignableFrom(createCompatType(valueType))) {
throw new VerificationException(exp, "Can not assign " + valueType.getName() + " to " + fieldDesc +
" '" + fieldName + "' which is " + fieldType.getName() + ".");
}
}
private static DataType createCompatType(DataType origType) {
if (origType instanceof ArrayDataType) {
return DataType.getArray(createCompatType(((ArrayDataType)origType).getNestedType()));
} else if (origType instanceof MapDataType) {
MapDataType mapType = (MapDataType)origType;
return DataType.getMap(createCompatType(mapType.getKeyType()),
createCompatType(mapType.getValueType()));
} else if (origType instanceof WeightedSetDataType) {
return DataType.getWeightedSet(createCompatType(((WeightedSetDataType)origType).getNestedType()));
} else if (GeoPos.isPos(origType)) {
return DataType.LONG;
} else {
return origType;
}
}
}
} | class IndexingValidation extends Processor {
IndexingValidation(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(schema, deployLogger, rankProfileRegistry, queryProfiles);
}
@Override
private static class MyConverter extends ExpressionConverter {
final Set<String> outputs = new HashSet<>();
final Set<String> prevNames = new HashSet<>();
@Override
public ExpressionConverter branch() {
MyConverter ret = new MyConverter();
ret.outputs.addAll(outputs);
ret.prevNames.addAll(prevNames);
return ret;
}
@Override
protected boolean shouldConvert(Expression exp) {
if (exp instanceof OutputExpression) {
String fieldName = ((OutputExpression)exp).getFieldName();
if (outputs.contains(fieldName) && !prevNames.contains(fieldName)) {
throw new VerificationException(exp, "Attempting to assign conflicting values to field '" +
fieldName + "'.");
}
outputs.add(fieldName);
prevNames.add(fieldName);
}
if (exp.createdOutputType() != null) {
prevNames.clear();
}
return false;
}
@Override
protected Expression doConvert(Expression exp) {
throw new UnsupportedOperationException();
}
}
private static class MyAdapter implements FieldTypeAdapter {
final Schema schema;
MyAdapter(Schema schema) {
this.schema = schema;
}
@Override
public DataType getInputType(Expression exp, String fieldName) {
SDField field = schema.getDocumentField(fieldName);
if (field == null) {
throw new VerificationException(exp, "Input field '" + fieldName + "' not found.");
}
return field.getDataType();
}
@Override
public void tryOutputType(Expression exp, String fieldName, DataType valueType) {
String fieldDesc;
DataType fieldType;
if (exp instanceof AttributeExpression) {
Attribute attribute = schema.getAttribute(fieldName);
if (attribute == null) {
throw new VerificationException(exp, "Attribute '" + fieldName + "' not found.");
}
fieldDesc = "attribute";
fieldType = attribute.getDataType();
} else if (exp instanceof IndexExpression) {
SDField field = schema.getConcreteField(fieldName);
if (field == null) {
throw new VerificationException(exp, "Index field '" + fieldName + "' not found.");
}
fieldDesc = "index field";
fieldType = field.getDataType();
} else if (exp instanceof SummaryExpression) {
SummaryField field = schema.getSummaryField(fieldName);
if (field == null) {
throw new VerificationException(exp, "Summary field '" + fieldName + "' not found.");
}
fieldDesc = "summary field";
fieldType = field.getDataType();
} else {
throw new UnsupportedOperationException();
}
if ( ! fieldType.isAssignableFrom(valueType) &&
! fieldType.isAssignableFrom(createCompatType(valueType))) {
throw new VerificationException(exp, "Can not assign " + valueType.getName() + " to " + fieldDesc +
" '" + fieldName + "' which is " + fieldType.getName() + ".");
}
}
private static DataType createCompatType(DataType origType) {
if (origType instanceof ArrayDataType) {
return DataType.getArray(createCompatType(((ArrayDataType)origType).getNestedType()));
} else if (origType instanceof MapDataType) {
MapDataType mapType = (MapDataType)origType;
return DataType.getMap(createCompatType(mapType.getKeyType()),
createCompatType(mapType.getValueType()));
} else if (origType instanceof WeightedSetDataType) {
return DataType.getWeightedSet(createCompatType(((WeightedSetDataType)origType).getNestedType()));
} else if (GeoPos.isPos(origType)) {
return DataType.LONG;
} else {
return origType;
}
}
}
} |
Was already fixed. | void testNoInputInDerivedField() throws ParseException {
try {
var schema = """
schema test {
document test {
field field1 type int {
}
}
field derived1 type int {
indexing: attribute
}
}
""";
ApplicationBuilder.createFromString(schema);
}
catch (IllegalArgumentException e) {
assertEquals("For schema 'test', field 'derived1': For expression '{ attribute derived1; }': " +
"Expected any input, but no input is specified",
Exceptions.toMessageString(e));
}
} | ApplicationBuilder.createFromString(schema); | void testNoInputInDerivedField() throws ParseException {
try {
var schema = """
schema test {
document test {
field field1 type int {
}
}
field derived1 type int {
indexing: attribute
}
}
""";
ApplicationBuilder.createFromString(schema);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("For schema 'test', field 'derived1': For expression '{ attribute derived1; }': " +
"Expected any input, but no input is specified",
Exceptions.toMessageString(e));
}
} | class IndexingInputsTestCase {
@Test
void requireThatExtraFieldInputExtraFieldThrows() throws IOException, ParseException {
assertBuildFails("src/test/examples/indexing_extra_field_input_extra_field.sd",
"For schema 'indexing_extra_field_input_extra_field', field 'bar': Indexing script refers " +
"to field 'bar' which is neither a field in document type " +
"'indexing_extra_field_input_extra_field' nor a mutable attribute");
}
@Test
void requireThatExtraFieldInputImplicitThrows() throws IOException, ParseException {
assertBuildFails("src/test/examples/indexing_extra_field_input_implicit.sd",
"For schema 'indexing_extra_field_input_implicit', field 'foo': " +
"For expression '{ tokenize normalize stem:\"BEST\" | index foo; }': Expected string input, but no input is specified");
}
@Test
void requireThatExtraFieldInputNullThrows() throws IOException, ParseException {
assertBuildFails("src/test/examples/indexing_extra_field_input_null.sd",
"For schema 'indexing_extra_field_input_null', field 'foo': Indexing script refers to field " +
"'foo' which is neither a field in document type 'indexing_extra_field_input_null' nor a mutable attribute");
}
@Test
void requireThatExtraFieldInputSelfThrows() throws IOException, ParseException {
assertBuildFails("src/test/examples/indexing_extra_field_input_self.sd",
"For schema 'indexing_extra_field_input_self', field 'foo': Indexing script refers to field " +
"'foo' which is neither a field in document type 'indexing_extra_field_input_self' nor a mutable attribute");
}
@Test
void testPlainInputInDerivedField() throws ParseException {
var schema = """
schema test {
document test {
field field1 type int {
}
}
field derived1 type int {
indexing: input field1 | attribute
}
}
""";
ApplicationBuilder.createFromString(schema);
}
@Test
void testWrappedInputInDerivedField() throws ParseException {
var schema = """
schema test {
document test {
field field1 type int {
}
}
field derived1 type int {
indexing: if (input field1 == 0) { 0 } else { 1 } | attribute
}
}
""";
ApplicationBuilder.createFromString(schema);
}
@Test
} | class IndexingInputsTestCase {
@Test
void requireThatExtraFieldInputExtraFieldThrows() throws ParseException {
try {
var schema = """
search indexing_extra_field_input_extra_field {
document indexing_extra_field_input_extra_field {
}
field foo type string {
}
field bar type string {
indexing: input bar | index
}
}
""";
ApplicationBuilder.createFromString(schema);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("For schema 'indexing_extra_field_input_extra_field', field 'bar': Indexing script refers " +
"to field 'bar' which is neither a field in document type " +
"'indexing_extra_field_input_extra_field' nor a mutable attribute",
Exceptions.toMessageString(e));
}
}
@Test
void requireThatExtraFieldInputImplicitThrows() throws ParseException {
try {
var schema = """
search indexing_extra_field_input_implicit {
document indexing_extra_field_input_implicit {
}
field foo type string {
indexing: index
}
}
""";
ApplicationBuilder.createFromString(schema);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("For schema 'indexing_extra_field_input_implicit', field 'foo': " +
"For expression '{ tokenize normalize stem:\"BEST\" | index foo; }': Expected string input, but no input is specified",
Exceptions.toMessageString(e));
}
}
@Test
void requireThatExtraFieldInputNullThrows() throws ParseException {
try {
var schema = """
search indexing_extra_field_input_null {
document indexing_extra_field_input_null {
}
field foo type string {
indexing: input foo | index
}
}
""";
ApplicationBuilder.createFromString(schema);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("For schema 'indexing_extra_field_input_null', field 'foo': Indexing script refers to field " +
"'foo' which is neither a field in document type 'indexing_extra_field_input_null' nor a mutable attribute",
Exceptions.toMessageString(e));
}
}
@Test
void requireThatExtraFieldInputSelfThrows() throws ParseException {
try {
var schema = """
search indexing_extra_field_input_self {
document indexing_extra_field_input_self {
}
field foo type string {
indexing: input foo | index
}
}
""";
ApplicationBuilder.createFromString(schema);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("For schema 'indexing_extra_field_input_self', field 'foo': Indexing script refers to field " +
"'foo' which is neither a field in document type 'indexing_extra_field_input_self' nor a mutable attribute",
Exceptions.toMessageString(e));
}
}
@Test
void testPlainInputInDerivedField() throws ParseException {
var schema = """
schema test {
document test {
field field1 type int {
}
}
field derived1 type int {
indexing: input field1 | attribute
}
}
""";
ApplicationBuilder.createFromString(schema);
}
@Test
void testWrappedInputInDerivedField() throws ParseException {
var schema = """
schema test {
document test {
field field1 type int {
}
}
field derived1 type int {
indexing: if (input field1 == 0) { 0 } else { 1 } | attribute
}
}
""";
ApplicationBuilder.createFromString(schema);
}
@Test
} |
Unnecessary read? | SessionData read(SessionZooKeeperClient zooKeeperClient, BooleanFlag readSessionData) {
if (readSessionData.value() && zooKeeperClient.sessionDataExists())
try {
return zooKeeperClient.readSessionData();
} catch (Exception e) {
log.log(WARNING, "Unable to read session dato for session " + zooKeeperClient.sessionId() +
": " + Exceptions.toMessageString(e));
readSessionDataFromLegacyPaths(zooKeeperClient);
}
return readSessionDataFromLegacyPaths(zooKeeperClient);
} | readSessionDataFromLegacyPaths(zooKeeperClient); | SessionData read(SessionZooKeeperClient zooKeeperClient, BooleanFlag readSessionData) {
if (readSessionData.value() && zooKeeperClient.sessionDataExists())
try {
return zooKeeperClient.readSessionData();
} catch (Exception e) {
log.log(WARNING, "Unable to read session data for session " + zooKeeperClient.sessionId() +
": " + Exceptions.toMessageString(e));
}
return readSessionDataFromLegacyPaths(zooKeeperClient);
} | class SessionSerializer {
private static final Logger log = Logger.getLogger(SessionSerializer.class.getName());
/**
 * Writes session metadata to ZooKeeper via the given client.
 * <p>
 * Each value is always written to its own individual node (the paths read back by
 * {@code readSessionDataFromLegacyPaths}); when the {@code writeSessionData} flag is on,
 * the same values are additionally written as one aggregated {@code SessionData} node.
 * Note that {@code created} is only carried by the aggregated node here — presumably the
 * individual create-time node is written elsewhere, since a legacy read path for it exists
 * (TODO confirm).
 */
void write(SessionZooKeeperClient zooKeeperClient, ApplicationId applicationId,
           Instant created, Optional<FileReference> fileReference, Optional<DockerImage> dockerImageRepository,
           Version vespaVersion, Optional<AthenzDomain> athenzDomain, Optional<Quota> quota,
           List<TenantSecretStore> tenantSecretStores, List<X509Certificate> operatorCertificates,
           Optional<CloudAccount> cloudAccount, List<DataplaneToken> dataplaneTokens,
           BooleanFlag writeSessionData) {
    // Individual per-field writes: kept for readers that do not use the aggregated node.
    zooKeeperClient.writeApplicationId(applicationId);
    zooKeeperClient.writeApplicationPackageReference(fileReference);
    zooKeeperClient.writeVespaVersion(vespaVersion);
    zooKeeperClient.writeDockerImageRepository(dockerImageRepository);
    zooKeeperClient.writeAthenzDomain(athenzDomain);
    zooKeeperClient.writeQuota(quota);
    zooKeeperClient.writeTenantSecretStores(tenantSecretStores);
    zooKeeperClient.writeOperatorCertificates(operatorCertificates);
    zooKeeperClient.writeCloudAccount(cloudAccount);
    zooKeeperClient.writeDataplaneTokens(dataplaneTokens);
    // Feature-flagged: also store everything as a single aggregated SessionData node.
    if (writeSessionData.value())
        zooKeeperClient.writeSessionData(new SessionData(applicationId,
                                                         fileReference,
                                                         vespaVersion,
                                                         created,
                                                         dockerImageRepository,
                                                         athenzDomain,
                                                         quota,
                                                         tenantSecretStores,
                                                         operatorCertificates,
                                                         cloudAccount,
                                                         dataplaneTokens));
}
/**
 * Reconstructs a {@link SessionData} by reading each field from its own individual
 * ZooKeeper node (one read per field), mirroring the per-field writes in {@code write}.
 */
private static SessionData readSessionDataFromLegacyPaths(SessionZooKeeperClient zooKeeperClient) {
    return new SessionData(zooKeeperClient.readApplicationId(),
                           zooKeeperClient.readApplicationPackageReference(),
                           zooKeeperClient.readVespaVersion(),
                           zooKeeperClient.readCreateTime(),
                           zooKeeperClient.readDockerImageRepository(),
                           zooKeeperClient.readAthenzDomain(),
                           zooKeeperClient.readQuota(),
                           zooKeeperClient.readTenantSecretStores(),
                           zooKeeperClient.readOperatorCertificates(),
                           zooKeeperClient.readCloudAccount(),
                           zooKeeperClient.readDataplaneTokens());
}
} | class SessionSerializer {
private static final Logger log = Logger.getLogger(SessionSerializer.class.getName());
void write(SessionZooKeeperClient zooKeeperClient, ApplicationId applicationId,
Instant created, Optional<FileReference> fileReference, Optional<DockerImage> dockerImageRepository,
Version vespaVersion, Optional<AthenzDomain> athenzDomain, Optional<Quota> quota,
List<TenantSecretStore> tenantSecretStores, List<X509Certificate> operatorCertificates,
Optional<CloudAccount> cloudAccount, List<DataplaneToken> dataplaneTokens,
BooleanFlag writeSessionData) {
zooKeeperClient.writeApplicationId(applicationId);
zooKeeperClient.writeApplicationPackageReference(fileReference);
zooKeeperClient.writeVespaVersion(vespaVersion);
zooKeeperClient.writeDockerImageRepository(dockerImageRepository);
zooKeeperClient.writeAthenzDomain(athenzDomain);
zooKeeperClient.writeQuota(quota);
zooKeeperClient.writeTenantSecretStores(tenantSecretStores);
zooKeeperClient.writeOperatorCertificates(operatorCertificates);
zooKeeperClient.writeCloudAccount(cloudAccount);
zooKeeperClient.writeDataplaneTokens(dataplaneTokens);
if (writeSessionData.value())
zooKeeperClient.writeSessionData(new SessionData(applicationId,
fileReference,
vespaVersion,
created,
dockerImageRepository,
athenzDomain,
quota,
tenantSecretStores,
operatorCertificates,
cloudAccount,
dataplaneTokens));
}
private static SessionData readSessionDataFromLegacyPaths(SessionZooKeeperClient zooKeeperClient) {
return new SessionData(zooKeeperClient.readApplicationId(),
zooKeeperClient.readApplicationPackageReference(),
zooKeeperClient.readVespaVersion(),
zooKeeperClient.readCreateTime(),
zooKeeperClient.readDockerImageRepository(),
zooKeeperClient.readAthenzDomain(),
zooKeeperClient.readQuota(),
zooKeeperClient.readTenantSecretStores(),
zooKeeperClient.readOperatorCertificates(),
zooKeeperClient.readCloudAccount(),
zooKeeperClient.readDataplaneTokens());
}
} |
Yeah, I guess I can just let it fall through, will remove it | SessionData read(SessionZooKeeperClient zooKeeperClient, BooleanFlag readSessionData) {
if (readSessionData.value() && zooKeeperClient.sessionDataExists())
try {
return zooKeeperClient.readSessionData();
} catch (Exception e) {
log.log(WARNING, "Unable to read session dato for session " + zooKeeperClient.sessionId() +
": " + Exceptions.toMessageString(e));
readSessionDataFromLegacyPaths(zooKeeperClient);
}
return readSessionDataFromLegacyPaths(zooKeeperClient);
} | readSessionDataFromLegacyPaths(zooKeeperClient); | SessionData read(SessionZooKeeperClient zooKeeperClient, BooleanFlag readSessionData) {
if (readSessionData.value() && zooKeeperClient.sessionDataExists())
try {
return zooKeeperClient.readSessionData();
} catch (Exception e) {
log.log(WARNING, "Unable to read session data for session " + zooKeeperClient.sessionId() +
": " + Exceptions.toMessageString(e));
}
return readSessionDataFromLegacyPaths(zooKeeperClient);
} | class SessionSerializer {
private static final Logger log = Logger.getLogger(SessionSerializer.class.getName());
void write(SessionZooKeeperClient zooKeeperClient, ApplicationId applicationId,
Instant created, Optional<FileReference> fileReference, Optional<DockerImage> dockerImageRepository,
Version vespaVersion, Optional<AthenzDomain> athenzDomain, Optional<Quota> quota,
List<TenantSecretStore> tenantSecretStores, List<X509Certificate> operatorCertificates,
Optional<CloudAccount> cloudAccount, List<DataplaneToken> dataplaneTokens,
BooleanFlag writeSessionData) {
zooKeeperClient.writeApplicationId(applicationId);
zooKeeperClient.writeApplicationPackageReference(fileReference);
zooKeeperClient.writeVespaVersion(vespaVersion);
zooKeeperClient.writeDockerImageRepository(dockerImageRepository);
zooKeeperClient.writeAthenzDomain(athenzDomain);
zooKeeperClient.writeQuota(quota);
zooKeeperClient.writeTenantSecretStores(tenantSecretStores);
zooKeeperClient.writeOperatorCertificates(operatorCertificates);
zooKeeperClient.writeCloudAccount(cloudAccount);
zooKeeperClient.writeDataplaneTokens(dataplaneTokens);
if (writeSessionData.value())
zooKeeperClient.writeSessionData(new SessionData(applicationId,
fileReference,
vespaVersion,
created,
dockerImageRepository,
athenzDomain,
quota,
tenantSecretStores,
operatorCertificates,
cloudAccount,
dataplaneTokens));
}
private static SessionData readSessionDataFromLegacyPaths(SessionZooKeeperClient zooKeeperClient) {
return new SessionData(zooKeeperClient.readApplicationId(),
zooKeeperClient.readApplicationPackageReference(),
zooKeeperClient.readVespaVersion(),
zooKeeperClient.readCreateTime(),
zooKeeperClient.readDockerImageRepository(),
zooKeeperClient.readAthenzDomain(),
zooKeeperClient.readQuota(),
zooKeeperClient.readTenantSecretStores(),
zooKeeperClient.readOperatorCertificates(),
zooKeeperClient.readCloudAccount(),
zooKeeperClient.readDataplaneTokens());
}
} | class SessionSerializer {
private static final Logger log = Logger.getLogger(SessionSerializer.class.getName());
void write(SessionZooKeeperClient zooKeeperClient, ApplicationId applicationId,
Instant created, Optional<FileReference> fileReference, Optional<DockerImage> dockerImageRepository,
Version vespaVersion, Optional<AthenzDomain> athenzDomain, Optional<Quota> quota,
List<TenantSecretStore> tenantSecretStores, List<X509Certificate> operatorCertificates,
Optional<CloudAccount> cloudAccount, List<DataplaneToken> dataplaneTokens,
BooleanFlag writeSessionData) {
zooKeeperClient.writeApplicationId(applicationId);
zooKeeperClient.writeApplicationPackageReference(fileReference);
zooKeeperClient.writeVespaVersion(vespaVersion);
zooKeeperClient.writeDockerImageRepository(dockerImageRepository);
zooKeeperClient.writeAthenzDomain(athenzDomain);
zooKeeperClient.writeQuota(quota);
zooKeeperClient.writeTenantSecretStores(tenantSecretStores);
zooKeeperClient.writeOperatorCertificates(operatorCertificates);
zooKeeperClient.writeCloudAccount(cloudAccount);
zooKeeperClient.writeDataplaneTokens(dataplaneTokens);
if (writeSessionData.value())
zooKeeperClient.writeSessionData(new SessionData(applicationId,
fileReference,
vespaVersion,
created,
dockerImageRepository,
athenzDomain,
quota,
tenantSecretStores,
operatorCertificates,
cloudAccount,
dataplaneTokens));
}
private static SessionData readSessionDataFromLegacyPaths(SessionZooKeeperClient zooKeeperClient) {
return new SessionData(zooKeeperClient.readApplicationId(),
zooKeeperClient.readApplicationPackageReference(),
zooKeeperClient.readVespaVersion(),
zooKeeperClient.readCreateTime(),
zooKeeperClient.readDockerImageRepository(),
zooKeeperClient.readAthenzDomain(),
zooKeeperClient.readQuota(),
zooKeeperClient.readTenantSecretStores(),
zooKeeperClient.readOperatorCertificates(),
zooKeeperClient.readCloudAccount(),
zooKeeperClient.readDataplaneTokens());
}
} |
Consider simplifying with something like ```suggestion Optional<String> planId = SlimeUtils.optionalString(tenantObject.field(planIdField)); ``` | private CloudTenant cloudTenantFrom(Inspector tenantObject) {
TenantName name = TenantName.from(tenantObject.field(nameField).asString());
Instant createdAt = SlimeUtils.instant(tenantObject.field(createdAtField));
LastLoginInfo lastLoginInfo = lastLoginInfoFromSlime(tenantObject.field(lastLoginInfoField));
Optional<SimplePrincipal> creator = SlimeUtils.optionalString(tenantObject.field(creatorField)).map(SimplePrincipal::new);
BiMap<PublicKey, SimplePrincipal> developerKeys = developerKeysFromSlime(tenantObject.field(pemDeveloperKeysField));
TenantInfo info = tenantInfoFromSlime(tenantObject.field(tenantInfoField));
List<TenantSecretStore> tenantSecretStores = secretStoresFromSlime(tenantObject.field(secretStoresField));
ArchiveAccess archiveAccess = archiveAccessFromSlime(tenantObject);
Optional<Instant> invalidateUserSessionsBefore = SlimeUtils.optionalInstant(tenantObject.field(invalidateUserSessionsBeforeField));
Instant tenantRolesLastMaintained = SlimeUtils.instant(tenantObject.field(tenantRolesLastMaintainedField));
List<CloudAccountInfo> cloudAccountInfos = cloudAccountsFromSlime(tenantObject.field(cloudAccountsField));
Optional<BillingReference> billingReference = billingReferenceFrom(tenantObject.field(billingReferenceField));
Optional<String> planId = planId(tenantObject.field(planIdField));
return new CloudTenant(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores,
archiveAccess, invalidateUserSessionsBefore, tenantRolesLastMaintained,
cloudAccountInfos, billingReference, planId);
} | Optional<String> planId = planId(tenantObject.field(planIdField)); | private CloudTenant cloudTenantFrom(Inspector tenantObject) {
TenantName name = TenantName.from(tenantObject.field(nameField).asString());
Instant createdAt = SlimeUtils.instant(tenantObject.field(createdAtField));
LastLoginInfo lastLoginInfo = lastLoginInfoFromSlime(tenantObject.field(lastLoginInfoField));
Optional<SimplePrincipal> creator = SlimeUtils.optionalString(tenantObject.field(creatorField)).map(SimplePrincipal::new);
BiMap<PublicKey, SimplePrincipal> developerKeys = developerKeysFromSlime(tenantObject.field(pemDeveloperKeysField));
TenantInfo info = tenantInfoFromSlime(tenantObject.field(tenantInfoField));
List<TenantSecretStore> tenantSecretStores = secretStoresFromSlime(tenantObject.field(secretStoresField));
ArchiveAccess archiveAccess = archiveAccessFromSlime(tenantObject);
Optional<Instant> invalidateUserSessionsBefore = SlimeUtils.optionalInstant(tenantObject.field(invalidateUserSessionsBeforeField));
Instant tenantRolesLastMaintained = SlimeUtils.instant(tenantObject.field(tenantRolesLastMaintainedField));
List<CloudAccountInfo> cloudAccountInfos = cloudAccountsFromSlime(tenantObject.field(cloudAccountsField));
Optional<BillingReference> billingReference = billingReferenceFrom(tenantObject.field(billingReferenceField));
PlanId planId = planId(tenantObject.field(planIdField));
return new CloudTenant(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores,
archiveAccess, invalidateUserSessionsBefore, tenantRolesLastMaintained,
cloudAccountInfos, billingReference, planId);
} | class TenantSerializer {
private static final String nameField = "name";
private static final String typeField = "type";
private static final String athenzDomainField = "athenzDomain";
private static final String propertyField = "property";
private static final String propertyIdField = "propertyId";
private static final String creatorField = "creator";
private static final String createdAtField = "createdAt";
private static final String deletedAtField = "deletedAt";
private static final String contactField = "contact";
private static final String contactUrlField = "contactUrl";
private static final String propertyUrlField = "propertyUrl";
private static final String issueTrackerUrlField = "issueTrackerUrl";
private static final String personsField = "persons";
private static final String personField = "person";
private static final String queueField = "queue";
private static final String componentField = "component";
private static final String billingInfoField = "billingInfo";
private static final String customerIdField = "customerId";
private static final String productCodeField = "productCode";
private static final String pemDeveloperKeysField = "pemDeveloperKeys";
private static final String tenantInfoField = "info";
private static final String lastLoginInfoField = "lastLoginInfo";
private static final String secretStoresField = "secretStores";
private static final String archiveAccessRoleField = "archiveAccessRole";
private static final String archiveAccessField = "archiveAccess";
private static final String awsArchiveAccessRoleField = "awsArchiveAccessRole";
private static final String gcpArchiveAccessMemberField = "gcpArchiveAccessMember";
private static final String invalidateUserSessionsBeforeField = "invalidateUserSessionsBefore";
private static final String tenantRolesLastMaintainedField = "tenantRolesLastMaintained";
private static final String billingReferenceField = "billingReference";
private static final String planIdField = "planId";
private static final String cloudAccountsField = "cloudAccounts";
private static final String accountField = "account";
private static final String templateVersionField = "templateVersion";
private static final String awsIdField = "awsId";
private static final String roleField = "role";
public Slime toSlime(Tenant tenant) {
Slime slime = new Slime();
Cursor tenantObject = slime.setObject();
tenantObject.setString(nameField, tenant.name().value());
tenantObject.setString(typeField, valueOf(tenant.type()));
tenantObject.setLong(createdAtField, tenant.createdAt().toEpochMilli());
toSlime(tenant.lastLoginInfo(), tenantObject.setObject(lastLoginInfoField));
tenantObject.setLong(tenantRolesLastMaintainedField, tenant.tenantRolesLastMaintained().toEpochMilli());
cloudAccountsToSlime(tenant.cloudAccounts(), tenantObject.setArray(cloudAccountsField));
switch (tenant.type()) {
case athenz: toSlime((AthenzTenant) tenant, tenantObject); break;
case cloud: toSlime((CloudTenant) tenant, tenantObject); break;
case deleted: toSlime((DeletedTenant) tenant, tenantObject); break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
return slime;
}
private void toSlime(AthenzTenant tenant, Cursor tenantObject) {
tenantObject.setString(athenzDomainField, tenant.domain().getName());
tenantObject.setString(propertyField, tenant.property().id());
tenant.propertyId().ifPresent(propertyId -> tenantObject.setString(propertyIdField, propertyId.id()));
tenant.contact().ifPresent(contact -> {
Cursor contactCursor = tenantObject.setObject(contactField);
writeContact(contact, contactCursor);
});
}
private void toSlime(CloudTenant tenant, Cursor root) {
var legacyBillingInfo = new BillingInfo("customer", "Vespa");
tenant.creator().ifPresent(creator -> root.setString(creatorField, creator.getName()));
developerKeysToSlime(tenant.developerKeys(), root.setArray(pemDeveloperKeysField));
toSlime(legacyBillingInfo, root.setObject(billingInfoField));
toSlime(tenant.info(), root);
toSlime(tenant.tenantSecretStores(), root);
toSlime(tenant.archiveAccess(), root);
tenant.billingReference().ifPresent(b -> toSlime(b, root));
tenant.invalidateUserSessionsBefore().ifPresent(instant -> root.setLong(invalidateUserSessionsBeforeField, instant.toEpochMilli()));
tenant.planId().ifPresent(id -> root.setString(planIdField, id));
}
private void toSlime(ArchiveAccess archiveAccess, Cursor root) {
Cursor object = root.setObject(archiveAccessField);
archiveAccess.awsRole().ifPresent(role -> object.setString(awsArchiveAccessRoleField, role));
archiveAccess.gcpMember().ifPresent(member -> object.setString(gcpArchiveAccessMemberField, member));
}
private void toSlime(DeletedTenant tenant, Cursor root) {
root.setLong(deletedAtField, tenant.deletedAt().toEpochMilli());
}
private void developerKeysToSlime(BiMap<PublicKey, ? extends Principal> keys, Cursor array) {
keys.forEach((key, user) -> {
Cursor object = array.addObject();
object.setString("key", KeyUtils.toPem(key));
object.setString("user", user.getName());
});
}
private void toSlime(BillingInfo billingInfo, Cursor billingInfoObject) {
billingInfoObject.setString(customerIdField, billingInfo.customerId());
billingInfoObject.setString(productCodeField, billingInfo.productCode());
}
private void toSlime(LastLoginInfo lastLoginInfo, Cursor lastLoginInfoObject) {
for (LastLoginInfo.UserLevel userLevel: LastLoginInfo.UserLevel.values()) {
lastLoginInfo.get(userLevel).ifPresent(lastLoginAt ->
lastLoginInfoObject.setLong(valueOf(userLevel), lastLoginAt.toEpochMilli()));
}
}
private void cloudAccountsToSlime(List<CloudAccountInfo> cloudAccounts, Cursor cloudAccountsObject) {
cloudAccounts.forEach(cloudAccountInfo -> {
Cursor object = cloudAccountsObject.addObject();
object.setString(accountField, cloudAccountInfo.cloudAccount().account());
object.setString(templateVersionField, cloudAccountInfo.templateVersion().toFullString());
});
}
public Tenant tenantFrom(Slime slime) {
Inspector tenantObject = slime.get();
Tenant.Type type = typeOf(tenantObject.field(typeField).asString());
switch (type) {
case athenz: return athenzTenantFrom(tenantObject);
case cloud: return cloudTenantFrom(tenantObject);
case deleted: return deletedTenantFrom(tenantObject);
default: throw new IllegalArgumentException("Unexpected tenant type '" + type + "'.");
}
}
private AthenzTenant athenzTenantFrom(Inspector tenantObject) {
TenantName name = TenantName.from(tenantObject.field(nameField).asString());
AthenzDomain domain = new AthenzDomain(tenantObject.field(athenzDomainField).asString());
Property property = new Property(tenantObject.field(propertyField).asString());
Optional<PropertyId> propertyId = SlimeUtils.optionalString(tenantObject.field(propertyIdField)).map(PropertyId::new);
Optional<Contact> contact = contactFrom(tenantObject.field(contactField));
Instant createdAt = SlimeUtils.instant(tenantObject.field(createdAtField));
LastLoginInfo lastLoginInfo = lastLoginInfoFromSlime(tenantObject.field(lastLoginInfoField));
Instant tenantRolesLastMaintained = SlimeUtils.instant(tenantObject.field(tenantRolesLastMaintainedField));
List<CloudAccountInfo> cloudAccountInfos = cloudAccountsFromSlime(tenantObject.field(cloudAccountsField));
return new AthenzTenant(name, domain, property, propertyId, contact, createdAt, lastLoginInfo, tenantRolesLastMaintained, cloudAccountInfos);
}
private DeletedTenant deletedTenantFrom(Inspector tenantObject) {
TenantName name = TenantName.from(tenantObject.field(nameField).asString());
Instant createdAt = SlimeUtils.instant(tenantObject.field(createdAtField));
Instant deletedAt = SlimeUtils.instant(tenantObject.field(deletedAtField));
return new DeletedTenant(name, createdAt, deletedAt);
}
private BiMap<PublicKey, SimplePrincipal> developerKeysFromSlime(Inspector array) {
ImmutableBiMap.Builder<PublicKey, SimplePrincipal> keys = ImmutableBiMap.builder();
array.traverse((ArrayTraverser) (__, keyObject) ->
keys.put(KeyUtils.fromPemEncodedPublicKey(keyObject.field("key").asString()),
new SimplePrincipal(keyObject.field("user").asString())));
return keys.build();
}
ArchiveAccess archiveAccessFromSlime(Inspector tenantObject) {
Optional<String> archiveAccessRole = SlimeUtils.optionalString(tenantObject.field(archiveAccessRoleField));
if (archiveAccessRole.isPresent()) {
return new ArchiveAccess().withAWSRole(archiveAccessRole.get());
}
Inspector object = tenantObject.field(archiveAccessField);
if (!object.valid()) {
return new ArchiveAccess();
}
Optional<String> awsArchiveAccessRole = SlimeUtils.optionalString(object.field(awsArchiveAccessRoleField));
Optional<String> gcpArchiveAccessMember = SlimeUtils.optionalString(object.field(gcpArchiveAccessMemberField));
return new ArchiveAccess()
.withAWSRole(awsArchiveAccessRole)
.withGCPMember(gcpArchiveAccessMember);
}
TenantInfo tenantInfoFromSlime(Inspector infoObject) {
if (!infoObject.valid()) return TenantInfo.empty();
return TenantInfo.empty()
.withName(infoObject.field("name").asString())
.withEmail(infoObject.field("email").asString())
.withWebsite(infoObject.field("website").asString())
.withContact(TenantContact.from(
infoObject.field("contactName").asString(),
new Email(infoObject.field("contactEmail").asString(), asBoolOrTrue(infoObject.field("contactEmailVerified")))))
.withAddress(tenantInfoAddressFromSlime(infoObject.field("address")))
.withBilling(tenantInfoBillingContactFromSlime(infoObject.field("billingContact")))
.withContacts(tenantContactsFrom(infoObject.field("contacts")));
}
private TenantAddress tenantInfoAddressFromSlime(Inspector addressObject) {
return TenantAddress.empty()
.withAddress(addressObject.field("addressLines").asString())
.withCode(addressObject.field("postalCodeOrZip").asString())
.withCity(addressObject.field("city").asString())
.withRegion(addressObject.field("stateRegionProvince").asString())
.withCountry(addressObject.field("country").asString());
}
private TenantBilling tenantInfoBillingContactFromSlime(Inspector billingObject) {
return TenantBilling.empty()
.withContact(TenantContact.from(
billingObject.field("name").asString(),
new Email(billingObject.field("email").asString(), true),
billingObject.field("phone").asString()))
.withAddress(tenantInfoAddressFromSlime(billingObject.field("address")));
}
private List<TenantSecretStore> secretStoresFromSlime(Inspector secretStoresObject) {
if (!secretStoresObject.valid()) return List.of();
return SlimeUtils.entriesStream(secretStoresObject)
.map(inspector -> new TenantSecretStore(
inspector.field(nameField).asString(),
inspector.field(awsIdField).asString(),
inspector.field(roleField).asString()))
.toList();
}
private LastLoginInfo lastLoginInfoFromSlime(Inspector lastLoginInfoObject) {
Map<LastLoginInfo.UserLevel, Instant> lastLoginByUserLevel = new HashMap<>();
lastLoginInfoObject.traverse((String name, Inspector value) ->
lastLoginByUserLevel.put(userLevelOf(name), SlimeUtils.instant(value)));
return new LastLoginInfo(lastLoginByUserLevel);
}
private List<CloudAccountInfo> cloudAccountsFromSlime(Inspector cloudAccountsObject) {
return SlimeUtils.entriesStream(cloudAccountsObject)
.map(inspector -> new CloudAccountInfo(
CloudAccount.from(inspector.field(accountField).asString()),
Version.fromString(inspector.field(templateVersionField).asString())))
.toList();
}
void toSlime(TenantInfo info, Cursor parentCursor) {
if (info.isEmpty()) return;
Cursor infoCursor = parentCursor.setObject("info");
infoCursor.setString("name", info.name());
infoCursor.setString("email", info.email());
infoCursor.setString("website", info.website());
infoCursor.setString("contactName", info.contact().name());
infoCursor.setString("contactEmail", info.contact().email().getEmailAddress());
infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified());
toSlime(info.address(), infoCursor);
toSlime(info.billingContact(), infoCursor);
toSlime(info.contacts(), infoCursor);
}
private void toSlime(TenantAddress address, Cursor parentCursor) {
if (address.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("address");
addressCursor.setString("addressLines", address.address());
addressCursor.setString("postalCodeOrZip", address.code());
addressCursor.setString("city", address.city());
addressCursor.setString("stateRegionProvince", address.region());
addressCursor.setString("country", address.country());
}
private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
if (billingContact.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("billingContact");
addressCursor.setString("name", billingContact.contact().name());
addressCursor.setString("email", billingContact.contact().email().getEmailAddress());
addressCursor.setString("phone", billingContact.contact().phone());
toSlime(billingContact.address(), addressCursor);
}
private void toSlime(List<TenantSecretStore> tenantSecretStores, Cursor parentCursor) {
if (tenantSecretStores.isEmpty()) return;
Cursor secretStoresCursor = parentCursor.setArray(secretStoresField);
tenantSecretStores.forEach(tenantSecretStore -> {
Cursor secretStoreCursor = secretStoresCursor.addObject();
secretStoreCursor.setString(nameField, tenantSecretStore.getName());
secretStoreCursor.setString(awsIdField, tenantSecretStore.getAwsId());
secretStoreCursor.setString(roleField, tenantSecretStore.getRole());
});
}
private void toSlime(TenantContacts contacts, Cursor parent) {
if (contacts.isEmpty()) return;
var cursor = parent.setArray("contacts");
contacts.all().forEach(contact -> writeContact(contact, cursor.addObject()));
}
private void toSlime(BillingReference reference, Cursor parent) {
var cursor = parent.setObject(billingReferenceField);
cursor.setString("reference", reference.reference());
cursor.setLong("updated", reference.updated().toEpochMilli());
}
/** Deserializes the billing reference, or returns empty when the field is absent. */
private Optional<BillingReference> billingReferenceFrom(Inspector object) {
    if ( ! object.valid()) return Optional.empty();
    String reference = object.field("reference").asString();
    Instant updated = SlimeUtils.instant(object.field("updated"));
    return Optional.of(new BillingReference(reference, updated));
}
/** Returns the stored plan id, or empty when the field is absent. */
private Optional<String> planId(Inspector object) {
    return object.valid() ? Optional.of(object.asString()) : Optional.empty();
}
private TenantContacts tenantContactsFrom(Inspector object) {
List<TenantContacts.Contact> contacts = SlimeUtils.entriesStream(object)
.map(this::readContact)
.toList();
return new TenantContacts(contacts);
}
private Optional<Contact> contactFrom(Inspector object) {
if ( ! object.valid()) return Optional.empty();
URI contactUrl = URI.create(object.field(contactUrlField).asString());
URI propertyUrl = URI.create(object.field(propertyUrlField).asString());
URI issueTrackerUrl = URI.create(object.field(issueTrackerUrlField).asString());
List<List<String>> persons = personsFrom(object.field(personsField));
String queue = object.field(queueField).asString();
Optional<String> component = object.field(componentField).valid() ? Optional.of(object.field(componentField).asString()) : Optional.empty();
return Optional.of(new Contact(contactUrl,
propertyUrl,
issueTrackerUrl,
persons,
queue,
component));
}
private void writeContact(Contact contact, Cursor contactCursor) {
contactCursor.setString(contactUrlField, contact.url().toString());
contactCursor.setString(propertyUrlField, contact.propertyUrl().toString());
contactCursor.setString(issueTrackerUrlField, contact.issueTrackerUrl().toString());
Cursor personsArray = contactCursor.setArray(personsField);
contact.persons().forEach(personList -> {
Cursor personArray = personsArray.addArray();
personList.forEach(person -> {
Cursor personObject = personArray.addObject();
personObject.setString(personField, person);
});
});
contactCursor.setString(queueField, contact.queue());
contact.component().ifPresent(component -> contactCursor.setString(componentField, component));
}
private List<List<String>> personsFrom(Inspector array) {
List<List<String>> personLists = new ArrayList<>();
array.traverse((ArrayTraverser) (i, personArray) -> {
List<String> persons = new ArrayList<>();
personArray.traverse((ArrayTraverser) (j, inspector) -> persons.add(inspector.field("person").asString()));
personLists.add(persons);
});
return personLists;
}
private void writeContact(TenantContacts.Contact contact, Cursor cursor) {
cursor.setString("type", contact.type().value());
Cursor audiencesArray = cursor.setArray("audiences");
contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
var data = cursor.setObject("data");
switch (contact.type()) {
case EMAIL:
var email = (TenantContacts.EmailContact) contact;
data.setString("email", email.email().getEmailAddress());
data.setBool("emailVerified", email.email().isVerified());
return;
default:
throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
}
}
private TenantContacts.Contact readContact(Inspector inspector) {
var type = TenantContacts.Type.from(inspector.field("type").asString())
.orElseThrow(() -> new RuntimeException("Unknown type: " + inspector.field("type").asString()));
var audiences = SlimeUtils.entriesStream(inspector.field("audiences"))
.map(audience -> TenantSerializer.fromAudience(audience.asString()))
.toList();
switch (type) {
case EMAIL:
var isVerified = asBoolOrTrue(inspector.field("data").field("emailVerified"));
return new TenantContacts.EmailContact(audiences, new Email(inspector.field("data").field("email").asString(), isVerified));
default:
throw new IllegalArgumentException("Serialization for contact type not implemented: " + type);
}
}
/** Maps a serialized type discriminator to its {@link Tenant.Type}, failing on unknown values. */
private static Tenant.Type typeOf(String value) {
    return switch (value) {
        case "athenz"  -> Tenant.Type.athenz;
        case "cloud"   -> Tenant.Type.cloud;
        case "deleted" -> Tenant.Type.deleted;
        default -> throw new IllegalArgumentException("Unknown tenant type '" + value + "'.");
    };
}
/** Maps a {@link Tenant.Type} to its serialized discriminator string. */
private static String valueOf(Tenant.Type type) {
    return switch (type) {
        case athenz  -> "athenz";
        case cloud   -> "cloud";
        case deleted -> "deleted";
        default -> throw new IllegalArgumentException("Unexpected tenant type '" + type + "'.");
    };
}
/** Maps a serialized user-level name to its {@link LastLoginInfo.UserLevel}. */
private static LastLoginInfo.UserLevel userLevelOf(String value) {
    return switch (value) {
        case "user"          -> LastLoginInfo.UserLevel.user;
        case "developer"     -> LastLoginInfo.UserLevel.developer;
        case "administrator" -> LastLoginInfo.UserLevel.administrator;
        default -> throw new IllegalArgumentException("Unknown user level '" + value + "'.");
    };
}
/** Maps a {@link LastLoginInfo.UserLevel} to its serialized name. */
private static String valueOf(LastLoginInfo.UserLevel userLevel) {
    return switch (userLevel) {
        case user          -> "user";
        case developer     -> "developer";
        case administrator -> "administrator";
        default -> throw new IllegalArgumentException("Unexpected user level '" + userLevel + "'.");
    };
}
/** Maps a serialized audience name to its {@link TenantContacts.Audience}. */
private static TenantContacts.Audience fromAudience(String value) {
    return switch (value) {
        case "tenant"        -> TenantContacts.Audience.TENANT;
        case "notifications" -> TenantContacts.Audience.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
    };
}
/** Maps a {@link TenantContacts.Audience} to its serialized name. */
private static String toAudience(TenantContacts.Audience audience) {
    return switch (audience) {
        case TENANT        -> "tenant";
        case NOTIFICATIONS -> "notifications";
        default -> throw new IllegalArgumentException("Unexpected contact audience '" + audience + "'.");
    };
}
/** Reads a boolean field, defaulting to true when the field is absent. */
private boolean asBoolOrTrue(Inspector inspector) {
    return inspector.valid() ? inspector.asBool() : true;
}
} | class TenantSerializer {
// Field names of the serialized (Slime) tenant form.
// NOTE(review): these constants define the persisted schema — renaming one breaks
// reading of data already stored under the old name; confirm migration handling
// before changing any of them.
private static final String nameField = "name";
private static final String typeField = "type";
private static final String athenzDomainField = "athenzDomain";
private static final String propertyField = "property";
private static final String propertyIdField = "propertyId";
private static final String creatorField = "creator";
private static final String createdAtField = "createdAt";
private static final String deletedAtField = "deletedAt";
private static final String contactField = "contact";
private static final String contactUrlField = "contactUrl";
private static final String propertyUrlField = "propertyUrl";
private static final String issueTrackerUrlField = "issueTrackerUrl";
private static final String personsField = "persons";
private static final String personField = "person";
private static final String queueField = "queue";
private static final String componentField = "component";
private static final String billingInfoField = "billingInfo";
private static final String customerIdField = "customerId";
private static final String productCodeField = "productCode";
private static final String pemDeveloperKeysField = "pemDeveloperKeys";
// "info" — declared but not referenced in the visible methods (literal "info" is used directly in toSlime(TenantInfo)).
private static final String tenantInfoField = "info";
private static final String lastLoginInfoField = "lastLoginInfo";
private static final String secretStoresField = "secretStores";
// Legacy single-role archive access field; superseded by the archiveAccess object below.
private static final String archiveAccessRoleField = "archiveAccessRole";
private static final String archiveAccessField = "archiveAccess";
private static final String awsArchiveAccessRoleField = "awsArchiveAccessRole";
private static final String gcpArchiveAccessMemberField = "gcpArchiveAccessMember";
private static final String invalidateUserSessionsBeforeField = "invalidateUserSessionsBefore";
private static final String tenantRolesLastMaintainedField = "tenantRolesLastMaintained";
private static final String billingReferenceField = "billingReference";
private static final String planIdField = "planId";
private static final String cloudAccountsField = "cloudAccounts";
private static final String accountField = "account";
private static final String templateVersionField = "templateVersion";
private static final String awsIdField = "awsId";
private static final String roleField = "role";
/** Serializes the given tenant, common fields first, then type-specific fields, into a new Slime tree. */
public Slime toSlime(Tenant tenant) {
Slime slime = new Slime();
Cursor tenantObject = slime.setObject();
tenantObject.setString(nameField, tenant.name().value());
tenantObject.setString(typeField, valueOf(tenant.type()));
tenantObject.setLong(createdAtField, tenant.createdAt().toEpochMilli());
toSlime(tenant.lastLoginInfo(), tenantObject.setObject(lastLoginInfoField));
tenantObject.setLong(tenantRolesLastMaintainedField, tenant.tenantRolesLastMaintained().toEpochMilli());
cloudAccountsToSlime(tenant.cloudAccounts(), tenantObject.setArray(cloudAccountsField));
// Dispatch to the overload matching the concrete tenant type.
switch (tenant.type()) {
case athenz: toSlime((AthenzTenant) tenant, tenantObject); break;
case cloud: toSlime((CloudTenant) tenant, tenantObject); break;
case deleted: toSlime((DeletedTenant) tenant, tenantObject); break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
return slime;
}
// Appends Athenz-specific fields: domain, property, optional property id and optional contact.
private void toSlime(AthenzTenant tenant, Cursor tenantObject) {
tenantObject.setString(athenzDomainField, tenant.domain().getName());
tenantObject.setString(propertyField, tenant.property().id());
tenant.propertyId().ifPresent(propertyId -> tenantObject.setString(propertyIdField, propertyId.id()));
tenant.contact().ifPresent(contact -> {
Cursor contactCursor = tenantObject.setObject(contactField);
writeContact(contact, contactCursor);
});
}
// Appends cloud-tenant-specific fields: creator, developer keys, info, secret stores,
// archive access, optional billing reference and session invalidation time, and plan id.
private void toSlime(CloudTenant tenant, Cursor root) {
// Fixed placeholder written unconditionally — presumably kept so older readers still
// find a billingInfo object; TODO(review): confirm before removing.
var legacyBillingInfo = new BillingInfo("customer", "Vespa");
tenant.creator().ifPresent(creator -> root.setString(creatorField, creator.getName()));
developerKeysToSlime(tenant.developerKeys(), root.setArray(pemDeveloperKeysField));
toSlime(legacyBillingInfo, root.setObject(billingInfoField));
toSlime(tenant.info(), root);
toSlime(tenant.tenantSecretStores(), root);
toSlime(tenant.archiveAccess(), root);
tenant.billingReference().ifPresent(b -> toSlime(b, root));
tenant.invalidateUserSessionsBefore().ifPresent(instant -> root.setLong(invalidateUserSessionsBeforeField, instant.toEpochMilli()));
root.setString(planIdField, tenant.planId().value());
}
// Writes archive access as an object holding the optional AWS role and GCP member.
private void toSlime(ArchiveAccess archiveAccess, Cursor root) {
Cursor object = root.setObject(archiveAccessField);
archiveAccess.awsRole().ifPresent(role -> object.setString(awsArchiveAccessRoleField, role));
archiveAccess.gcpMember().ifPresent(member -> object.setString(gcpArchiveAccessMemberField, member));
}
// Appends the single deleted-tenant field: deletion time in epoch millis.
private void toSlime(DeletedTenant tenant, Cursor root) {
root.setLong(deletedAtField, tenant.deletedAt().toEpochMilli());
}
// Writes each developer key as an object { "key": <PEM>, "user": <principal name> }.
private void developerKeysToSlime(BiMap<PublicKey, ? extends Principal> keys, Cursor array) {
keys.forEach((key, user) -> {
Cursor object = array.addObject();
object.setString("key", KeyUtils.toPem(key));
object.setString("user", user.getName());
});
}
// Writes the (legacy) billing info customer id and product code.
private void toSlime(BillingInfo billingInfo, Cursor billingInfoObject) {
billingInfoObject.setString(customerIdField, billingInfo.customerId());
billingInfoObject.setString(productCodeField, billingInfo.productCode());
}
// Writes one epoch-millis field per user level that has a recorded last login;
// levels with no recorded login are omitted.
private void toSlime(LastLoginInfo lastLoginInfo, Cursor lastLoginInfoObject) {
for (LastLoginInfo.UserLevel userLevel: LastLoginInfo.UserLevel.values()) {
lastLoginInfo.get(userLevel).ifPresent(lastLoginAt ->
lastLoginInfoObject.setLong(valueOf(userLevel), lastLoginAt.toEpochMilli()));
}
}
// Writes each cloud account as an object holding the account id and template version string.
private void cloudAccountsToSlime(List<CloudAccountInfo> cloudAccounts, Cursor cloudAccountsObject) {
cloudAccounts.forEach(cloudAccountInfo -> {
Cursor object = cloudAccountsObject.addObject();
object.setString(accountField, cloudAccountInfo.cloudAccount().account());
object.setString(templateVersionField, cloudAccountInfo.templateVersion().toFullString());
});
}
/** Deserializes a tenant from the given Slime tree, dispatching on the persisted type field. */
public Tenant tenantFrom(Slime slime) {
    Inspector tenantObject = slime.get();
    Tenant.Type type = typeOf(tenantObject.field(typeField).asString());
    return switch (type) {
        case athenz -> athenzTenantFrom(tenantObject);
        case cloud -> cloudTenantFrom(tenantObject);
        case deleted -> deletedTenantFrom(tenantObject);
        default -> throw new IllegalArgumentException("Unexpected tenant type '" + type + "'.");
    };
}
// Reads an Athenz tenant; propertyId and contact are optional, all other fields required.
private AthenzTenant athenzTenantFrom(Inspector tenantObject) {
TenantName name = TenantName.from(tenantObject.field(nameField).asString());
AthenzDomain domain = new AthenzDomain(tenantObject.field(athenzDomainField).asString());
Property property = new Property(tenantObject.field(propertyField).asString());
Optional<PropertyId> propertyId = SlimeUtils.optionalString(tenantObject.field(propertyIdField)).map(PropertyId::new);
Optional<Contact> contact = contactFrom(tenantObject.field(contactField));
Instant createdAt = SlimeUtils.instant(tenantObject.field(createdAtField));
LastLoginInfo lastLoginInfo = lastLoginInfoFromSlime(tenantObject.field(lastLoginInfoField));
Instant tenantRolesLastMaintained = SlimeUtils.instant(tenantObject.field(tenantRolesLastMaintainedField));
List<CloudAccountInfo> cloudAccountInfos = cloudAccountsFromSlime(tenantObject.field(cloudAccountsField));
return new AthenzTenant(name, domain, property, propertyId, contact, createdAt, lastLoginInfo, tenantRolesLastMaintained, cloudAccountInfos);
}
// Reads a deleted tenant: just name, creation time and deletion time.
private DeletedTenant deletedTenantFrom(Inspector tenantObject) {
TenantName name = TenantName.from(tenantObject.field(nameField).asString());
Instant createdAt = SlimeUtils.instant(tenantObject.field(createdAtField));
Instant deletedAt = SlimeUtils.instant(tenantObject.field(deletedAtField));
return new DeletedTenant(name, createdAt, deletedAt);
}
// Reads developer keys written by developerKeysToSlime: PEM public key -> principal.
// Note: ImmutableBiMap.Builder rejects duplicate keys or values at build().
private BiMap<PublicKey, SimplePrincipal> developerKeysFromSlime(Inspector array) {
ImmutableBiMap.Builder<PublicKey, SimplePrincipal> keys = ImmutableBiMap.builder();
array.traverse((ArrayTraverser) (__, keyObject) ->
keys.put(KeyUtils.fromPemEncodedPublicKey(keyObject.field("key").asString()),
new SimplePrincipal(keyObject.field("user").asString())));
return keys.build();
}
// Reads archive access, handling both storage formats: a legacy top-level AWS role
// string takes precedence; otherwise the structured archiveAccess object is read.
ArchiveAccess archiveAccessFromSlime(Inspector tenantObject) {
// Legacy format: single AWS role stored directly on the tenant object.
Optional<String> archiveAccessRole = SlimeUtils.optionalString(tenantObject.field(archiveAccessRoleField));
if (archiveAccessRole.isPresent()) {
return new ArchiveAccess().withAWSRole(archiveAccessRole.get());
}
Inspector object = tenantObject.field(archiveAccessField);
if (!object.valid()) {
return new ArchiveAccess();
}
Optional<String> awsArchiveAccessRole = SlimeUtils.optionalString(object.field(awsArchiveAccessRoleField));
Optional<String> gcpArchiveAccessMember = SlimeUtils.optionalString(object.field(gcpArchiveAccessMemberField));
return new ArchiveAccess()
.withAWSRole(awsArchiveAccessRole)
.withGCPMember(gcpArchiveAccessMember);
}
// Reads tenant info; returns TenantInfo.empty() when the info object is absent.
// Missing contactEmailVerified defaults to true (see asBoolOrTrue).
TenantInfo tenantInfoFromSlime(Inspector infoObject) {
if (!infoObject.valid()) return TenantInfo.empty();
return TenantInfo.empty()
.withName(infoObject.field("name").asString())
.withEmail(infoObject.field("email").asString())
.withWebsite(infoObject.field("website").asString())
.withContact(TenantContact.from(
infoObject.field("contactName").asString(),
new Email(infoObject.field("contactEmail").asString(), asBoolOrTrue(infoObject.field("contactEmailVerified")))))
.withAddress(tenantInfoAddressFromSlime(infoObject.field("address")))
.withBilling(tenantInfoBillingContactFromSlime(infoObject.field("billingContact")))
.withContacts(tenantContactsFrom(infoObject.field("contacts")));
}
// Reads a tenant address; missing fields come back as empty strings from asString().
private TenantAddress tenantInfoAddressFromSlime(Inspector addressObject) {
return TenantAddress.empty()
.withAddress(addressObject.field("addressLines").asString())
.withCode(addressObject.field("postalCodeOrZip").asString())
.withCity(addressObject.field("city").asString())
.withRegion(addressObject.field("stateRegionProvince").asString())
.withCountry(addressObject.field("country").asString());
}
// Reads the billing contact; the billing email is always treated as verified (true).
private TenantBilling tenantInfoBillingContactFromSlime(Inspector billingObject) {
return TenantBilling.empty()
.withContact(TenantContact.from(
billingObject.field("name").asString(),
new Email(billingObject.field("email").asString(), true),
billingObject.field("phone").asString()))
.withAddress(tenantInfoAddressFromSlime(billingObject.field("address")));
}
// Reads the secret store list; absent field yields an empty list.
private List<TenantSecretStore> secretStoresFromSlime(Inspector secretStoresObject) {
if (!secretStoresObject.valid()) return List.of();
return SlimeUtils.entriesStream(secretStoresObject)
.map(inspector -> new TenantSecretStore(
inspector.field(nameField).asString(),
inspector.field(awsIdField).asString(),
inspector.field(roleField).asString()))
.toList();
}
// Reads the per-user-level last-login timestamps written by toSlime(LastLoginInfo, Cursor).
private LastLoginInfo lastLoginInfoFromSlime(Inspector lastLoginInfoObject) {
Map<LastLoginInfo.UserLevel, Instant> lastLoginByUserLevel = new HashMap<>();
lastLoginInfoObject.traverse((String name, Inspector value) ->
lastLoginByUserLevel.put(userLevelOf(name), SlimeUtils.instant(value)));
return new LastLoginInfo(lastLoginByUserLevel);
}
// Reads the cloud account list written by cloudAccountsToSlime.
private List<CloudAccountInfo> cloudAccountsFromSlime(Inspector cloudAccountsObject) {
return SlimeUtils.entriesStream(cloudAccountsObject)
.map(inspector -> new CloudAccountInfo(
CloudAccount.from(inspector.field(accountField).asString()),
Version.fromString(inspector.field(templateVersionField).asString())))
.toList();
}
// Writes tenant info under an "info" object; writes nothing when info is empty.
void toSlime(TenantInfo info, Cursor parentCursor) {
if (info.isEmpty()) return;
Cursor infoCursor = parentCursor.setObject("info");
infoCursor.setString("name", info.name());
infoCursor.setString("email", info.email());
infoCursor.setString("website", info.website());
infoCursor.setString("contactName", info.contact().name());
infoCursor.setString("contactEmail", info.contact().email().getEmailAddress());
infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified());
toSlime(info.address(), infoCursor);
toSlime(info.billingContact(), infoCursor);
toSlime(info.contacts(), infoCursor);
}
// Writes an address under an "address" object; writes nothing when the address is empty.
private void toSlime(TenantAddress address, Cursor parentCursor) {
if (address.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("address");
addressCursor.setString("addressLines", address.address());
addressCursor.setString("postalCodeOrZip", address.code());
addressCursor.setString("city", address.city());
addressCursor.setString("stateRegionProvince", address.region());
addressCursor.setString("country", address.country());
}
// Writes the billing contact under a "billingContact" object; nothing when empty.
private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
if (billingContact.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("billingContact");
addressCursor.setString("name", billingContact.contact().name());
addressCursor.setString("email", billingContact.contact().email().getEmailAddress());
addressCursor.setString("phone", billingContact.contact().phone());
toSlime(billingContact.address(), addressCursor);
}
// Writes secret stores as an array of {name, awsId, role} objects; nothing when empty.
private void toSlime(List<TenantSecretStore> tenantSecretStores, Cursor parentCursor) {
if (tenantSecretStores.isEmpty()) return;
Cursor secretStoresCursor = parentCursor.setArray(secretStoresField);
tenantSecretStores.forEach(tenantSecretStore -> {
Cursor secretStoreCursor = secretStoresCursor.addObject();
secretStoreCursor.setString(nameField, tenantSecretStore.getName());
secretStoreCursor.setString(awsIdField, tenantSecretStore.getAwsId());
secretStoreCursor.setString(roleField, tenantSecretStore.getRole());
});
}
// Writes all contacts under a "contacts" array; nothing when empty.
private void toSlime(TenantContacts contacts, Cursor parent) {
if (contacts.isEmpty()) return;
var cursor = parent.setArray("contacts");
contacts.all().forEach(contact -> writeContact(contact, cursor.addObject()));
}
// Writes the billing reference id and its last-updated time (epoch millis).
private void toSlime(BillingReference reference, Cursor parent) {
var cursor = parent.setObject(billingReferenceField);
cursor.setString("reference", reference.reference());
cursor.setLong("updated", reference.updated().toEpochMilli());
}
// Reads an optional billing reference; empty when the field is absent.
private Optional<BillingReference> billingReferenceFrom(Inspector object) {
if (! object.valid()) return Optional.empty();
return Optional.of(new BillingReference(
object.field("reference").asString(),
SlimeUtils.instant(object.field("updated"))));
}
// Reads the plan id, falling back to the "none" plan when no plan has been persisted.
private PlanId planId(Inspector object) {
    return object.valid() ? PlanId.from(object.asString()) : PlanId.from("none");
}
// Reads the contacts array written by toSlime(TenantContacts, Cursor).
private TenantContacts tenantContactsFrom(Inspector object) {
List<TenantContacts.Contact> contacts = SlimeUtils.entriesStream(object)
.map(this::readContact)
.toList();
return new TenantContacts(contacts);
}
// Reads an optional (Athenz) contact; empty when the field is absent.
// The component field is itself optional within the contact object.
private Optional<Contact> contactFrom(Inspector object) {
    if ( ! object.valid()) return Optional.empty();
    Inspector componentInspector = object.field(componentField);
    Optional<String> component = componentInspector.valid()
            ? Optional.of(componentInspector.asString())
            : Optional.empty();
    return Optional.of(new Contact(URI.create(object.field(contactUrlField).asString()),
                                   URI.create(object.field(propertyUrlField).asString()),
                                   URI.create(object.field(issueTrackerUrlField).asString()),
                                   personsFrom(object.field(personsField)),
                                   object.field(queueField).asString(),
                                   component));
}
// Writes an (Athenz) contact: URLs, a nested persons array-of-arrays where each
// person is an object with a "person" field, the queue, and the optional component.
private void writeContact(Contact contact, Cursor contactCursor) {
contactCursor.setString(contactUrlField, contact.url().toString());
contactCursor.setString(propertyUrlField, contact.propertyUrl().toString());
contactCursor.setString(issueTrackerUrlField, contact.issueTrackerUrl().toString());
Cursor personsArray = contactCursor.setArray(personsField);
contact.persons().forEach(personList -> {
Cursor personArray = personsArray.addArray();
personList.forEach(person -> {
Cursor personObject = personArray.addObject();
personObject.setString(personField, person);
});
});
contactCursor.setString(queueField, contact.queue());
contact.component().ifPresent(component -> contactCursor.setString(componentField, component));
}
// Reads the nested persons structure written by writeContact(Contact, Cursor):
// an array of arrays of {"person": name} objects, flattened back to names.
private List<List<String>> personsFrom(Inspector array) {
List<List<String>> personLists = new ArrayList<>();
array.traverse((ArrayTraverser) (i, personArray) -> {
List<String> persons = new ArrayList<>();
personArray.traverse((ArrayTraverser) (j, inspector) -> persons.add(inspector.field("person").asString()));
personLists.add(persons);
});
return personLists;
}
// Writes a tenant contact: type, audiences, and type-specific data.
// Only EMAIL contacts are supported; other types throw.
private void writeContact(TenantContacts.Contact contact, Cursor cursor) {
cursor.setString("type", contact.type().value());
Cursor audiencesArray = cursor.setArray("audiences");
contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
var data = cursor.setObject("data");
switch (contact.type()) {
case EMAIL:
var email = (TenantContacts.EmailContact) contact;
data.setString("email", email.email().getEmailAddress());
data.setBool("emailVerified", email.email().isVerified());
return;
default:
throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
}
}
// Reads a tenant contact written by writeContact(TenantContacts.Contact, Cursor).
// Only EMAIL contacts are supported; a missing emailVerified flag defaults to true.
private TenantContacts.Contact readContact(Inspector inspector) {
var type = TenantContacts.Type.from(inspector.field("type").asString())
.orElseThrow(() -> new RuntimeException("Unknown type: " + inspector.field("type").asString()))
;
var audiences = SlimeUtils.entriesStream(inspector.field("audiences"))
.map(audience -> TenantSerializer.fromAudience(audience.asString()))
.toList();
switch (type) {
case EMAIL:
var isVerified = asBoolOrTrue(inspector.field("data").field("emailVerified"));
return new TenantContacts.EmailContact(audiences, new Email(inspector.field("data").field("email").asString(), isVerified));
default:
throw new IllegalArgumentException("Serialization for contact type not implemented: " + type);
}
}
// Parses the persisted string form of a tenant type; inverse of valueOf(Tenant.Type).
private static Tenant.Type typeOf(String value) {
    return switch (value) {
        case "athenz" -> Tenant.Type.athenz;
        case "cloud" -> Tenant.Type.cloud;
        case "deleted" -> Tenant.Type.deleted;
        default -> throw new IllegalArgumentException("Unknown tenant type '" + value + "'.");
    };
}
// Serializes a tenant type to its persisted string form; inverse of typeOf(String).
private static String valueOf(Tenant.Type type) {
    return switch (type) {
        case athenz -> "athenz";
        case cloud -> "cloud";
        case deleted -> "deleted";
        default -> throw new IllegalArgumentException("Unexpected tenant type '" + type + "'.");
    };
}
// Parses the persisted string form of a user level; inverse of valueOf(UserLevel).
private static LastLoginInfo.UserLevel userLevelOf(String value) {
    return switch (value) {
        case "user" -> LastLoginInfo.UserLevel.user;
        case "developer" -> LastLoginInfo.UserLevel.developer;
        case "administrator" -> LastLoginInfo.UserLevel.administrator;
        default -> throw new IllegalArgumentException("Unknown user level '" + value + "'.");
    };
}
// Serializes a user level to its persisted string form; inverse of userLevelOf(String).
private static String valueOf(LastLoginInfo.UserLevel userLevel) {
    return switch (userLevel) {
        case user -> "user";
        case developer -> "developer";
        case administrator -> "administrator";
        default -> throw new IllegalArgumentException("Unexpected user level '" + userLevel + "'.");
    };
}
// Parses the persisted string form of a contact audience; inverse of toAudience.
private static TenantContacts.Audience fromAudience(String value) {
    return switch (value) {
        case "tenant" -> TenantContacts.Audience.TENANT;
        case "notifications" -> TenantContacts.Audience.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
    };
}
// Serializes a contact audience to its persisted string form; inverse of fromAudience.
private static String toAudience(TenantContacts.Audience audience) {
    return switch (audience) {
        case TENANT -> "tenant";
        case NOTIFICATIONS -> "notifications";
        default -> throw new IllegalArgumentException("Unexpected contact audience '" + audience + "'.");
    };
}
// Reads a boolean field, defaulting to true when the field is missing/invalid.
private boolean asBoolOrTrue(Inspector inspector) {
    return inspector.valid() ? inspector.asBool() : true;
}
} |
// NOTE(review): the leading "🎖️ |" on the next line is dataset-column residue fused
// onto the method signature; the real source starts at "private static Module ...".
🎖️ | private static Module createBaseModule(ServerConfig serverConfig, ConnectorConfig connectorConfig) {
// Binds the minimal object graph a JettyHttpServer test needs: the server as a
// singleton, the two config instances, permissive filter bindings, and no-op logs.
return Modules.combine(
new AbstractModule() {
@Override
protected void configure() {
bind(JettyHttpServer.class).in(Singleton.class);
bind(ServerConfig.class).toInstance(serverConfig);
bind(ConnectorConfig.class).toInstance(connectorConfig);
bind(FilterBindings.class).toInstance(new FilterBindings.Builder().setStrictFiltering(serverConfig.strictFiltering()).build());
bind(ConnectionLog.class).toInstance(new VoidConnectionLog());
bind(RequestLog.class).toInstance(new VoidRequestLog());
}
},
new ConnectorFactoryRegistryModule(connectorConfig));
} | bind(JettyHttpServer.class).in(Singleton.class); | private static Module createBaseModule(ServerConfig serverConfig, ConnectorConfig connectorConfig) {
return Modules.combine(
new AbstractModule() {
@Override
protected void configure() {
bind(JettyHttpServer.class).in(Singleton.class);
bind(ServerConfig.class).toInstance(serverConfig);
bind(ConnectorConfig.class).toInstance(connectorConfig);
bind(FilterBindings.class).toInstance(new FilterBindings.Builder().setStrictFiltering(serverConfig.strictFiltering()).build());
bind(ConnectionLog.class).toInstance(new VoidConnectionLog());
bind(RequestLog.class).toInstance(new VoidRequestLog());
}
},
new ConnectorFactoryRegistryModule(connectorConfig));
} | class TestDriver implements AutoCloseable {
// Test harness wrapping a jdisc core TestDriver around a JettyHttpServer.
// (Class header is on the preceding dataset-residue line.)
private final com.yahoo.jdisc.test.TestDriver jdiscCoreTestDriver;
private final JettyHttpServer server;
private final JettyHttpServerContext context;
private final SSLContext sslContext;
// Builds configs (defaults when unset), combines Guice modules (extras override the
// base module), binds handlers, activates the container and starts the server.
private TestDriver(Builder builder) {
ServerConfig serverConfig =
builder.serverConfig != null ? builder.serverConfig : new ServerConfig(new ServerConfig.Builder());
ConnectorConfig connectorConfig =
builder.connectorConfig != null ? builder.connectorConfig : new ConnectorConfig(new ConnectorConfig.Builder());
Module baseModule = createBaseModule(serverConfig, connectorConfig);
Module combinedModule =
builder.extraGuiceModules.isEmpty() ? baseModule : Modules.override(baseModule).with(builder.extraGuiceModules);
com.yahoo.jdisc.test.TestDriver jdiscCoreTestDriver =
com.yahoo.jdisc.test.TestDriver.newSimpleApplicationInstance(combinedModule);
ContainerBuilder containerBuilder = jdiscCoreTestDriver.newContainerBuilder();
JettyHttpServer server = containerBuilder.getInstance(JettyHttpServer.class);
this.context = containerBuilder.getInstance(JettyHttpServerContext.class);
containerBuilder.serverProviders().install(server);
builder.handlers.forEach((binding, handler) -> containerBuilder.serverBindings().bind(binding, handler));
jdiscCoreTestDriver.activateContainer(containerBuilder);
server.start();
this.jdiscCoreTestDriver = jdiscCoreTestDriver;
this.server = server;
this.sslContext = newSslContext(containerBuilder);
}
public static Builder newBuilder() { return new Builder(); }
public SSLContext sslContext() { return sslContext; }
public JettyHttpServer server() { return server; }
@Override public void close() { shutdown(); }
// Tears down in reverse order of construction; returns the core driver's close status.
public boolean shutdown() {
context.deconstruct();
server.close();
server.release();
return jdiscCoreTestDriver.close();
}
// Builds a client SSLContext from the connector's SSL config; null when SSL is disabled.
private static SSLContext newSslContext(ContainerBuilder builder) {
ConnectorConfig.Ssl sslConfig = builder.getInstance(ConnectorConfig.class).ssl();
if (!sslConfig.enabled()) return null;
return new SslContextBuilder()
.withKeyStore(Paths.get(sslConfig.privateKeyFile()), Paths.get(sslConfig.certificateFile()))
.withTrustStore(Paths.get(sslConfig.caCertificateFile()))
.build();
}
public static class Builder {
private final SortedMap<String, RequestHandler> handlers = new TreeMap<>();
private final List<Module> extraGuiceModules = new ArrayList<>();
private ServerConfig serverConfig;
private ConnectorConfig connectorConfig;
private Builder() {}
public Builder withRequestHandler(String binding, RequestHandler handler) {
this.handlers.put(binding, handler); return this;
}
// NOTE(review): the next line is truncated in this dataset copy — the string
// literal "http: is unterminated; recover the full binding from the original file.
public Builder withRequestHandler(RequestHandler handler) { return withRequestHandler("http:
public Builder withServerConfig(ServerConfig config) { this.serverConfig = config; return this; }
public Builder withConnectorConfig(ConnectorConfig config) { this.connectorConfig = config; return this; }
public Builder withGuiceModule(Module module) { this.extraGuiceModules.add(module); return this; }
public TestDriver build() { return new TestDriver(this); }
}
} | class TestDriver implements AutoCloseable {
private final com.yahoo.jdisc.test.TestDriver jdiscCoreTestDriver;
private final JettyHttpServer server;
private final JettyHttpServerContext context;
private final SSLContext sslContext;
private TestDriver(Builder builder) {
ServerConfig serverConfig =
builder.serverConfig != null ? builder.serverConfig : new ServerConfig(new ServerConfig.Builder());
ConnectorConfig connectorConfig =
builder.connectorConfig != null ? builder.connectorConfig : new ConnectorConfig(new ConnectorConfig.Builder());
Module baseModule = createBaseModule(serverConfig, connectorConfig);
Module combinedModule =
builder.extraGuiceModules.isEmpty() ? baseModule : Modules.override(baseModule).with(builder.extraGuiceModules);
com.yahoo.jdisc.test.TestDriver jdiscCoreTestDriver =
com.yahoo.jdisc.test.TestDriver.newSimpleApplicationInstance(combinedModule);
ContainerBuilder containerBuilder = jdiscCoreTestDriver.newContainerBuilder();
JettyHttpServer server = containerBuilder.getInstance(JettyHttpServer.class);
this.context = containerBuilder.getInstance(JettyHttpServerContext.class);
containerBuilder.serverProviders().install(server);
builder.handlers.forEach((binding, handler) -> containerBuilder.serverBindings().bind(binding, handler));
jdiscCoreTestDriver.activateContainer(containerBuilder);
server.start();
this.jdiscCoreTestDriver = jdiscCoreTestDriver;
this.server = server;
this.sslContext = newSslContext(containerBuilder);
}
public static Builder newBuilder() { return new Builder(); }
public SSLContext sslContext() { return sslContext; }
public JettyHttpServer server() { return server; }
@Override public void close() { shutdown(); }
public boolean shutdown() {
context.deconstruct();
server.close();
server.release();
return jdiscCoreTestDriver.close();
}
private static SSLContext newSslContext(ContainerBuilder builder) {
ConnectorConfig.Ssl sslConfig = builder.getInstance(ConnectorConfig.class).ssl();
if (!sslConfig.enabled()) return null;
return new SslContextBuilder()
.withKeyStore(Paths.get(sslConfig.privateKeyFile()), Paths.get(sslConfig.certificateFile()))
.withTrustStore(Paths.get(sslConfig.caCertificateFile()))
.build();
}
public static class Builder {
private final SortedMap<String, RequestHandler> handlers = new TreeMap<>();
private final List<Module> extraGuiceModules = new ArrayList<>();
private ServerConfig serverConfig;
private ConnectorConfig connectorConfig;
private Builder() {}
public Builder withRequestHandler(String binding, RequestHandler handler) {
this.handlers.put(binding, handler); return this;
}
public Builder withRequestHandler(RequestHandler handler) { return withRequestHandler("http:
public Builder withServerConfig(ServerConfig config) { this.serverConfig = config; return this; }
public Builder withConnectorConfig(ConnectorConfig config) { this.connectorConfig = config; return this; }
public Builder withGuiceModule(Module module) { this.extraGuiceModules.add(module); return this; }
public TestDriver build() { return new TestDriver(this); }
}
} |
😜 | private static Module createBaseModule(ServerConfig serverConfig, ConnectorConfig connectorConfig) {
return Modules.combine(
new AbstractModule() {
@Override
protected void configure() {
bind(JettyHttpServer.class).in(Singleton.class);
bind(ServerConfig.class).toInstance(serverConfig);
bind(ConnectorConfig.class).toInstance(connectorConfig);
bind(FilterBindings.class).toInstance(new FilterBindings.Builder().setStrictFiltering(serverConfig.strictFiltering()).build());
bind(ConnectionLog.class).toInstance(new VoidConnectionLog());
bind(RequestLog.class).toInstance(new VoidRequestLog());
}
},
new ConnectorFactoryRegistryModule(connectorConfig));
} | bind(JettyHttpServer.class).in(Singleton.class); | private static Module createBaseModule(ServerConfig serverConfig, ConnectorConfig connectorConfig) {
return Modules.combine(
new AbstractModule() {
@Override
protected void configure() {
bind(JettyHttpServer.class).in(Singleton.class);
bind(ServerConfig.class).toInstance(serverConfig);
bind(ConnectorConfig.class).toInstance(connectorConfig);
bind(FilterBindings.class).toInstance(new FilterBindings.Builder().setStrictFiltering(serverConfig.strictFiltering()).build());
bind(ConnectionLog.class).toInstance(new VoidConnectionLog());
bind(RequestLog.class).toInstance(new VoidRequestLog());
}
},
new ConnectorFactoryRegistryModule(connectorConfig));
} | class TestDriver implements AutoCloseable {
private final com.yahoo.jdisc.test.TestDriver jdiscCoreTestDriver;
private final JettyHttpServer server;
private final JettyHttpServerContext context;
private final SSLContext sslContext;
private TestDriver(Builder builder) {
ServerConfig serverConfig =
builder.serverConfig != null ? builder.serverConfig : new ServerConfig(new ServerConfig.Builder());
ConnectorConfig connectorConfig =
builder.connectorConfig != null ? builder.connectorConfig : new ConnectorConfig(new ConnectorConfig.Builder());
Module baseModule = createBaseModule(serverConfig, connectorConfig);
Module combinedModule =
builder.extraGuiceModules.isEmpty() ? baseModule : Modules.override(baseModule).with(builder.extraGuiceModules);
com.yahoo.jdisc.test.TestDriver jdiscCoreTestDriver =
com.yahoo.jdisc.test.TestDriver.newSimpleApplicationInstance(combinedModule);
ContainerBuilder containerBuilder = jdiscCoreTestDriver.newContainerBuilder();
JettyHttpServer server = containerBuilder.getInstance(JettyHttpServer.class);
this.context = containerBuilder.getInstance(JettyHttpServerContext.class);
containerBuilder.serverProviders().install(server);
builder.handlers.forEach((binding, handler) -> containerBuilder.serverBindings().bind(binding, handler));
jdiscCoreTestDriver.activateContainer(containerBuilder);
server.start();
this.jdiscCoreTestDriver = jdiscCoreTestDriver;
this.server = server;
this.sslContext = newSslContext(containerBuilder);
}
public static Builder newBuilder() { return new Builder(); }
public SSLContext sslContext() { return sslContext; }
public JettyHttpServer server() { return server; }
@Override public void close() { shutdown(); }
public boolean shutdown() {
context.deconstruct();
server.close();
server.release();
return jdiscCoreTestDriver.close();
}
private static SSLContext newSslContext(ContainerBuilder builder) {
ConnectorConfig.Ssl sslConfig = builder.getInstance(ConnectorConfig.class).ssl();
if (!sslConfig.enabled()) return null;
return new SslContextBuilder()
.withKeyStore(Paths.get(sslConfig.privateKeyFile()), Paths.get(sslConfig.certificateFile()))
.withTrustStore(Paths.get(sslConfig.caCertificateFile()))
.build();
}
public static class Builder {
private final SortedMap<String, RequestHandler> handlers = new TreeMap<>();
private final List<Module> extraGuiceModules = new ArrayList<>();
private ServerConfig serverConfig;
private ConnectorConfig connectorConfig;
private Builder() {}
public Builder withRequestHandler(String binding, RequestHandler handler) {
this.handlers.put(binding, handler); return this;
}
public Builder withRequestHandler(RequestHandler handler) { return withRequestHandler("http:
public Builder withServerConfig(ServerConfig config) { this.serverConfig = config; return this; }
public Builder withConnectorConfig(ConnectorConfig config) { this.connectorConfig = config; return this; }
public Builder withGuiceModule(Module module) { this.extraGuiceModules.add(module); return this; }
public TestDriver build() { return new TestDriver(this); }
}
} | class TestDriver implements AutoCloseable {
// Test harness pairing a jdisc core TestDriver with a running Jetty HTTP server.
private final com.yahoo.jdisc.test.TestDriver jdiscCoreTestDriver;
private final JettyHttpServer server;
private final JettyHttpServerContext context;
private final SSLContext sslContext;
// Wires configs, Guice modules and request handlers, activates the container, and starts Jetty.
private TestDriver(Builder builder) {
// Fall back to default configs when the builder did not supply any.
ServerConfig serverConfig =
builder.serverConfig != null ? builder.serverConfig : new ServerConfig(new ServerConfig.Builder());
ConnectorConfig connectorConfig =
builder.connectorConfig != null ? builder.connectorConfig : new ConnectorConfig(new ConnectorConfig.Builder());
Module baseModule = createBaseModule(serverConfig, connectorConfig);
// Extra Guice modules override the base bindings when present.
Module combinedModule =
builder.extraGuiceModules.isEmpty() ? baseModule : Modules.override(baseModule).with(builder.extraGuiceModules);
com.yahoo.jdisc.test.TestDriver jdiscCoreTestDriver =
com.yahoo.jdisc.test.TestDriver.newSimpleApplicationInstance(combinedModule);
ContainerBuilder containerBuilder = jdiscCoreTestDriver.newContainerBuilder();
JettyHttpServer server = containerBuilder.getInstance(JettyHttpServer.class);
this.context = containerBuilder.getInstance(JettyHttpServerContext.class);
containerBuilder.serverProviders().install(server);
// Bind every handler registered on the builder under its URI binding.
builder.handlers.forEach((binding, handler) -> containerBuilder.serverBindings().bind(binding, handler));
jdiscCoreTestDriver.activateContainer(containerBuilder);
server.start();
this.jdiscCoreTestDriver = jdiscCoreTestDriver;
this.server = server;
// May be null when SSL is not enabled on the connector (see newSslContext below).
this.sslContext = newSslContext(containerBuilder);
}
public static Builder newBuilder() { return new Builder(); }
public SSLContext sslContext() { return sslContext; }
public JettyHttpServer server() { return server; }
@Override public void close() { shutdown(); }
// Tears down context, server and core driver; returns the core driver's close status.
public boolean shutdown() {
context.deconstruct();
server.close();
server.release();
return jdiscCoreTestDriver.close();
}
// Builds an SSLContext from the connector's key and trust material; null when SSL is disabled.
private static SSLContext newSslContext(ContainerBuilder builder) {
ConnectorConfig.Ssl sslConfig = builder.getInstance(ConnectorConfig.class).ssl();
if (!sslConfig.enabled()) return null;
return new SslContextBuilder()
.withKeyStore(Paths.get(sslConfig.privateKeyFile()), Paths.get(sslConfig.certificateFile()))
.withTrustStore(Paths.get(sslConfig.caCertificateFile()))
.build();
}
// Collects handlers (sorted by binding), extra Guice modules, and optional configs.
public static class Builder {
private final SortedMap<String, RequestHandler> handlers = new TreeMap<>();
private final List<Module> extraGuiceModules = new ArrayList<>();
private ServerConfig serverConfig;
private ConnectorConfig connectorConfig;
private Builder() {}
public Builder withRequestHandler(String binding, RequestHandler handler) {
this.handlers.put(binding, handler); return this;
}
// NOTE(review): the next line's string literal appears truncated by extraction at "http:" —
// presumably a wildcard binding such as "http://*/*"; confirm against the original source.
public Builder withRequestHandler(RequestHandler handler) { return withRequestHandler("http:
public Builder withServerConfig(ServerConfig config) { this.serverConfig = config; return this; }
public Builder withConnectorConfig(ConnectorConfig config) { this.connectorConfig = config; return this; }
public Builder withGuiceModule(Module module) { this.extraGuiceModules.add(module); return this; }
public TestDriver build() { return new TestDriver(this); }
}
}
CORS should be handled by the pre-flight request filter to ensure the correct security headers are present — not critical in this particular case, but missing headers may break some cross-site usage. | public HttpResponse handle(HttpRequest request) {
// Resolve the HTTP method up front so the dispatch below stays readable.
Method method = request.getMethod();
try {
    return switch (method) {
        // Pre-flight/OPTIONS: advertise the supported methods and include CORS
        // headers so cross-origin badge embedding keeps working (previously the
        // Access-Control-* headers were missing from this response).
        case OPTIONS -> new SvgHttpResponse("") {{
            headers().add("Allow", "GET, HEAD, OPTIONS");
            headers().add("Access-Control-Allow-Origin", "*");
            headers().add("Access-Control-Allow-Methods", "GET, HEAD, OPTIONS");
        }};
        case HEAD, GET -> get(request);
        default -> ErrorResponse.methodNotAllowed("Method '" + method + "' is unsupported");
    };
} catch (IllegalArgumentException|IllegalStateException e) {
    // Invalid input surfaces as 400 with the exception message.
    return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
    // Unexpected failures are logged and returned as an internal error.
    return ErrorResponses.logThrowing(request, log, e);
}
} | case OPTIONS -> new SvgHttpResponse("") {{ headers().add("Allow", "GET, HEAD, OPTIONS"); }}; | public HttpResponse handle(HttpRequest request) {
// Dispatch on the HTTP method.
Method method = request.getMethod();
try {
return switch (method) {
// Pre-flight/OPTIONS response: advertises the allowed methods and CORS
// headers so the badge can be fetched cross-origin.
case OPTIONS -> new SvgHttpResponse("") {{
headers().add("Allow", "GET, HEAD, OPTIONS");
headers().add("Access-Control-Allow-Origin", "*");
headers().add("Access-Control-Allow-Methods", "GET, HEAD, OPTIONS");
}};
case HEAD, GET -> get(request);
default -> ErrorResponse.methodNotAllowed("Method '" + method + "' is unsupported");
};
} catch (IllegalArgumentException|IllegalStateException e) {
// Invalid input maps to 400 with the exception message.
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
// Anything else is logged and returned as an internal error.
return ErrorResponses.logThrowing(request, log, e);
}
} | class BadgeApiHandler extends ThreadedHttpRequestHandler {
private final static Logger log = Logger.getLogger(BadgeApiHandler.class.getName());
private final Controller controller;
// Cache of rendered badge SVGs, keyed by (application, job type, history length).
private final Map<Key, Value> badgeCache = new ConcurrentHashMap<>();
public BadgeApiHandler(Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
// NOTE(review): an @Override on a private method would not compile — this looks like an
// extraction artifact; it presumably belonged to a public handle(...) override that is
// not shown here. Confirm against the original file.
@Override
// Routes /badge/v1/... requests to the overview or history badge renderer.
private HttpResponse get(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/badge/v1/{tenant}/{application}/{instance}")) return overviewBadge(path.get("tenant"), path.get("application"), path.get("instance"));
if (path.matches("/badge/v1/{tenant}/{application}/{instance}/{jobName}")) return historyBadge(path.get("tenant"), path.get("application"), path.get("instance"), path.get("jobName"), request.getProperty("historyLength"));
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
/** Returns a URI which points to an overview badge for the given application. */
private HttpResponse overviewBadge(String tenant, String application, String instance) {
ApplicationId id = ApplicationId.from(tenant, application, instance);
return cachedResponse(new Key(id, null, 0),
controller.clock().instant(),
() -> {
DeploymentStatus status = controller.jobController().deploymentStatus(controller.applications().requireApplication(TenantAndApplicationId.from(id)));
// Only jobs that are declared deployment steps appear on the overview badge.
Predicate<JobStatus> isDeclaredJob = job -> status.jobSteps().get(job.id()) != null && status.jobSteps().get(job.id()).isDeclared();
return Badges.overviewBadge(id, status.jobs().instance(id.instance()).matching(isDeclaredJob));
});
}
/** Returns a URI which points to a history badge for the given application and job type. */
private HttpResponse historyBadge(String tenant, String application, String instance, String jobName, String historyLength) {
ApplicationId id = ApplicationId.from(tenant, application, instance);
JobType type = JobType.fromJobName(jobName, controller.zoneRegistry());
// History length defaults to 5 and is clamped to [0, 32].
int length = historyLength == null ? 5 : Math.min(32, Math.max(0, Integer.parseInt(historyLength)));
return cachedResponse(new Key(id, type, length),
controller.clock().instant(),
() -> Badges.historyBadge(id,
controller.jobController().jobStatus(new JobId(id, type)),
length)
);
}
// Serves the cached SVG when still fresh; otherwise renders and caches a new one.
private HttpResponse cachedResponse(Key key, Instant now, Supplier<String> badge) {
return new SvgHttpResponse(badgeCache.compute(key, (__, value) -> {
return value != null && value.expiry.isAfter(now) ? value : new Value(badge.get(), now);
}).badgeSvg);
}
// 200 response carrying an SVG document.
private static class SvgHttpResponse extends HttpResponse {
private final String svg;
SvgHttpResponse(String svg) { super(200); this.svg = svg; }
@Override public void render(OutputStream outputStream) throws IOException {
outputStream.write(svg.getBytes(UTF_8));
}
@Override public String getContentType() {
return "image/svg+xml";
}
}
// Cache key: application id, optional job type (null for the overview badge), history length.
private static class Key {
private final ApplicationId id;
private final JobType type;
private final int historyLength;
private Key(ApplicationId id, JobType type, int historyLength) {
this.id = id;
this.type = type;
this.historyLength = historyLength;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Key key = (Key) o;
return historyLength == key.historyLength && id.equals(key.id) && Objects.equals(type, key.type);
}
@Override
public int hashCode() {
return Objects.hash(id, type, historyLength);
}
}
// Cached SVG with a 60-second freshness window.
private static class Value {
private final String badgeSvg;
private final Instant expiry;
private Value(String badgeSvg, Instant created) {
this.badgeSvg = badgeSvg;
this.expiry = created.plusSeconds(60);
}
}
} | class BadgeApiHandler extends ThreadedHttpRequestHandler {
private final static Logger log = Logger.getLogger(BadgeApiHandler.class.getName());
private final Controller controller;
// Rendered badge SVGs cached by (application, job type, history length).
private final Map<Key, Value> badgeCache = new ConcurrentHashMap<>();
public BadgeApiHandler(Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
// NOTE(review): @Override on a private method would not compile — likely an extraction
// artifact from an excised public handle(...) override; confirm against the original file.
@Override
// Routes /badge/v1/... requests to the matching badge renderer, 404 otherwise.
private HttpResponse get(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/badge/v1/{tenant}/{application}/{instance}")) return overviewBadge(path.get("tenant"), path.get("application"), path.get("instance"));
if (path.matches("/badge/v1/{tenant}/{application}/{instance}/{jobName}")) return historyBadge(path.get("tenant"), path.get("application"), path.get("instance"), path.get("jobName"), request.getProperty("historyLength"));
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
/** Returns a URI which points to an overview badge for the given application. */
private HttpResponse overviewBadge(String tenant, String application, String instance) {
ApplicationId id = ApplicationId.from(tenant, application, instance);
return cachedResponse(new Key(id, null, 0),
controller.clock().instant(),
() -> {
DeploymentStatus status = controller.jobController().deploymentStatus(controller.applications().requireApplication(TenantAndApplicationId.from(id)));
// Overview badge shows declared deployment jobs only.
Predicate<JobStatus> isDeclaredJob = job -> status.jobSteps().get(job.id()) != null && status.jobSteps().get(job.id()).isDeclared();
return Badges.overviewBadge(id, status.jobs().instance(id.instance()).matching(isDeclaredJob));
});
}
/** Returns a URI which points to a history badge for the given application and job type. */
private HttpResponse historyBadge(String tenant, String application, String instance, String jobName, String historyLength) {
ApplicationId id = ApplicationId.from(tenant, application, instance);
JobType type = JobType.fromJobName(jobName, controller.zoneRegistry());
// Default 5 runs of history, clamped to at most 32 and at least 0.
int length = historyLength == null ? 5 : Math.min(32, Math.max(0, Integer.parseInt(historyLength)));
return cachedResponse(new Key(id, type, length),
controller.clock().instant(),
() -> Badges.historyBadge(id,
controller.jobController().jobStatus(new JobId(id, type)),
length)
);
}
// Returns the cached SVG while fresh, re-rendering and re-caching when expired.
private HttpResponse cachedResponse(Key key, Instant now, Supplier<String> badge) {
return new SvgHttpResponse(badgeCache.compute(key, (__, value) -> {
return value != null && value.expiry.isAfter(now) ? value : new Value(badge.get(), now);
}).badgeSvg);
}
// HTTP 200 response with an image/svg+xml body.
private static class SvgHttpResponse extends HttpResponse {
private final String svg;
SvgHttpResponse(String svg) { super(200); this.svg = svg; }
@Override public void render(OutputStream outputStream) throws IOException {
outputStream.write(svg.getBytes(UTF_8));
}
@Override public String getContentType() {
return "image/svg+xml";
}
}
// Cache key triple; type is null for the overview badge.
private static class Key {
private final ApplicationId id;
private final JobType type;
private final int historyLength;
private Key(ApplicationId id, JobType type, int historyLength) {
this.id = id;
this.type = type;
this.historyLength = historyLength;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Key key = (Key) o;
return historyLength == key.historyLength && id.equals(key.id) && Objects.equals(type, key.type);
}
@Override
public int hashCode() {
return Objects.hash(id, type, historyLength);
}
}
// Cached SVG; expires 60 seconds after creation.
private static class Value {
private final String badgeSvg;
private final Instant expiry;
private Value(String badgeSvg, Instant created) {
this.badgeSvg = badgeSvg;
this.expiry = created.plusSeconds(60);
}
}
}
This should have failed, but didn't, because the strings being built were the char array's `toString()` output (`[C@<hash>`) rather than the intended characters :) | public void requireThatCharWorks() {
// The whitespace control characters that are explicitly legal.
new StringFieldValue("\t");
new StringFieldValue("\r");
new StringFieldValue("\n");
// BMP code points that must be accepted: from U+0020 up to the surrogate range,
// then after the surrogates up to U+FDD0, then U+FDE0..U+FFFD (the loops skip the
// U+FDD0..U+FDDF noncharacters and U+FFFE/U+FFFF).
for (int codePoint = 0x20; codePoint < MIN_SURROGATE; codePoint++) {
    new StringFieldValue(String.valueOf(Character.toChars(codePoint)[0]));
}
for (int codePoint = MAX_SURROGATE + 1; codePoint < 0xFDD0; codePoint++) {
    new StringFieldValue(String.valueOf(Character.toChars(codePoint)[0]));
}
for (int codePoint = 0xFDE0; codePoint < 0xFFFE; codePoint++) {
    new StringFieldValue(String.valueOf(Character.toChars(codePoint)[0]));
}
// Supplementary planes 1..16: build each two-char (surrogate pair) string,
// stopping just before the per-plane noncharacters U+nFFFE/U+nFFFF.
for (int plane = 0x10000; plane <= 0x100000; plane += 0x10000) {
    for (int codePoint = plane; codePoint < plane + 0xFFFE; codePoint++) {
        char[] units = Character.toChars(codePoint);
        new StringFieldValue("" + units[0] + units[1]);
    }
}
} | new StringFieldValue("" + Character.toChars(c)[0]); | public void requireThatCharWorks() {
new StringFieldValue("\t");
new StringFieldValue("\r");
new StringFieldValue("\n");
for (int c = 0x20; c < MIN_SURROGATE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = MAX_SURROGATE + 1; c < 0xFDD0; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = 0xFDE0; c < 0xFFFE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = 0x10000; c < 0x1FFFE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = 0x20000; c < 0x2FFFE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = 0x30000; c < 0x3FFFE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = 0x40000; c < 0x4FFFE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = 0x50000; c < 0x5FFFE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = 0x60000; c < 0x6FFFE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = 0x70000; c < 0x7FFFE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = 0x80000; c < 0x8FFFE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = 0x90000; c < 0x9FFFE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = 0xA0000; c < 0xAFFFE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = 0xB0000; c < 0xBFFFE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = 0xC0000; c < 0xCFFFE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = 0xD0000; c < 0xDFFFE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = 0xE0000; c < 0xEFFFE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = 0xF0000; c < 0xFFFFE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
for (int c = 0x100000; c < 0x10FFFE; c++) {
new StringFieldValue(new String(Character.toChars(c)));
}
} | class StringFieldValueTestCase {
// NOTE(review): this bare @Test looks like an extraction artifact — the method it
// annotated is not shown here; confirm against the original file.
@Test
// U+0000..U+0007: C0 control characters must be rejected.
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails0() {
new StringFieldValue("\u0000");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1() {
new StringFieldValue("\u0001");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails2() {
new StringFieldValue("\u0002");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails3() {
new StringFieldValue("\u0003");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails4() {
new StringFieldValue("\u0004");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails5() {
new StringFieldValue("\u0005");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails6() {
new StringFieldValue("\u0006");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails7() {
new StringFieldValue("\u0007");
}
// U+000B, U+000C, U+000E..U+001F: remaining rejected C0 controls. Tab (U+0009),
// LF (U+000A) and CR (U+000D) are legal and have no tests; note U+0008 is also
// absent from this list.
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsB() {
new StringFieldValue("\u000B");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsC() {
new StringFieldValue("\u000C");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsE() {
new StringFieldValue("\u000E");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsF() {
new StringFieldValue("\u000F");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails10() {
new StringFieldValue("\u0010");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails11() {
new StringFieldValue("\u0011");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails12() {
new StringFieldValue("\u0012");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails13() {
new StringFieldValue("\u0013");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails14() {
new StringFieldValue("\u0014");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails15() {
new StringFieldValue("\u0015");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails16() {
new StringFieldValue("\u0016");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails17() {
new StringFieldValue("\u0017");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails18() {
new StringFieldValue("\u0018");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails19() {
new StringFieldValue("\u0019");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1A() {
new StringFieldValue("\u001A");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1B() {
new StringFieldValue("\u001B");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1C() {
new StringFieldValue("\u001C");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1D() {
new StringFieldValue("\u001D");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1E() {
new StringFieldValue("\u001E");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1F() {
new StringFieldValue("\u001F");
}
// U+FDD0..U+FDDF: Unicode noncharacters must be rejected.
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD0() {
new StringFieldValue("\uFDD0");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD1() {
new StringFieldValue("\uFDD1");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD2() {
new StringFieldValue("\uFDD2");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD3() {
new StringFieldValue("\uFDD3");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD4() {
new StringFieldValue("\uFDD4");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD5() {
new StringFieldValue("\uFDD5");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD6() {
new StringFieldValue("\uFDD6");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD7() {
new StringFieldValue("\uFDD7");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD8() {
new StringFieldValue("\uFDD8");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD9() {
new StringFieldValue("\uFDD9");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDDA() {
new StringFieldValue("\uFDDA");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDDB() {
new StringFieldValue("\uFDDB");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDDC() {
new StringFieldValue("\uFDDC");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDDD() {
new StringFieldValue("\uFDDD");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDDE() {
new StringFieldValue("\uFDDE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDDF() {
new StringFieldValue("\uFDDF");
}
// U+FFFE/U+FFFF: the BMP noncharacters.
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFFFE() {
new StringFieldValue("\uFFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFFFF() {
new StringFieldValue("\uFFFF");
}
// U+nFFFE/U+nFFFF for supplementary planes 1..16, written as surrogate pairs.
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1FFFE() {
new StringFieldValue("\uD83F\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1FFFF() {
new StringFieldValue("\uD83F\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails2FFFE() {
new StringFieldValue("\uD87F\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails2FFFF() {
new StringFieldValue("\uD87F\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails3FFFE() {
new StringFieldValue("\uD8BF\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails3FFFF() {
new StringFieldValue("\uD8BF\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails4FFFE() {
new StringFieldValue("\uD8FF\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails4FFFF() {
new StringFieldValue("\uD8FF\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails5FFFE() {
new StringFieldValue("\uD93F\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails5FFFF() {
new StringFieldValue("\uD93F\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails6FFFE() {
new StringFieldValue("\uD97F\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails6FFFF() {
new StringFieldValue("\uD97F\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails7FFFE() {
new StringFieldValue("\uD9BF\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails7FFFF() {
new StringFieldValue("\uD9BF\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails8FFFE() {
new StringFieldValue("\uD9FF\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails8FFFF() {
new StringFieldValue("\uD9FF\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails9FFFE() {
new StringFieldValue("\uDA3F\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails9FFFF() {
new StringFieldValue("\uDA3F\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsAFFFE() {
new StringFieldValue("\uDA7F\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsAFFFF() {
new StringFieldValue("\uDA7F\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsBFFFE() {
new StringFieldValue("\uDABF\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsBFFFF() {
new StringFieldValue("\uDABF\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsCFFFE() {
new StringFieldValue("\uDAFF\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsCFFFF() {
new StringFieldValue("\uDAFF\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsDFFFE() {
new StringFieldValue("\uDB3F\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsDFFFF() {
new StringFieldValue("\uDB3F\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsEFFFE() {
new StringFieldValue("\uDB7F\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsEFFFF() {
new StringFieldValue("\uDB7F\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFFFFE() {
new StringFieldValue("\uDBBF\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFFFFF() {
new StringFieldValue("\uDBBF\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails10FFFE() {
new StringFieldValue("\uDBFF\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails10FFFF() {
new StringFieldValue("\uDBFF\uDFFF");
}
} | class StringFieldValueTestCase {
@Test
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails0() {
new StringFieldValue("\u0000");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1() {
new StringFieldValue("\u0001");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails2() {
new StringFieldValue("\u0002");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails3() {
new StringFieldValue("\u0003");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails4() {
new StringFieldValue("\u0004");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails5() {
new StringFieldValue("\u0005");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails6() {
new StringFieldValue("\u0006");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails7() {
new StringFieldValue("\u0007");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsB() {
new StringFieldValue("\u000B");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsC() {
new StringFieldValue("\u000C");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsE() {
new StringFieldValue("\u000E");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsF() {
new StringFieldValue("\u000F");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails10() {
new StringFieldValue("\u0010");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails11() {
new StringFieldValue("\u0011");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails12() {
new StringFieldValue("\u0012");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails13() {
new StringFieldValue("\u0013");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails14() {
new StringFieldValue("\u0014");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails15() {
new StringFieldValue("\u0015");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails16() {
new StringFieldValue("\u0016");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails17() {
new StringFieldValue("\u0017");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails18() {
new StringFieldValue("\u0018");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails19() {
new StringFieldValue("\u0019");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1A() {
new StringFieldValue("\u001A");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1B() {
new StringFieldValue("\u001B");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1C() {
new StringFieldValue("\u001C");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1D() {
new StringFieldValue("\u001D");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1E() {
new StringFieldValue("\u001E");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1F() {
new StringFieldValue("\u001F");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD0() {
new StringFieldValue("\uFDD0");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD1() {
new StringFieldValue("\uFDD1");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD2() {
new StringFieldValue("\uFDD2");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD3() {
new StringFieldValue("\uFDD3");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD4() {
new StringFieldValue("\uFDD4");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD5() {
new StringFieldValue("\uFDD5");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD6() {
new StringFieldValue("\uFDD6");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD7() {
new StringFieldValue("\uFDD7");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD8() {
new StringFieldValue("\uFDD8");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDD9() {
new StringFieldValue("\uFDD9");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDDA() {
new StringFieldValue("\uFDDA");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDDB() {
new StringFieldValue("\uFDDB");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDDC() {
new StringFieldValue("\uFDDC");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDDD() {
new StringFieldValue("\uFDDD");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDDE() {
new StringFieldValue("\uFDDE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFDDF() {
new StringFieldValue("\uFDDF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFFFE() {
new StringFieldValue("\uFFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFFFF() {
new StringFieldValue("\uFFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1FFFE() {
new StringFieldValue("\uD83F\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails1FFFF() {
new StringFieldValue("\uD83F\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails2FFFE() {
new StringFieldValue("\uD87F\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails2FFFF() {
new StringFieldValue("\uD87F\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails3FFFE() {
new StringFieldValue("\uD8BF\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails3FFFF() {
new StringFieldValue("\uD8BF\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails4FFFE() {
new StringFieldValue("\uD8FF\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails4FFFF() {
new StringFieldValue("\uD8FF\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails5FFFE() {
new StringFieldValue("\uD93F\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails5FFFF() {
new StringFieldValue("\uD93F\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails6FFFE() {
new StringFieldValue("\uD97F\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails6FFFF() {
new StringFieldValue("\uD97F\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails7FFFE() {
new StringFieldValue("\uD9BF\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails7FFFF() {
new StringFieldValue("\uD9BF\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails8FFFE() {
new StringFieldValue("\uD9FF\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails8FFFF() {
new StringFieldValue("\uD9FF\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails9FFFE() {
new StringFieldValue("\uDA3F\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails9FFFF() {
new StringFieldValue("\uDA3F\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsAFFFE() {
new StringFieldValue("\uDA7F\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsAFFFF() {
new StringFieldValue("\uDA7F\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsBFFFE() {
new StringFieldValue("\uDABF\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsBFFFF() {
new StringFieldValue("\uDABF\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsCFFFE() {
new StringFieldValue("\uDAFF\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsCFFFF() {
new StringFieldValue("\uDAFF\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsDFFFE() {
new StringFieldValue("\uDB3F\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsDFFFF() {
new StringFieldValue("\uDB3F\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsEFFFE() {
new StringFieldValue("\uDB7F\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsEFFFF() {
new StringFieldValue("\uDB7F\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFFFFE() {
new StringFieldValue("\uDBBF\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFailsFFFFF() {
new StringFieldValue("\uDBBF\uDFFF");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails10FFFE() {
new StringFieldValue("\uDBFF\uDFFE");
}
@Test(expected = IllegalArgumentException.class)
public void requireThatControlCharFails10FFFF() {
new StringFieldValue("\uDBFF\uDFFF");
}
} |
```suggestion ``` | public PriceInformation price(List<ClusterResources> clusterResources, PricingInfo pricingInfo, Plan plan) {
return new PriceInformation(
BigDecimal.valueOf(clusterResources.stream()
.peek(System.out::println)
.mapToDouble(resources -> resources.nodes() *
(resources.nodeResources().vcpu() * 1000 +
resources.nodeResources().memoryGb() * 100 +
resources.nodeResources().diskGb() * 10))
.sum())
.setScale(2, RoundingMode.HALF_UP), BigDecimal.ZERO);
} | .peek(System.out::println) | public PriceInformation price(List<ClusterResources> clusterResources, PricingInfo pricingInfo, Plan plan) {
BigDecimal listPrice = BigDecimal.valueOf(clusterResources.stream()
.mapToDouble(resources -> resources.nodes() *
(resources.nodeResources().vcpu() * 1000 +
resources.nodeResources().memoryGb() * 100 +
resources.nodeResources().diskGb() * 10))
.sum())
.setScale(2, RoundingMode.HALF_UP);
BigDecimal volumeDiscount = new BigDecimal("-5.00");
BigDecimal committedAmountDiscount = new BigDecimal("0.00");
BigDecimal enclaveDiscount = new BigDecimal("0.00");
BigDecimal totalAmount = listPrice.add(volumeDiscount);
return new PriceInformation(listPrice, volumeDiscount, committedAmountDiscount, enclaveDiscount, totalAmount);
} | class MockPricingController implements PricingController {
@Override
} | class MockPricingController implements PricingController {
@Override
} |
I don't think this is quite right; except the value of `resources`, the other query parameters are regular query parameters, so the `&` and `=` should not be URL encoded? | String urlEncodedPriceInformation() {
var parameters = "supportLevel=standard&committedSpend=0&enclave=false" +
"&resources=nodes=1,vcpu=1,memoryGb=1,diskGb=10,gpuMemoryGb=0" +
"&resources=nodes=1,vcpu=1,memoryGb=1,diskGb=10,gpuMemoryGb=0";
return URLEncoder.encode(parameters, UTF_8);
} | return URLEncoder.encode(parameters, UTF_8); | String urlEncodedPriceInformation() {
String resources = URLEncoder.encode("nodes=1,vcpu=1,memoryGb=1,diskGb=10,gpuMemoryGb=0", UTF_8);
return "supportLevel=basic&committedSpend=0&enclave=false" +
"&resources=" + resources +
"&resources=" + resources;
} | class PricingApiHandlerTest extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/pricing/responses/";
@Test
void testPricingInfo() {
ContainerTester tester = new ContainerTester(container, responseFiles);
assertEquals(SystemName.Public, tester.controller().system());
var request = request("/pricing/v1/pricing?" + urlEncodedPriceInformation());
tester.assertResponse(request, """
{"listPrice":"2400.00","volumeDiscount":"0.00"}""",
200);
}
/**
* 2 clusters, with each having 1 node, with 1 vcpu, 1 Gb memory, 10 Gb disk and no GPU
* price will be 20000 + 2000 + 200
*/
} | class PricingApiHandlerTest extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/pricing/responses/";
@Test
void testPricingInfo() {
ContainerTester tester = new ContainerTester(container, responseFiles);
assertEquals(SystemName.Public, tester.controller().system());
var request = request("/pricing/v1/pricing?" + urlEncodedPriceInformation());
tester.assertJsonResponse(request, """
{
"priceInfo": [
{"description": "List price", "amount": "2400.00"},
{"description": "Volume discount", "amount": "-5.00"}
],
"totalAmount": "2395.00"
}
""",
200);
}
@Test
void testPricingInfoWithIncompleteParameter() {
ContainerTester tester = new ContainerTester(container, responseFiles);
assertEquals(SystemName.Public, tester.controller().system());
var request = request("/pricing/v1/pricing?" + urlEncodedPriceInformationWithMissingValueInResourcs());
tester.assertJsonResponse(request,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Error in query parameter, expected '=' between key and value: resources\"}",
400);
}
/**
* 2 clusters, with each having 1 node, with 1 vcpu, 1 Gb memory, 10 Gb disk and no GPU
* price will be 20000 + 2000 + 200
*/
String urlEncodedPriceInformationWithMissingValueInResourcs() {
return URLEncoder.encode("supportLevel=basic&committedSpend=0&enclave=false&resources", UTF_8);
}
} |
Let's make each element in this array be an object with "description" and "amount", e.g. ``` {"description":"List price","amount":"40.05"}, {"description":"Volume discount","amount":"5.37"}, ``` if amount is > 0. | private static SlimeJsonResponse response(PriceInformation priceInfo) {
var slime = new Slime();
Cursor cursor = slime.setObject();
var array = cursor.setArray("priceInfo");
array.addObject().setString("listPrice", SCALED_ZERO.add(priceInfo.listPrice()).toPlainString());
array.addObject().setString("volumeDiscount", SCALED_ZERO.add(priceInfo.volumeDiscount()).toPlainString());
array.addObject().setString("committedAmountDiscount", SCALED_ZERO.add(priceInfo.committedAmountDiscount()).toPlainString());
array.addObject().setString("enclaveDiscount", SCALED_ZERO.add(priceInfo.enclaveDiscount()).toPlainString());
cursor.setString("totalAmount", priceInfo.totalAmount().toPlainString());
return new SlimeJsonResponse(slime);
} | array.addObject().setString("listPrice", SCALED_ZERO.add(priceInfo.listPrice()).toPlainString()); | private static SlimeJsonResponse response(PriceInformation priceInfo) {
var slime = new Slime();
Cursor cursor = slime.setObject();
var array = cursor.setArray("priceInfo");
addItem(array, "List price", priceInfo.listPrice());
addItem(array, "Enclave discount", priceInfo.enclaveDiscount());
addItem(array, "Volume discount", priceInfo.volumeDiscount());
addItem(array, "Committed spend", priceInfo.committedAmountDiscount());
cursor.setString("totalAmount", priceInfo.totalAmount().toPlainString());
return new SlimeJsonResponse(slime);
} | class PricingApiHandler extends ThreadedHttpRequestHandler {
private static final Logger log = Logger.getLogger(PricingApiHandler.class.getName());
private static final BigDecimal SCALED_ZERO = BigDecimal.ZERO.setScale(2);
private final Controller controller;
@Inject
public PricingApiHandler(Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
if (request.getMethod() != GET)
return methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
try {
return handleGET(request);
} catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/pricing/v1/pricing")) return pricing(request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse pricing(HttpRequest request) {
String rawQuery = request.getUri().getRawQuery();
PriceInformation price = parseQuery(rawQuery);
return response(price);
}
private PriceInformation parseQuery(String rawQuery) {
String[] elements = URLDecoder.decode(rawQuery, UTF_8).split("&");
if (elements.length == 0) throw new IllegalArgumentException("no price information found in query");
var supportLevel = SupportLevel.BASIC;
var enclave = false;
var committedSpend = 0d;
var plan = controller.serviceRegistry().planRegistry().defaultPlan();
List<ClusterResources> clusterResources = new ArrayList<>();
for (Pair<String, String> entry : keysAndValues(elements)) {
switch (entry.getFirst()) {
case "committedSpend" -> committedSpend = parseDouble(entry.getSecond());
case "enclave" -> enclave = Boolean.parseBoolean(entry.getSecond());
case "planId" -> plan = plan(entry.getSecond())
.orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
case "supportLevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
case "resources" -> clusterResources.add(clusterResources(entry.getSecond()));
}
}
if (clusterResources.size() < 1) throw new IllegalArgumentException("No cluster resources found in query");
PricingInfo pricingInfo = new PricingInfo(enclave, supportLevel, committedSpend);
return controller.serviceRegistry().pricingController().price(clusterResources, pricingInfo, plan);
}
private ClusterResources clusterResources(String resourcesString) {
String[] elements = resourcesString.split(",");
if (elements.length == 0)
throw new IllegalArgumentException("nothing found in cluster resources: " + resourcesString);
var nodes = 0;
var vcpu = 0d;
var memoryGb = 0d;
var diskGb = 0d;
var gpuMemoryGb = 0d;
for (var element : keysAndValues(elements)) {
switch (element.getFirst()) {
case "nodes" -> nodes = parseInt(element.getSecond());
case "vcpu" -> vcpu = parseDouble(element.getSecond());
case "memoryGb" -> memoryGb = parseDouble(element.getSecond());
case "diskGb" -> diskGb = parseDouble(element.getSecond());
case "gpuMemoryGb" -> gpuMemoryGb = parseDouble(element.getSecond());
}
}
var nodeResources = new NodeResources(vcpu, memoryGb, diskGb, 0);
if (gpuMemoryGb > 0)
nodeResources = nodeResources.with(new NodeResources.GpuResources(1, gpuMemoryGb));
return new ClusterResources(nodes, 1, nodeResources);
}
private List<Pair<String, String>> keysAndValues(String[] elements) {
return Arrays.stream(elements).map(element -> {
var index = element.indexOf("=");
if (index <= 0 ) throw new IllegalArgumentException("Error in query parameter, expected '=' between key and value: " + element);
return new Pair<>(element.substring(0, index), element.substring(index + 1));
})
.collect(Collectors.toList());
}
private Optional<Plan> plan(String element) {
return controller.serviceRegistry().planRegistry().plan(element);
}
} | class PricingApiHandler extends ThreadedHttpRequestHandler {
private static final Logger log = Logger.getLogger(PricingApiHandler.class.getName());
private static final BigDecimal SCALED_ZERO = BigDecimal.ZERO.setScale(2);
private final Controller controller;
@Inject
public PricingApiHandler(Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
if (request.getMethod() != GET)
return methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
try {
return handleGET(request);
} catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/pricing/v1/pricing")) return pricing(request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse pricing(HttpRequest request) {
String rawQuery = request.getUri().getRawQuery();
PriceInformation price = parseQuery(rawQuery);
return response(price);
}
private PriceInformation parseQuery(String rawQuery) {
String[] elements = URLDecoder.decode(rawQuery, UTF_8).split("&");
if (elements.length == 0) throw new IllegalArgumentException("no price information found in query");
var supportLevel = SupportLevel.BASIC;
var enclave = false;
var committedSpend = 0d;
var plan = controller.serviceRegistry().planRegistry().defaultPlan();
List<ClusterResources> clusterResources = new ArrayList<>();
for (Pair<String, String> entry : keysAndValues(elements)) {
switch (entry.getFirst()) {
case "committedSpend" -> committedSpend = parseDouble(entry.getSecond());
case "enclave" -> enclave = Boolean.parseBoolean(entry.getSecond());
case "planId" -> plan = plan(entry.getSecond())
.orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
case "supportLevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
case "resources" -> clusterResources.add(clusterResources(entry.getSecond()));
}
}
if (clusterResources.size() < 1) throw new IllegalArgumentException("No cluster resources found in query");
PricingInfo pricingInfo = new PricingInfo(enclave, supportLevel, committedSpend);
return controller.serviceRegistry().pricingController().price(clusterResources, pricingInfo, plan);
}
private ClusterResources clusterResources(String resourcesString) {
String[] elements = resourcesString.split(",");
if (elements.length == 0)
throw new IllegalArgumentException("nothing found in cluster resources: " + resourcesString);
var nodes = 0;
var vcpu = 0d;
var memoryGb = 0d;
var diskGb = 0d;
var gpuMemoryGb = 0d;
for (var element : keysAndValues(elements)) {
switch (element.getFirst()) {
case "nodes" -> nodes = parseInt(element.getSecond());
case "vcpu" -> vcpu = parseDouble(element.getSecond());
case "memoryGb" -> memoryGb = parseDouble(element.getSecond());
case "diskGb" -> diskGb = parseDouble(element.getSecond());
case "gpuMemoryGb" -> gpuMemoryGb = parseDouble(element.getSecond());
}
}
var nodeResources = new NodeResources(vcpu, memoryGb, diskGb, 0);
if (gpuMemoryGb > 0)
nodeResources = nodeResources.with(new NodeResources.GpuResources(1, gpuMemoryGb));
return new ClusterResources(nodes, 1, nodeResources);
}
private List<Pair<String, String>> keysAndValues(String[] elements) {
return Arrays.stream(elements).map(element -> {
var index = element.indexOf("=");
if (index <= 0 ) throw new IllegalArgumentException("Error in query parameter, expected '=' between key and value: " + element);
return new Pair<>(element.substring(0, index), element.substring(index + 1));
})
.collect(Collectors.toList());
}
private Optional<Plan> plan(String element) {
return controller.serviceRegistry().planRegistry().plan(element);
}
private static void addItem(Cursor array, String name, BigDecimal amount) {
if (amount.compareTo(BigDecimal.ZERO) != 0) {
var o = array.addObject();
o.setString("description", name);
o.setString("amount", SCALED_ZERO.add(amount).toPlainString());
}
}
} |
This one is just the name of the JSON key, so can be left as `totalAmount`/`total` | private static SlimeJsonResponse response(PriceInformation priceInfo) {
var slime = new Slime();
Cursor cursor = slime.setObject();
var array = cursor.setArray("priceInfo");
addItem(array, "List price", priceInfo.listPrice());
addItem(array, "Volume discount", priceInfo.volumeDiscount());
addItem(array, "Committed spend discount", priceInfo.committedAmountDiscount());
addItem(array, "Enclave discount", priceInfo.enclaveDiscount());
cursor.setString("Total", priceInfo.totalAmount().toPlainString());
return new SlimeJsonResponse(slime);
} | cursor.setString("Total", priceInfo.totalAmount().toPlainString()); | private static SlimeJsonResponse response(PriceInformation priceInfo) {
var slime = new Slime();
Cursor cursor = slime.setObject();
var array = cursor.setArray("priceInfo");
addItem(array, "List price", priceInfo.listPrice());
addItem(array, "Enclave discount", priceInfo.enclaveDiscount());
addItem(array, "Volume discount", priceInfo.volumeDiscount());
addItem(array, "Committed spend", priceInfo.committedAmountDiscount());
cursor.setString("totalAmount", priceInfo.totalAmount().toPlainString());
return new SlimeJsonResponse(slime);
} | class PricingApiHandler extends ThreadedHttpRequestHandler {
private static final Logger log = Logger.getLogger(PricingApiHandler.class.getName());
private static final BigDecimal SCALED_ZERO = BigDecimal.ZERO.setScale(2);
private final Controller controller;
@Inject
public PricingApiHandler(Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
if (request.getMethod() != GET)
return methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
try {
return handleGET(request);
} catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/pricing/v1/pricing")) return pricing(request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse pricing(HttpRequest request) {
String rawQuery = request.getUri().getRawQuery();
PriceInformation price = parseQuery(rawQuery);
return response(price);
}
private PriceInformation parseQuery(String rawQuery) {
String[] elements = URLDecoder.decode(rawQuery, UTF_8).split("&");
if (elements.length == 0) throw new IllegalArgumentException("no price information found in query");
var supportLevel = SupportLevel.BASIC;
var enclave = false;
var committedSpend = 0d;
var plan = controller.serviceRegistry().planRegistry().defaultPlan();
List<ClusterResources> clusterResources = new ArrayList<>();
for (Pair<String, String> entry : keysAndValues(elements)) {
switch (entry.getFirst()) {
case "committedSpend" -> committedSpend = parseDouble(entry.getSecond());
case "enclave" -> enclave = Boolean.parseBoolean(entry.getSecond());
case "planId" -> plan = plan(entry.getSecond())
.orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
case "supportLevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
case "resources" -> clusterResources.add(clusterResources(entry.getSecond()));
}
}
if (clusterResources.size() < 1) throw new IllegalArgumentException("No cluster resources found in query");
PricingInfo pricingInfo = new PricingInfo(enclave, supportLevel, committedSpend);
return controller.serviceRegistry().pricingController().price(clusterResources, pricingInfo, plan);
}
private ClusterResources clusterResources(String resourcesString) {
String[] elements = resourcesString.split(",");
if (elements.length == 0)
throw new IllegalArgumentException("nothing found in cluster resources: " + resourcesString);
var nodes = 0;
var vcpu = 0d;
var memoryGb = 0d;
var diskGb = 0d;
var gpuMemoryGb = 0d;
for (var element : keysAndValues(elements)) {
switch (element.getFirst()) {
case "nodes" -> nodes = parseInt(element.getSecond());
case "vcpu" -> vcpu = parseDouble(element.getSecond());
case "memoryGb" -> memoryGb = parseDouble(element.getSecond());
case "diskGb" -> diskGb = parseDouble(element.getSecond());
case "gpuMemoryGb" -> gpuMemoryGb = parseDouble(element.getSecond());
}
}
var nodeResources = new NodeResources(vcpu, memoryGb, diskGb, 0);
if (gpuMemoryGb > 0)
nodeResources = nodeResources.with(new NodeResources.GpuResources(1, gpuMemoryGb));
return new ClusterResources(nodes, 1, nodeResources);
}
private List<Pair<String, String>> keysAndValues(String[] elements) {
return Arrays.stream(elements).map(element -> {
var index = element.indexOf("=");
if (index <= 0 ) throw new IllegalArgumentException("Error in query parameter, expected '=' between key and value: " + element);
return new Pair<>(element.substring(0, index), element.substring(index + 1));
})
.collect(Collectors.toList());
}
private Optional<Plan> plan(String element) {
return controller.serviceRegistry().planRegistry().plan(element);
}
private static void addItem(Cursor array, String name, BigDecimal amount) {
if (amount.compareTo(BigDecimal.ZERO) > 0) {
var o = array.addObject();
o.setString("description", name);
o.setString("amount", SCALED_ZERO.add(amount).toPlainString());
}
}
} | class PricingApiHandler extends ThreadedHttpRequestHandler {
private static final Logger log = Logger.getLogger(PricingApiHandler.class.getName());
private static final BigDecimal SCALED_ZERO = BigDecimal.ZERO.setScale(2);
private final Controller controller;
@Inject
public PricingApiHandler(Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
if (request.getMethod() != GET)
return methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
try {
return handleGET(request);
} catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/pricing/v1/pricing")) return pricing(request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse pricing(HttpRequest request) {
String rawQuery = request.getUri().getRawQuery();
PriceInformation price = parseQuery(rawQuery);
return response(price);
}
private PriceInformation parseQuery(String rawQuery) {
String[] elements = URLDecoder.decode(rawQuery, UTF_8).split("&");
if (elements.length == 0) throw new IllegalArgumentException("no price information found in query");
var supportLevel = SupportLevel.BASIC;
var enclave = false;
var committedSpend = 0d;
var plan = controller.serviceRegistry().planRegistry().defaultPlan();
List<ClusterResources> clusterResources = new ArrayList<>();
for (Pair<String, String> entry : keysAndValues(elements)) {
switch (entry.getFirst()) {
case "committedSpend" -> committedSpend = parseDouble(entry.getSecond());
case "enclave" -> enclave = Boolean.parseBoolean(entry.getSecond());
case "planId" -> plan = plan(entry.getSecond())
.orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
case "supportLevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
case "resources" -> clusterResources.add(clusterResources(entry.getSecond()));
}
}
if (clusterResources.size() < 1) throw new IllegalArgumentException("No cluster resources found in query");
PricingInfo pricingInfo = new PricingInfo(enclave, supportLevel, committedSpend);
return controller.serviceRegistry().pricingController().price(clusterResources, pricingInfo, plan);
}
private ClusterResources clusterResources(String resourcesString) {
String[] elements = resourcesString.split(",");
if (elements.length == 0)
throw new IllegalArgumentException("nothing found in cluster resources: " + resourcesString);
var nodes = 0;
var vcpu = 0d;
var memoryGb = 0d;
var diskGb = 0d;
var gpuMemoryGb = 0d;
for (var element : keysAndValues(elements)) {
switch (element.getFirst()) {
case "nodes" -> nodes = parseInt(element.getSecond());
case "vcpu" -> vcpu = parseDouble(element.getSecond());
case "memoryGb" -> memoryGb = parseDouble(element.getSecond());
case "diskGb" -> diskGb = parseDouble(element.getSecond());
case "gpuMemoryGb" -> gpuMemoryGb = parseDouble(element.getSecond());
}
}
var nodeResources = new NodeResources(vcpu, memoryGb, diskGb, 0);
if (gpuMemoryGb > 0)
nodeResources = nodeResources.with(new NodeResources.GpuResources(1, gpuMemoryGb));
return new ClusterResources(nodes, 1, nodeResources);
}
private List<Pair<String, String>> keysAndValues(String[] elements) {
return Arrays.stream(elements).map(element -> {
var index = element.indexOf("=");
if (index <= 0 ) throw new IllegalArgumentException("Error in query parameter, expected '=' between key and value: " + element);
return new Pair<>(element.substring(0, index), element.substring(index + 1));
})
.collect(Collectors.toList());
}
private Optional<Plan> plan(String element) {
return controller.serviceRegistry().planRegistry().plan(element);
}
private static void addItem(Cursor array, String name, BigDecimal amount) {
if (amount.compareTo(BigDecimal.ZERO) != 0) {
var o = array.addObject();
o.setString("description", name);
o.setString("amount", SCALED_ZERO.add(amount).toPlainString());
}
}
} |
The item order should be the same as calculation order, so committed spend should be last? Also consider dropping "discount" here since this could be negative if the tenant under-spends. | private static SlimeJsonResponse response(PriceInformation priceInfo) {
var slime = new Slime();
Cursor cursor = slime.setObject();
var array = cursor.setArray("priceInfo");
addItem(array, "List price", priceInfo.listPrice());
addItem(array, "Volume discount", priceInfo.volumeDiscount());
addItem(array, "Committed spend discount", priceInfo.committedAmountDiscount());
addItem(array, "Enclave discount", priceInfo.enclaveDiscount());
cursor.setString("Total", priceInfo.totalAmount().toPlainString());
return new SlimeJsonResponse(slime);
} | addItem(array, "Committed spend discount", priceInfo.committedAmountDiscount()); | private static SlimeJsonResponse response(PriceInformation priceInfo) {
var slime = new Slime();
Cursor cursor = slime.setObject();
var array = cursor.setArray("priceInfo");
addItem(array, "List price", priceInfo.listPrice());
addItem(array, "Enclave discount", priceInfo.enclaveDiscount());
addItem(array, "Volume discount", priceInfo.volumeDiscount());
addItem(array, "Committed spend", priceInfo.committedAmountDiscount());
cursor.setString("totalAmount", priceInfo.totalAmount().toPlainString());
return new SlimeJsonResponse(slime);
} | class PricingApiHandler extends ThreadedHttpRequestHandler {
private static final Logger log = Logger.getLogger(PricingApiHandler.class.getName());
private static final BigDecimal SCALED_ZERO = BigDecimal.ZERO.setScale(2);
private final Controller controller;
@Inject
public PricingApiHandler(Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
if (request.getMethod() != GET)
return methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
try {
return handleGET(request);
} catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/pricing/v1/pricing")) return pricing(request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse pricing(HttpRequest request) {
String rawQuery = request.getUri().getRawQuery();
PriceInformation price = parseQuery(rawQuery);
return response(price);
}
private PriceInformation parseQuery(String rawQuery) {
String[] elements = URLDecoder.decode(rawQuery, UTF_8).split("&");
if (elements.length == 0) throw new IllegalArgumentException("no price information found in query");
var supportLevel = SupportLevel.BASIC;
var enclave = false;
var committedSpend = 0d;
var plan = controller.serviceRegistry().planRegistry().defaultPlan();
List<ClusterResources> clusterResources = new ArrayList<>();
for (Pair<String, String> entry : keysAndValues(elements)) {
switch (entry.getFirst()) {
case "committedSpend" -> committedSpend = parseDouble(entry.getSecond());
case "enclave" -> enclave = Boolean.parseBoolean(entry.getSecond());
case "planId" -> plan = plan(entry.getSecond())
.orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
case "supportLevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
case "resources" -> clusterResources.add(clusterResources(entry.getSecond()));
}
}
if (clusterResources.size() < 1) throw new IllegalArgumentException("No cluster resources found in query");
PricingInfo pricingInfo = new PricingInfo(enclave, supportLevel, committedSpend);
return controller.serviceRegistry().pricingController().price(clusterResources, pricingInfo, plan);
}
private ClusterResources clusterResources(String resourcesString) {
String[] elements = resourcesString.split(",");
if (elements.length == 0)
throw new IllegalArgumentException("nothing found in cluster resources: " + resourcesString);
var nodes = 0;
var vcpu = 0d;
var memoryGb = 0d;
var diskGb = 0d;
var gpuMemoryGb = 0d;
for (var element : keysAndValues(elements)) {
switch (element.getFirst()) {
case "nodes" -> nodes = parseInt(element.getSecond());
case "vcpu" -> vcpu = parseDouble(element.getSecond());
case "memoryGb" -> memoryGb = parseDouble(element.getSecond());
case "diskGb" -> diskGb = parseDouble(element.getSecond());
case "gpuMemoryGb" -> gpuMemoryGb = parseDouble(element.getSecond());
}
}
var nodeResources = new NodeResources(vcpu, memoryGb, diskGb, 0);
if (gpuMemoryGb > 0)
nodeResources = nodeResources.with(new NodeResources.GpuResources(1, gpuMemoryGb));
return new ClusterResources(nodes, 1, nodeResources);
}
private List<Pair<String, String>> keysAndValues(String[] elements) {
return Arrays.stream(elements).map(element -> {
var index = element.indexOf("=");
if (index <= 0 ) throw new IllegalArgumentException("Error in query parameter, expected '=' between key and value: " + element);
return new Pair<>(element.substring(0, index), element.substring(index + 1));
})
.collect(Collectors.toList());
}
private Optional<Plan> plan(String element) {
return controller.serviceRegistry().planRegistry().plan(element);
}
private static void addItem(Cursor array, String name, BigDecimal amount) {
if (amount.compareTo(BigDecimal.ZERO) > 0) {
var o = array.addObject();
o.setString("description", name);
o.setString("amount", SCALED_ZERO.add(amount).toPlainString());
}
}
} | class PricingApiHandler extends ThreadedHttpRequestHandler {
private static final Logger log = Logger.getLogger(PricingApiHandler.class.getName());
private static final BigDecimal SCALED_ZERO = BigDecimal.ZERO.setScale(2);
private final Controller controller;
@Inject
public PricingApiHandler(Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
if (request.getMethod() != GET)
return methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
try {
return handleGET(request);
} catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/pricing/v1/pricing")) return pricing(request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse pricing(HttpRequest request) {
String rawQuery = request.getUri().getRawQuery();
PriceInformation price = parseQuery(rawQuery);
return response(price);
}
private PriceInformation parseQuery(String rawQuery) {
String[] elements = URLDecoder.decode(rawQuery, UTF_8).split("&");
if (elements.length == 0) throw new IllegalArgumentException("no price information found in query");
var supportLevel = SupportLevel.BASIC;
var enclave = false;
var committedSpend = 0d;
var plan = controller.serviceRegistry().planRegistry().defaultPlan();
List<ClusterResources> clusterResources = new ArrayList<>();
for (Pair<String, String> entry : keysAndValues(elements)) {
switch (entry.getFirst()) {
case "committedSpend" -> committedSpend = parseDouble(entry.getSecond());
case "enclave" -> enclave = Boolean.parseBoolean(entry.getSecond());
case "planId" -> plan = plan(entry.getSecond())
.orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
case "supportLevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
case "resources" -> clusterResources.add(clusterResources(entry.getSecond()));
}
}
if (clusterResources.size() < 1) throw new IllegalArgumentException("No cluster resources found in query");
PricingInfo pricingInfo = new PricingInfo(enclave, supportLevel, committedSpend);
return controller.serviceRegistry().pricingController().price(clusterResources, pricingInfo, plan);
}
private ClusterResources clusterResources(String resourcesString) {
String[] elements = resourcesString.split(",");
if (elements.length == 0)
throw new IllegalArgumentException("nothing found in cluster resources: " + resourcesString);
var nodes = 0;
var vcpu = 0d;
var memoryGb = 0d;
var diskGb = 0d;
var gpuMemoryGb = 0d;
for (var element : keysAndValues(elements)) {
switch (element.getFirst()) {
case "nodes" -> nodes = parseInt(element.getSecond());
case "vcpu" -> vcpu = parseDouble(element.getSecond());
case "memoryGb" -> memoryGb = parseDouble(element.getSecond());
case "diskGb" -> diskGb = parseDouble(element.getSecond());
case "gpuMemoryGb" -> gpuMemoryGb = parseDouble(element.getSecond());
}
}
var nodeResources = new NodeResources(vcpu, memoryGb, diskGb, 0);
if (gpuMemoryGb > 0)
nodeResources = nodeResources.with(new NodeResources.GpuResources(1, gpuMemoryGb));
return new ClusterResources(nodes, 1, nodeResources);
}
private List<Pair<String, String>> keysAndValues(String[] elements) {
return Arrays.stream(elements).map(element -> {
var index = element.indexOf("=");
if (index <= 0 ) throw new IllegalArgumentException("Error in query parameter, expected '=' between key and value: " + element);
return new Pair<>(element.substring(0, index), element.substring(index + 1));
})
.collect(Collectors.toList());
}
private Optional<Plan> plan(String element) {
return controller.serviceRegistry().planRegistry().plan(element);
}
private static void addItem(Cursor array, String name, BigDecimal amount) {
if (amount.compareTo(BigDecimal.ZERO) != 0) {
var o = array.addObject();
o.setString("description", name);
o.setString("amount", SCALED_ZERO.add(amount).toPlainString());
}
}
} |
Yup, agreed, fixing | private static SlimeJsonResponse response(PriceInformation priceInfo) {
var slime = new Slime();
Cursor cursor = slime.setObject();
var array = cursor.setArray("priceInfo");
addItem(array, "List price", priceInfo.listPrice());
addItem(array, "Volume discount", priceInfo.volumeDiscount());
addItem(array, "Committed spend discount", priceInfo.committedAmountDiscount());
addItem(array, "Enclave discount", priceInfo.enclaveDiscount());
cursor.setString("Total", priceInfo.totalAmount().toPlainString());
return new SlimeJsonResponse(slime);
} | addItem(array, "Committed spend discount", priceInfo.committedAmountDiscount()); | private static SlimeJsonResponse response(PriceInformation priceInfo) {
var slime = new Slime();
Cursor cursor = slime.setObject();
var array = cursor.setArray("priceInfo");
addItem(array, "List price", priceInfo.listPrice());
addItem(array, "Enclave discount", priceInfo.enclaveDiscount());
addItem(array, "Volume discount", priceInfo.volumeDiscount());
addItem(array, "Committed spend", priceInfo.committedAmountDiscount());
cursor.setString("totalAmount", priceInfo.totalAmount().toPlainString());
return new SlimeJsonResponse(slime);
} | class PricingApiHandler extends ThreadedHttpRequestHandler {
private static final Logger log = Logger.getLogger(PricingApiHandler.class.getName());
private static final BigDecimal SCALED_ZERO = BigDecimal.ZERO.setScale(2);
private final Controller controller;
@Inject
public PricingApiHandler(Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
if (request.getMethod() != GET)
return methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
try {
return handleGET(request);
} catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/pricing/v1/pricing")) return pricing(request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse pricing(HttpRequest request) {
String rawQuery = request.getUri().getRawQuery();
PriceInformation price = parseQuery(rawQuery);
return response(price);
}
private PriceInformation parseQuery(String rawQuery) {
String[] elements = URLDecoder.decode(rawQuery, UTF_8).split("&");
if (elements.length == 0) throw new IllegalArgumentException("no price information found in query");
var supportLevel = SupportLevel.BASIC;
var enclave = false;
var committedSpend = 0d;
var plan = controller.serviceRegistry().planRegistry().defaultPlan();
List<ClusterResources> clusterResources = new ArrayList<>();
for (Pair<String, String> entry : keysAndValues(elements)) {
switch (entry.getFirst()) {
case "committedSpend" -> committedSpend = parseDouble(entry.getSecond());
case "enclave" -> enclave = Boolean.parseBoolean(entry.getSecond());
case "planId" -> plan = plan(entry.getSecond())
.orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
case "supportLevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
case "resources" -> clusterResources.add(clusterResources(entry.getSecond()));
}
}
if (clusterResources.size() < 1) throw new IllegalArgumentException("No cluster resources found in query");
PricingInfo pricingInfo = new PricingInfo(enclave, supportLevel, committedSpend);
return controller.serviceRegistry().pricingController().price(clusterResources, pricingInfo, plan);
}
private ClusterResources clusterResources(String resourcesString) {
String[] elements = resourcesString.split(",");
if (elements.length == 0)
throw new IllegalArgumentException("nothing found in cluster resources: " + resourcesString);
var nodes = 0;
var vcpu = 0d;
var memoryGb = 0d;
var diskGb = 0d;
var gpuMemoryGb = 0d;
for (var element : keysAndValues(elements)) {
switch (element.getFirst()) {
case "nodes" -> nodes = parseInt(element.getSecond());
case "vcpu" -> vcpu = parseDouble(element.getSecond());
case "memoryGb" -> memoryGb = parseDouble(element.getSecond());
case "diskGb" -> diskGb = parseDouble(element.getSecond());
case "gpuMemoryGb" -> gpuMemoryGb = parseDouble(element.getSecond());
}
}
var nodeResources = new NodeResources(vcpu, memoryGb, diskGb, 0);
if (gpuMemoryGb > 0)
nodeResources = nodeResources.with(new NodeResources.GpuResources(1, gpuMemoryGb));
return new ClusterResources(nodes, 1, nodeResources);
}
private List<Pair<String, String>> keysAndValues(String[] elements) {
return Arrays.stream(elements).map(element -> {
var index = element.indexOf("=");
if (index <= 0 ) throw new IllegalArgumentException("Error in query parameter, expected '=' between key and value: " + element);
return new Pair<>(element.substring(0, index), element.substring(index + 1));
})
.collect(Collectors.toList());
}
private Optional<Plan> plan(String element) {
return controller.serviceRegistry().planRegistry().plan(element);
}
private static void addItem(Cursor array, String name, BigDecimal amount) {
if (amount.compareTo(BigDecimal.ZERO) > 0) {
var o = array.addObject();
o.setString("description", name);
o.setString("amount", SCALED_ZERO.add(amount).toPlainString());
}
}
} | class PricingApiHandler extends ThreadedHttpRequestHandler {
private static final Logger log = Logger.getLogger(PricingApiHandler.class.getName());
private static final BigDecimal SCALED_ZERO = BigDecimal.ZERO.setScale(2);
private final Controller controller;
@Inject
public PricingApiHandler(Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
if (request.getMethod() != GET)
return methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
try {
return handleGET(request);
} catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/pricing/v1/pricing")) return pricing(request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse pricing(HttpRequest request) {
String rawQuery = request.getUri().getRawQuery();
PriceInformation price = parseQuery(rawQuery);
return response(price);
}
private PriceInformation parseQuery(String rawQuery) {
String[] elements = URLDecoder.decode(rawQuery, UTF_8).split("&");
if (elements.length == 0) throw new IllegalArgumentException("no price information found in query");
var supportLevel = SupportLevel.BASIC;
var enclave = false;
var committedSpend = 0d;
var plan = controller.serviceRegistry().planRegistry().defaultPlan();
List<ClusterResources> clusterResources = new ArrayList<>();
for (Pair<String, String> entry : keysAndValues(elements)) {
switch (entry.getFirst()) {
case "committedSpend" -> committedSpend = parseDouble(entry.getSecond());
case "enclave" -> enclave = Boolean.parseBoolean(entry.getSecond());
case "planId" -> plan = plan(entry.getSecond())
.orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
case "supportLevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
case "resources" -> clusterResources.add(clusterResources(entry.getSecond()));
}
}
if (clusterResources.size() < 1) throw new IllegalArgumentException("No cluster resources found in query");
PricingInfo pricingInfo = new PricingInfo(enclave, supportLevel, committedSpend);
return controller.serviceRegistry().pricingController().price(clusterResources, pricingInfo, plan);
}
private ClusterResources clusterResources(String resourcesString) {
String[] elements = resourcesString.split(",");
if (elements.length == 0)
throw new IllegalArgumentException("nothing found in cluster resources: " + resourcesString);
var nodes = 0;
var vcpu = 0d;
var memoryGb = 0d;
var diskGb = 0d;
var gpuMemoryGb = 0d;
for (var element : keysAndValues(elements)) {
switch (element.getFirst()) {
case "nodes" -> nodes = parseInt(element.getSecond());
case "vcpu" -> vcpu = parseDouble(element.getSecond());
case "memoryGb" -> memoryGb = parseDouble(element.getSecond());
case "diskGb" -> diskGb = parseDouble(element.getSecond());
case "gpuMemoryGb" -> gpuMemoryGb = parseDouble(element.getSecond());
}
}
var nodeResources = new NodeResources(vcpu, memoryGb, diskGb, 0);
if (gpuMemoryGb > 0)
nodeResources = nodeResources.with(new NodeResources.GpuResources(1, gpuMemoryGb));
return new ClusterResources(nodes, 1, nodeResources);
}
private List<Pair<String, String>> keysAndValues(String[] elements) {
return Arrays.stream(elements).map(element -> {
var index = element.indexOf("=");
if (index <= 0 ) throw new IllegalArgumentException("Error in query parameter, expected '=' between key and value: " + element);
return new Pair<>(element.substring(0, index), element.substring(index + 1));
})
.collect(Collectors.toList());
}
private Optional<Plan> plan(String element) {
return controller.serviceRegistry().planRegistry().plan(element);
}
private static void addItem(Cursor array, String name, BigDecimal amount) {
if (amount.compareTo(BigDecimal.ZERO) != 0) {
var o = array.addObject();
o.setString("description", name);
o.setString("amount", SCALED_ZERO.add(amount).toPlainString());
}
}
} |
Right, will fix | private static SlimeJsonResponse response(PriceInformation priceInfo) {
var slime = new Slime();
Cursor cursor = slime.setObject();
var array = cursor.setArray("priceInfo");
addItem(array, "List price", priceInfo.listPrice());
addItem(array, "Volume discount", priceInfo.volumeDiscount());
addItem(array, "Committed spend discount", priceInfo.committedAmountDiscount());
addItem(array, "Enclave discount", priceInfo.enclaveDiscount());
cursor.setString("Total", priceInfo.totalAmount().toPlainString());
return new SlimeJsonResponse(slime);
} | cursor.setString("Total", priceInfo.totalAmount().toPlainString()); | private static SlimeJsonResponse response(PriceInformation priceInfo) {
var slime = new Slime();
Cursor cursor = slime.setObject();
var array = cursor.setArray("priceInfo");
addItem(array, "List price", priceInfo.listPrice());
addItem(array, "Enclave discount", priceInfo.enclaveDiscount());
addItem(array, "Volume discount", priceInfo.volumeDiscount());
addItem(array, "Committed spend", priceInfo.committedAmountDiscount());
cursor.setString("totalAmount", priceInfo.totalAmount().toPlainString());
return new SlimeJsonResponse(slime);
} | class PricingApiHandler extends ThreadedHttpRequestHandler {
private static final Logger log = Logger.getLogger(PricingApiHandler.class.getName());
private static final BigDecimal SCALED_ZERO = BigDecimal.ZERO.setScale(2);
private final Controller controller;
@Inject
public PricingApiHandler(Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
if (request.getMethod() != GET)
return methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
try {
return handleGET(request);
} catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/pricing/v1/pricing")) return pricing(request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse pricing(HttpRequest request) {
String rawQuery = request.getUri().getRawQuery();
PriceInformation price = parseQuery(rawQuery);
return response(price);
}
private PriceInformation parseQuery(String rawQuery) {
String[] elements = URLDecoder.decode(rawQuery, UTF_8).split("&");
if (elements.length == 0) throw new IllegalArgumentException("no price information found in query");
var supportLevel = SupportLevel.BASIC;
var enclave = false;
var committedSpend = 0d;
var plan = controller.serviceRegistry().planRegistry().defaultPlan();
List<ClusterResources> clusterResources = new ArrayList<>();
for (Pair<String, String> entry : keysAndValues(elements)) {
switch (entry.getFirst()) {
case "committedSpend" -> committedSpend = parseDouble(entry.getSecond());
case "enclave" -> enclave = Boolean.parseBoolean(entry.getSecond());
case "planId" -> plan = plan(entry.getSecond())
.orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
case "supportLevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
case "resources" -> clusterResources.add(clusterResources(entry.getSecond()));
}
}
if (clusterResources.size() < 1) throw new IllegalArgumentException("No cluster resources found in query");
PricingInfo pricingInfo = new PricingInfo(enclave, supportLevel, committedSpend);
return controller.serviceRegistry().pricingController().price(clusterResources, pricingInfo, plan);
}
private ClusterResources clusterResources(String resourcesString) {
String[] elements = resourcesString.split(",");
if (elements.length == 0)
throw new IllegalArgumentException("nothing found in cluster resources: " + resourcesString);
var nodes = 0;
var vcpu = 0d;
var memoryGb = 0d;
var diskGb = 0d;
var gpuMemoryGb = 0d;
for (var element : keysAndValues(elements)) {
switch (element.getFirst()) {
case "nodes" -> nodes = parseInt(element.getSecond());
case "vcpu" -> vcpu = parseDouble(element.getSecond());
case "memoryGb" -> memoryGb = parseDouble(element.getSecond());
case "diskGb" -> diskGb = parseDouble(element.getSecond());
case "gpuMemoryGb" -> gpuMemoryGb = parseDouble(element.getSecond());
}
}
var nodeResources = new NodeResources(vcpu, memoryGb, diskGb, 0);
if (gpuMemoryGb > 0)
nodeResources = nodeResources.with(new NodeResources.GpuResources(1, gpuMemoryGb));
return new ClusterResources(nodes, 1, nodeResources);
}
private List<Pair<String, String>> keysAndValues(String[] elements) {
return Arrays.stream(elements).map(element -> {
var index = element.indexOf("=");
if (index <= 0 ) throw new IllegalArgumentException("Error in query parameter, expected '=' between key and value: " + element);
return new Pair<>(element.substring(0, index), element.substring(index + 1));
})
.collect(Collectors.toList());
}
private Optional<Plan> plan(String element) {
return controller.serviceRegistry().planRegistry().plan(element);
}
private static void addItem(Cursor array, String name, BigDecimal amount) {
if (amount.compareTo(BigDecimal.ZERO) > 0) {
var o = array.addObject();
o.setString("description", name);
o.setString("amount", SCALED_ZERO.add(amount).toPlainString());
}
}
} | class PricingApiHandler extends ThreadedHttpRequestHandler {
private static final Logger log = Logger.getLogger(PricingApiHandler.class.getName());
private static final BigDecimal SCALED_ZERO = BigDecimal.ZERO.setScale(2);
private final Controller controller;
@Inject
public PricingApiHandler(Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
if (request.getMethod() != GET)
return methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
try {
return handleGET(request);
} catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/pricing/v1/pricing")) return pricing(request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse pricing(HttpRequest request) {
String rawQuery = request.getUri().getRawQuery();
PriceInformation price = parseQuery(rawQuery);
return response(price);
}
private PriceInformation parseQuery(String rawQuery) {
String[] elements = URLDecoder.decode(rawQuery, UTF_8).split("&");
if (elements.length == 0) throw new IllegalArgumentException("no price information found in query");
var supportLevel = SupportLevel.BASIC;
var enclave = false;
var committedSpend = 0d;
var plan = controller.serviceRegistry().planRegistry().defaultPlan();
List<ClusterResources> clusterResources = new ArrayList<>();
for (Pair<String, String> entry : keysAndValues(elements)) {
switch (entry.getFirst()) {
case "committedSpend" -> committedSpend = parseDouble(entry.getSecond());
case "enclave" -> enclave = Boolean.parseBoolean(entry.getSecond());
case "planId" -> plan = plan(entry.getSecond())
.orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
case "supportLevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
case "resources" -> clusterResources.add(clusterResources(entry.getSecond()));
}
}
if (clusterResources.size() < 1) throw new IllegalArgumentException("No cluster resources found in query");
PricingInfo pricingInfo = new PricingInfo(enclave, supportLevel, committedSpend);
return controller.serviceRegistry().pricingController().price(clusterResources, pricingInfo, plan);
}
private ClusterResources clusterResources(String resourcesString) {
String[] elements = resourcesString.split(",");
if (elements.length == 0)
throw new IllegalArgumentException("nothing found in cluster resources: " + resourcesString);
var nodes = 0;
var vcpu = 0d;
var memoryGb = 0d;
var diskGb = 0d;
var gpuMemoryGb = 0d;
for (var element : keysAndValues(elements)) {
switch (element.getFirst()) {
case "nodes" -> nodes = parseInt(element.getSecond());
case "vcpu" -> vcpu = parseDouble(element.getSecond());
case "memoryGb" -> memoryGb = parseDouble(element.getSecond());
case "diskGb" -> diskGb = parseDouble(element.getSecond());
case "gpuMemoryGb" -> gpuMemoryGb = parseDouble(element.getSecond());
}
}
var nodeResources = new NodeResources(vcpu, memoryGb, diskGb, 0);
if (gpuMemoryGb > 0)
nodeResources = nodeResources.with(new NodeResources.GpuResources(1, gpuMemoryGb));
return new ClusterResources(nodes, 1, nodeResources);
}
private List<Pair<String, String>> keysAndValues(String[] elements) {
return Arrays.stream(elements).map(element -> {
var index = element.indexOf("=");
if (index <= 0 ) throw new IllegalArgumentException("Error in query parameter, expected '=' between key and value: " + element);
return new Pair<>(element.substring(0, index), element.substring(index + 1));
})
.collect(Collectors.toList());
}
private Optional<Plan> plan(String element) {
return controller.serviceRegistry().planRegistry().plan(element);
}
private static void addItem(Cursor array, String name, BigDecimal amount) {
if (amount.compareTo(BigDecimal.ZERO) != 0) {
var o = array.addObject();
o.setString("description", name);
o.setString("amount", SCALED_ZERO.add(amount).toPlainString());
}
}
} |
`tempConfigPath` no longer exists at this point. Diff must be computed before the file is moved above. I also don't think `configPath` is guaranteed to exist the first time, e.g. on a new proxy node, so `getDiff` should confirm its existence before running the command. | private void loadConfig(int upstreamCount) throws IOException {
Path configPath = NginxPath.config.in(fileSystem);
Path tempConfigPath = NginxPath.temporaryConfig.in(fileSystem);
try {
String currentConfig = Files.readString(configPath);
String newConfig = Files.readString(tempConfigPath);
if (currentConfig.equals(newConfig)) {
Files.deleteIfExists(tempConfigPath);
return;
}
Path rotatedConfig = NginxPath.config.rotatedIn(fileSystem, clock.instant());
atomicCopy(configPath, rotatedConfig);
} catch (NoSuchFileException ignored) {
}
Files.move(tempConfigPath, configPath, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
metric.add(CONFIG_RELOADS_METRIC, 1, null);
LOG.info("Loading new configuration file from " + configPath + (outputRoutingDiff ? " with diff:\n" + getDiff(configPath, tempConfigPath) : ""));
retryingExec("/usr/bin/sudo /opt/vespa/bin/vespa-reload-nginx");
metric.add(OK_CONFIG_RELOADS_METRIC, 1, null);
metric.set(GENERATED_UPSTREAMS_METRIC, upstreamCount, null);
} | LOG.info("Loading new configuration file from " + configPath + (outputRoutingDiff ? " with diff:\n" + getDiff(configPath, tempConfigPath) : "")); | private void loadConfig(int upstreamCount) throws IOException {
Path configPath = NginxPath.config.in(fileSystem);
Path tempConfigPath = NginxPath.temporaryConfig.in(fileSystem);
String routingDiff = "";
try {
String currentConfig = Files.readString(configPath);
String newConfig = Files.readString(tempConfigPath);
if (currentConfig.equals(newConfig)) {
Files.deleteIfExists(tempConfigPath);
return;
}
if(outputRoutingDiff)
routingDiff = " with diff:\n" + getDiff(configPath, tempConfigPath);
Path rotatedConfig = NginxPath.config.rotatedIn(fileSystem, clock.instant());
atomicCopy(configPath, rotatedConfig);
} catch (NoSuchFileException ignored) {
}
Files.move(tempConfigPath, configPath, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
metric.add(CONFIG_RELOADS_METRIC, 1, null);
LOG.info("Loading new configuration file from " + configPath + routingDiff);
retryingExec("/usr/bin/sudo /opt/vespa/bin/vespa-reload-nginx");
metric.add(OK_CONFIG_RELOADS_METRIC, 1, null);
metric.set(GENERATED_UPSTREAMS_METRIC, upstreamCount, null);
} | class Nginx implements Router {
private static final Logger LOG = Logger.getLogger(Nginx.class.getName());
private static final int EXEC_ATTEMPTS = 5;
static final String GENERATED_UPSTREAMS_METRIC = "upstreams_generated";
static final String CONFIG_RELOADS_METRIC = "upstreams_nginx_reloads";
static final String OK_CONFIG_RELOADS_METRIC = "upstreams_nginx_reloads_succeeded";
private final FileSystem fileSystem;
private final ProcessExecuter processExecuter;
private final Sleeper sleeper;
private final Clock clock;
private final RoutingStatus routingStatus;
private final Metric metric;
private final boolean outputRoutingDiff;
private final Object monitor = new Object();
public Nginx(FileSystem fileSystem, ProcessExecuter processExecuter, Sleeper sleeper, Clock clock, RoutingStatus routingStatus, Metric metric, boolean outputRoutingDiff) {
this.fileSystem = Objects.requireNonNull(fileSystem);
this.processExecuter = Objects.requireNonNull(processExecuter);
this.sleeper = Objects.requireNonNull(sleeper);
this.clock = Objects.requireNonNull(clock);
this.routingStatus = Objects.requireNonNull(routingStatus);
this.metric = Objects.requireNonNull(metric);
this.outputRoutingDiff = outputRoutingDiff;
}
@Override
public void load(RoutingTable table) {
synchronized (monitor) {
try {
table = table.routingMethod(RoutingMethod.sharedLayer4);
testConfig(table);
loadConfig(table.asMap().size());
gcConfig();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
/** Write given routing table to a temporary config file and test it */
private void testConfig(RoutingTable table) throws IOException {
String config = NginxConfig.from(table, routingStatus);
Files.createDirectories(NginxPath.root.in(fileSystem));
atomicWriteString(NginxPath.temporaryConfig.in(fileSystem), config);
retryingExec("/usr/bin/sudo /opt/vespa/bin/vespa-verify-nginx");
}
/** Load tested config into Nginx */
private String getDiff(Path configPath, Path tempConfigPath) throws IOException {
Pair<Integer, String> executed = processExecuter.exec("diff -U1 " + configPath + " " + tempConfigPath);
return executed.getSecond();
}
/** Remove old config files */
private void gcConfig() throws IOException {
Instant oneWeekAgo = clock.instant().minus(Duration.ofDays(7));
String configBasename = NginxPath.config.in(fileSystem).getFileName().toString();
try (var entries = Files.list(NginxPath.root.in(fileSystem))) {
entries.filter(Files::isRegularFile)
.filter(path -> path.getFileName().toString().startsWith(configBasename))
.filter(path -> rotatedAt(path).map(instant -> instant.isBefore(oneWeekAgo))
.orElse(false))
.forEach(path -> Exceptions.uncheck(() -> Files.deleteIfExists(path)));
}
}
/** Returns the time given path was rotated */
private Optional<Instant> rotatedAt(Path path) {
String[] parts = path.getFileName().toString().split("-", 2);
if (parts.length != 2) return Optional.empty();
return Optional.of(LocalDateTime.from(NginxPath.ROTATED_SUFFIX_FORMAT.parse(parts[1])).toInstant(ZoneOffset.UTC));
}
/** Run given command. Retries after a delay on failure */
private void retryingExec(String command) {
boolean success = false;
for (int attempt = 1; attempt <= EXEC_ATTEMPTS; attempt++) {
String errorMessage;
try {
Pair<Integer, String> result = processExecuter.exec(command);
if (result.getFirst() == 0) {
success = true;
break;
}
errorMessage = result.getSecond();
} catch (IOException e) {
errorMessage = Exceptions.toMessageString(e);
}
Duration duration = Duration.ofSeconds((long) Math.pow(2, attempt));
LOG.log(Level.WARNING, "Failed to run " + command + " on attempt " + attempt + ": " + errorMessage +
". Retrying in " + duration);
sleeper.sleep(duration);
}
if (!success) {
throw new RuntimeException("Failed to run " + command + " successfully after " + EXEC_ATTEMPTS +
" attempts, giving up");
}
}
/** Apply pathOperation to a temporary file, then atomically move the temporary file to path */
private void atomicWrite(Path path, PathOperation pathOperation) throws IOException {
Path tempFile = null;
try {
tempFile = Files.createTempFile(path.getParent(), "nginx", "");
pathOperation.run(tempFile);
Files.move(tempFile, path, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
} finally {
if (tempFile != null) {
Files.deleteIfExists(tempFile);
}
}
}
private void atomicCopy(Path src, Path dst) throws IOException {
atomicWrite(dst, (tempFile) -> Files.copy(src, tempFile,
StandardCopyOption.REPLACE_EXISTING,
StandardCopyOption.COPY_ATTRIBUTES));
}
private void atomicWriteString(Path path, String content) throws IOException {
atomicWrite(path, (tempFile) -> Files.writeString(tempFile, content));
}
@FunctionalInterface
private interface PathOperation {
void run(Path path) throws IOException;
}
} | class Nginx implements Router {
private static final Logger LOG = Logger.getLogger(Nginx.class.getName());
private static final int EXEC_ATTEMPTS = 5;
static final String GENERATED_UPSTREAMS_METRIC = "upstreams_generated";
static final String CONFIG_RELOADS_METRIC = "upstreams_nginx_reloads";
static final String OK_CONFIG_RELOADS_METRIC = "upstreams_nginx_reloads_succeeded";
private final FileSystem fileSystem;
private final ProcessExecuter processExecuter;
private final Sleeper sleeper;
private final Clock clock;
private final RoutingStatus routingStatus;
private final Metric metric;
private final boolean outputRoutingDiff;
private final Object monitor = new Object();
public Nginx(FileSystem fileSystem, ProcessExecuter processExecuter, Sleeper sleeper, Clock clock, RoutingStatus routingStatus, Metric metric, boolean outputRoutingDiff) {
this.fileSystem = Objects.requireNonNull(fileSystem);
this.processExecuter = Objects.requireNonNull(processExecuter);
this.sleeper = Objects.requireNonNull(sleeper);
this.clock = Objects.requireNonNull(clock);
this.routingStatus = Objects.requireNonNull(routingStatus);
this.metric = Objects.requireNonNull(metric);
this.outputRoutingDiff = outputRoutingDiff;
}
@Override
public void load(RoutingTable table) {
synchronized (monitor) {
try {
table = table.routingMethod(RoutingMethod.sharedLayer4);
testConfig(table);
loadConfig(table.asMap().size());
gcConfig();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
/** Write given routing table to a temporary config file and test it */
private void testConfig(RoutingTable table) throws IOException {
String config = NginxConfig.from(table, routingStatus);
Files.createDirectories(NginxPath.root.in(fileSystem));
atomicWriteString(NginxPath.temporaryConfig.in(fileSystem), config);
retryingExec("/usr/bin/sudo /opt/vespa/bin/vespa-verify-nginx");
}
/** Load tested config into Nginx */
private String getDiff(Path configPath, Path tempConfigPath) throws IOException {
Pair<Integer, String> executed = processExecuter.exec("diff -U1 " + configPath + " " + tempConfigPath);
return executed.getSecond();
}
/** Remove old config files */
private void gcConfig() throws IOException {
Instant oneWeekAgo = clock.instant().minus(Duration.ofDays(7));
String configBasename = NginxPath.config.in(fileSystem).getFileName().toString();
try (var entries = Files.list(NginxPath.root.in(fileSystem))) {
entries.filter(Files::isRegularFile)
.filter(path -> path.getFileName().toString().startsWith(configBasename))
.filter(path -> rotatedAt(path).map(instant -> instant.isBefore(oneWeekAgo))
.orElse(false))
.forEach(path -> Exceptions.uncheck(() -> Files.deleteIfExists(path)));
}
}
/** Returns the time given path was rotated */
private Optional<Instant> rotatedAt(Path path) {
String[] parts = path.getFileName().toString().split("-", 2);
if (parts.length != 2) return Optional.empty();
return Optional.of(LocalDateTime.from(NginxPath.ROTATED_SUFFIX_FORMAT.parse(parts[1])).toInstant(ZoneOffset.UTC));
}
/** Run given command. Retries after a delay on failure */
private void retryingExec(String command) {
boolean success = false;
for (int attempt = 1; attempt <= EXEC_ATTEMPTS; attempt++) {
String errorMessage;
try {
Pair<Integer, String> result = processExecuter.exec(command);
if (result.getFirst() == 0) {
success = true;
break;
}
errorMessage = result.getSecond();
} catch (IOException e) {
errorMessage = Exceptions.toMessageString(e);
}
Duration duration = Duration.ofSeconds((long) Math.pow(2, attempt));
LOG.log(Level.WARNING, "Failed to run " + command + " on attempt " + attempt + ": " + errorMessage +
". Retrying in " + duration);
sleeper.sleep(duration);
}
if (!success) {
throw new RuntimeException("Failed to run " + command + " successfully after " + EXEC_ATTEMPTS +
" attempts, giving up");
}
}
/** Apply pathOperation to a temporary file, then atomically move the temporary file to path */
private void atomicWrite(Path path, PathOperation pathOperation) throws IOException {
Path tempFile = null;
try {
tempFile = Files.createTempFile(path.getParent(), "nginx", "");
pathOperation.run(tempFile);
Files.move(tempFile, path, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
} finally {
if (tempFile != null) {
Files.deleteIfExists(tempFile);
}
}
}
private void atomicCopy(Path src, Path dst) throws IOException {
atomicWrite(dst, (tempFile) -> Files.copy(src, tempFile,
StandardCopyOption.REPLACE_EXISTING,
StandardCopyOption.COPY_ATTRIBUTES));
}
private void atomicWriteString(Path path, String content) throws IOException {
atomicWrite(path, (tempFile) -> Files.writeString(tempFile, content));
}
@FunctionalInterface
private interface PathOperation {
void run(Path path) throws IOException;
}
} |
Could probably use auto-(un)boxing of bools for these if desired, i.e. `if (alreadySending && ...)`, `isSending.set(true)`. etc | private void sendNextInSequence(long seqId) {
Message msg = null;
synchronized (this) {
Queue<Message> queue = seqMap.get(seqId);
if (queue == null || queue.isEmpty()) {
seqMap.remove(seqId);
} else {
msg = queue.remove();
}
}
if (msg != null) {
Boolean alreadySending = isSending.get();
if ((alreadySending == Boolean.TRUE) && (msn != null)) {
msn.enqueue(new SequencedSendTask(msg));
} else {
isSending.set(Boolean.TRUE);
sequencedSend(msg);
}
isSending.set(Boolean.FALSE);
}
} | if ((alreadySending == Boolean.TRUE) && (msn != null)) { | private void sendNextInSequence(long seqId) {
Message msg = null;
synchronized (this) {
Queue<Message> queue = seqMap.get(seqId);
if (queue == null || queue.isEmpty()) {
seqMap.remove(seqId);
} else {
msg = queue.remove();
}
}
if (msg != null) {
Boolean alreadySending = isSending.get();
if (alreadySending && (msn != null)) {
msn.enqueue(new SequencedSendTask(msg));
} else {
isSending.set(Boolean.TRUE);
sequencedSend(msg);
}
isSending.set(Boolean.FALSE);
}
} | class SequencedSendTask implements Messenger.Task {
private final Message msg;
SequencedSendTask(Message msg) { this.msg = msg; }
@Override public void run() { sequencedSend(msg); }
@Override public void destroy() { msg.discard(); }
} | class SequencedSendTask implements Messenger.Task {
private final Message msg;
SequencedSendTask(Message msg) { this.msg = msg; }
@Override public void run() { sequencedSend(msg); }
@Override public void destroy() { msg.discard(); }
} |
It will be null first time, unless .withInitial | private void sendNextInSequence(long seqId) {
Message msg = null;
synchronized (this) {
Queue<Message> queue = seqMap.get(seqId);
if (queue == null || queue.isEmpty()) {
seqMap.remove(seqId);
} else {
msg = queue.remove();
}
}
if (msg != null) {
Boolean alreadySending = isSending.get();
if ((alreadySending == Boolean.TRUE) && (msn != null)) {
msn.enqueue(new SequencedSendTask(msg));
} else {
isSending.set(Boolean.TRUE);
sequencedSend(msg);
}
isSending.set(Boolean.FALSE);
}
} | if ((alreadySending == Boolean.TRUE) && (msn != null)) { | private void sendNextInSequence(long seqId) {
Message msg = null;
synchronized (this) {
Queue<Message> queue = seqMap.get(seqId);
if (queue == null || queue.isEmpty()) {
seqMap.remove(seqId);
} else {
msg = queue.remove();
}
}
if (msg != null) {
Boolean alreadySending = isSending.get();
if (alreadySending && (msn != null)) {
msn.enqueue(new SequencedSendTask(msg));
} else {
isSending.set(Boolean.TRUE);
sequencedSend(msg);
}
isSending.set(Boolean.FALSE);
}
} | class SequencedSendTask implements Messenger.Task {
private final Message msg;
SequencedSendTask(Message msg) { this.msg = msg; }
@Override public void run() { sequencedSend(msg); }
@Override public void destroy() { msg.discard(); }
} | class SequencedSendTask implements Messenger.Task {
private final Message msg;
SequencedSendTask(Message msg) { this.msg = msg; }
@Override public void run() { sequencedSend(msg); }
@Override public void destroy() { msg.discard(); }
} |
Done | private void sendNextInSequence(long seqId) {
Message msg = null;
synchronized (this) {
Queue<Message> queue = seqMap.get(seqId);
if (queue == null || queue.isEmpty()) {
seqMap.remove(seqId);
} else {
msg = queue.remove();
}
}
if (msg != null) {
Boolean alreadySending = isSending.get();
if ((alreadySending == Boolean.TRUE) && (msn != null)) {
msn.enqueue(new SequencedSendTask(msg));
} else {
isSending.set(Boolean.TRUE);
sequencedSend(msg);
}
isSending.set(Boolean.FALSE);
}
} | if ((alreadySending == Boolean.TRUE) && (msn != null)) { | private void sendNextInSequence(long seqId) {
Message msg = null;
synchronized (this) {
Queue<Message> queue = seqMap.get(seqId);
if (queue == null || queue.isEmpty()) {
seqMap.remove(seqId);
} else {
msg = queue.remove();
}
}
if (msg != null) {
Boolean alreadySending = isSending.get();
if (alreadySending && (msn != null)) {
msn.enqueue(new SequencedSendTask(msg));
} else {
isSending.set(Boolean.TRUE);
sequencedSend(msg);
}
isSending.set(Boolean.FALSE);
}
} | class SequencedSendTask implements Messenger.Task {
private final Message msg;
SequencedSendTask(Message msg) { this.msg = msg; }
@Override public void run() { sequencedSend(msg); }
@Override public void destroy() { msg.discard(); }
} | class SequencedSendTask implements Messenger.Task {
private final Message msg;
SequencedSendTask(Message msg) { this.msg = msg; }
@Override public void run() { sequencedSend(msg); }
@Override public void destroy() { msg.discard(); }
} |
```suggestion assertEquals(false, array.containsAll(List.of(new StringFieldValue("bob")))); assertEquals(true, array.containsAll(List.of(new StringFieldValue("foo"), new StringFieldValue("boo"), new StringFieldValue("apple")))); ``` | public void testWrappedList() {
Array<StringFieldValue> array = new Array<>(DataType.getArray(DataType.STRING));
List<String> list = new ArrayList<>();
list.add("foo");
list.add("bar");
list.add("baz");
array.assign(list);
assertEquals(3, array.size());
assertEquals(3, list.size());
assertFalse(array.isEmpty());
assertFalse(list.isEmpty());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
array.remove(2);
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
list.remove(1);
assertEquals(1, array.size());
assertEquals(1, list.size());
assertEquals("foo", list.get(0));
assertEquals(new StringFieldValue("foo"), array.get(0));
list.add("bar");
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
array.add(new StringFieldValue("baz"));
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertTrue(array.contains(new StringFieldValue("foo")));
assertTrue(list.contains("foo"));
list.add("foo");
assertEquals(0, list.indexOf("foo"));
assertEquals(0, array.indexOf(new StringFieldValue("foo")));
assertEquals(3, list.lastIndexOf("foo"));
assertEquals(3, array.lastIndexOf(new StringFieldValue("foo")));
list.set(3, "banana");
assertEquals(4, array.size());
assertEquals(4, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals("banana", list.get(3));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertEquals(new StringFieldValue("banana"), array.get(3));
array.set(3, new StringFieldValue("apple"));
assertEquals(4, array.size());
assertEquals(4, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals("apple", list.get(3));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertEquals(new StringFieldValue("apple"), array.get(3));
list.remove("bar");
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("baz", list.get(1));
assertEquals("apple", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("baz"), array.get(1));
assertEquals(new StringFieldValue("apple"), array.get(2));
array.remove(new StringFieldValue("baz"));
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("apple", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("apple"), array.get(1));
assertNotNull(array.toArray(new StringFieldValue[5]));
try {
array.retainAll(new ArrayList<StringFieldValue>());
fail("Not implemented yet.");
} catch (UnsupportedOperationException uoe) {
}
array.add(1, new StringFieldValue("boo"));
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("boo", list.get(1));
assertEquals("apple", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("boo"), array.get(1));
assertEquals(new StringFieldValue("apple"), array.get(2));
array.toString();
List<StringFieldValue> subArray = array.subList(1, 3);
assertEquals(2, subArray.size());
assertEquals(new StringFieldValue("boo"), subArray.get(0));
assertEquals(new StringFieldValue("apple"), subArray.get(1));
assertEquals(false, array.containsAll(Arrays.asList(new StringFieldValue("bob"))));
assertEquals(true, array.containsAll(Arrays.asList(new StringFieldValue("foo"), new StringFieldValue("boo"), new StringFieldValue("apple"))));
array.removeAll(Arrays.asList(new StringFieldValue("foo"), new StringFieldValue("boo")));
assertEquals(1, array.size());
assertEquals(1, list.size());
assertEquals("apple", list.get(0));
assertEquals(new StringFieldValue("apple"), array.get(0));
array.add(new StringFieldValue("ibm"));
assertEquals(2, array.size());
assertEquals(2, list.size());
{
Iterator<StringFieldValue> it = array.iterator();
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("apple"), it.next());
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("ibm"), it.next());
assertFalse(it.hasNext());
}
{
ListIterator<StringFieldValue> it = array.listIterator();
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("apple"), it.next());
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("ibm"), it.next());
assertFalse(it.hasNext());
}
array.addAll(Arrays.asList(new StringFieldValue("microsoft"), new StringFieldValue("google")));
assertEquals(4, array.size());
assertEquals(4, list.size());
array.clear();
assertEquals(0, array.size());
assertEquals(0, list.size());
} | assertEquals(true, array.containsAll(Arrays.asList(new StringFieldValue("foo"), new StringFieldValue("boo"), new StringFieldValue("apple")))); | public void testWrappedList() {
Array<StringFieldValue> array = new Array<>(DataType.getArray(DataType.STRING));
List<String> list = new ArrayList<>();
list.add("foo");
list.add("bar");
list.add("baz");
array.assign(list);
assertEquals(3, array.size());
assertEquals(3, list.size());
assertFalse(array.isEmpty());
assertFalse(list.isEmpty());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
array.remove(2);
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
list.remove(1);
assertEquals(1, array.size());
assertEquals(1, list.size());
assertEquals("foo", list.get(0));
assertEquals(new StringFieldValue("foo"), array.get(0));
list.add("bar");
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
array.add(new StringFieldValue("baz"));
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertTrue(array.contains(new StringFieldValue("foo")));
assertTrue(list.contains("foo"));
list.add("foo");
assertEquals(0, list.indexOf("foo"));
assertEquals(0, array.indexOf(new StringFieldValue("foo")));
assertEquals(3, list.lastIndexOf("foo"));
assertEquals(3, array.lastIndexOf(new StringFieldValue("foo")));
list.set(3, "banana");
assertEquals(4, array.size());
assertEquals(4, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals("banana", list.get(3));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertEquals(new StringFieldValue("banana"), array.get(3));
array.set(3, new StringFieldValue("apple"));
assertEquals(4, array.size());
assertEquals(4, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals("apple", list.get(3));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertEquals(new StringFieldValue("apple"), array.get(3));
list.remove("bar");
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("baz", list.get(1));
assertEquals("apple", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("baz"), array.get(1));
assertEquals(new StringFieldValue("apple"), array.get(2));
array.remove(new StringFieldValue("baz"));
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("apple", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("apple"), array.get(1));
assertNotNull(array.toArray(new StringFieldValue[5]));
try {
array.retainAll(new ArrayList<StringFieldValue>());
fail("Not implemented yet.");
} catch (UnsupportedOperationException uoe) {
}
array.add(1, new StringFieldValue("boo"));
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("boo", list.get(1));
assertEquals("apple", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("boo"), array.get(1));
assertEquals(new StringFieldValue("apple"), array.get(2));
array.toString();
List<StringFieldValue> subArray = array.subList(1, 3);
assertEquals(2, subArray.size());
assertEquals(new StringFieldValue("boo"), subArray.get(0));
assertEquals(new StringFieldValue("apple"), subArray.get(1));
assertEquals(false, array.containsAll(List.of(new StringFieldValue("bob"))));
assertEquals(true, array.containsAll(List.of(new StringFieldValue("foo"), new StringFieldValue("boo"), new StringFieldValue("apple"))));
array.removeAll(List.of(new StringFieldValue("foo"), new StringFieldValue("boo")));
assertEquals(1, array.size());
assertEquals(1, list.size());
assertEquals("apple", list.get(0));
assertEquals(new StringFieldValue("apple"), array.get(0));
array.add(new StringFieldValue("ibm"));
assertEquals(2, array.size());
assertEquals(2, list.size());
{
Iterator<StringFieldValue> it = array.iterator();
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("apple"), it.next());
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("ibm"), it.next());
assertFalse(it.hasNext());
}
{
ListIterator<StringFieldValue> it = array.listIterator();
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("apple"), it.next());
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("ibm"), it.next());
assertFalse(it.hasNext());
}
array.addAll(List.of(new StringFieldValue("microsoft"), new StringFieldValue("google")));
assertEquals(4, array.size());
assertEquals(4, list.size());
array.clear();
assertEquals(0, array.size());
assertEquals(0, list.size());
} | class ArrayTestCase {
@Test
public void testToArray() {
ArrayDataType dt = new ArrayDataType(DataType.STRING);
Array<StringFieldValue> arr = new Array<>(dt);
arr.add(new StringFieldValue("a"));
arr.add(new StringFieldValue("b"));
arr.add(new StringFieldValue("c"));
StringFieldValue[] tooSmall = new StringFieldValue[0];
StringFieldValue[] bigEnough = new StringFieldValue[3];
StringFieldValue[] a = arr.toArray(tooSmall);
assertNotSame(tooSmall, a);
assertEquals(new StringFieldValue("a"), a[0]);
assertEquals(new StringFieldValue("b"), a[1]);
assertEquals(new StringFieldValue("c"), a[2]);
StringFieldValue[] b = arr.toArray(bigEnough);
assertSame(bigEnough, b);
assertEquals(new StringFieldValue("a"), b[0]);
assertEquals(new StringFieldValue("b"), b[1]);
assertEquals(new StringFieldValue("c"), b[2]);
}
@Test
public void testCreateIllegalArray() {
ArrayList<FieldValue> arrayList = new ArrayList<>();
arrayList.add(new StringFieldValue("foo"));
arrayList.add(new IntegerFieldValue(1000));
DataType stringType = new ArrayDataType(DataType.STRING);
try {
Array<FieldValue> illegalArray = new Array<>(stringType, arrayList);
fail("Expected an exception");
} catch (IllegalArgumentException e) {
assertEquals("FieldValue 1000 is not compatible with datatype "
+ "Array<string> (code: -1486737430).",
e.getMessage());
}
DataType intType = new ArrayDataType(DataType.INT);
Array<IntegerFieldValue> intArray = new Array<>(intType);
intArray.add(new IntegerFieldValue(42));
Array<StringFieldValue> stringArray = new Array<>(stringType);
try {
stringArray.assign(intArray);
fail("Expected an exception");
} catch (IllegalArgumentException e) {
assertEquals("Incompatible data types. Got datatype int (code: 0),"
+ " expected datatype string (code: 2)",
e.getMessage());
}
}
@Test
@Test
public void testListWrapperToArray() {
Array<StringFieldValue> array = new Array<>(new ArrayDataType(DataType.STRING));
List<StringFieldValue> assignFrom = new ArrayList<>(3);
assignFrom.add(new StringFieldValue("a"));
assignFrom.add(new StringFieldValue("b"));
assignFrom.add(new StringFieldValue("c"));
array.assign(assignFrom);
final StringFieldValue[] expected = new StringFieldValue[] { new StringFieldValue("a"), new StringFieldValue("b"),
new StringFieldValue("c") };
assertTrue(Arrays.equals(expected, array.toArray(new StringFieldValue[0])));
}
@Test
public void testEquals() {
Array<StringFieldValue> a = new Array<>(new ArrayDataType(DataType.STRING));
a.add(new StringFieldValue("mumbo jumbo 1"));
a.add(new StringFieldValue("mumbo jumbo 2"));
Array<StringFieldValue> b = new Array<>(new ArrayDataType(DataType.STRING));
b.add(new StringFieldValue("mumbo jumbo 1"));
b.add(new StringFieldValue("mumbo jumbo 2"));
assertEquals(a, b);
assertEquals(b, a);
assertEquals(0, a.compareTo(b));
assertEquals(0, b.compareTo(a));
b.clear();
List<String> l = new ArrayList<>();
l.add("mumbo jumbo 1");
l.add("mumbo jumbo 2");
b.assign(l);
assertEquals(a, b);
assertEquals(b, a);
assertEquals(0, a.compareTo(b));
assertEquals(0, b.compareTo(a));
}
@Test
public void testLess() {
Array<StringFieldValue> a = new Array<>(new ArrayDataType(DataType.STRING));
a.add(new StringFieldValue("mumbo jumbo 1"));
a.add(new StringFieldValue("mumbo jumbo 3"));
Array<StringFieldValue> b = new Array<>(new ArrayDataType(DataType.STRING));
b.add(new StringFieldValue("mumbo jumbo 1"));
b.add(new StringFieldValue("mumbo jumbo 2"));
assertNotEquals(a, b);
assertNotEquals(b, a);
assertEquals(1, a.compareTo(b));
assertEquals(-1, b.compareTo(a));
b.clear();
List<String> l = new ArrayList<>();
l.add("mumbo jumbo 1");
l.add("mumbo jumbo 2");
b.assign(l);
assertNotEquals(a, b);
assertNotEquals(b, a);
assertEquals(1, a.compareTo(b));
assertEquals(-1, b.compareTo(a));
}
} | class ArrayTestCase {
@Test
public void testToArray() {
ArrayDataType dt = new ArrayDataType(DataType.STRING);
Array<StringFieldValue> arr = new Array<>(dt);
arr.add(new StringFieldValue("a"));
arr.add(new StringFieldValue("b"));
arr.add(new StringFieldValue("c"));
StringFieldValue[] tooSmall = new StringFieldValue[0];
StringFieldValue[] bigEnough = new StringFieldValue[3];
StringFieldValue[] a = arr.toArray(tooSmall);
assertNotSame(tooSmall, a);
assertEquals(new StringFieldValue("a"), a[0]);
assertEquals(new StringFieldValue("b"), a[1]);
assertEquals(new StringFieldValue("c"), a[2]);
StringFieldValue[] b = arr.toArray(bigEnough);
assertSame(bigEnough, b);
assertEquals(new StringFieldValue("a"), b[0]);
assertEquals(new StringFieldValue("b"), b[1]);
assertEquals(new StringFieldValue("c"), b[2]);
}
@Test
public void testCreateIllegalArray() {
ArrayList<FieldValue> arrayList = new ArrayList<>();
arrayList.add(new StringFieldValue("foo"));
arrayList.add(new IntegerFieldValue(1000));
DataType stringType = new ArrayDataType(DataType.STRING);
try {
Array<FieldValue> illegalArray = new Array<>(stringType, arrayList);
fail("Expected an exception");
} catch (IllegalArgumentException e) {
assertEquals("FieldValue 1000 is not compatible with datatype "
+ "Array<string> (code: -1486737430).",
e.getMessage());
}
DataType intType = new ArrayDataType(DataType.INT);
Array<IntegerFieldValue> intArray = new Array<>(intType);
intArray.add(new IntegerFieldValue(42));
Array<StringFieldValue> stringArray = new Array<>(stringType);
try {
stringArray.assign(intArray);
fail("Expected an exception");
} catch (IllegalArgumentException e) {
assertEquals("Incompatible data types. Got datatype int (code: 0),"
+ " expected datatype string (code: 2)",
e.getMessage());
}
}
@Test
@Test
public void testListWrapperToArray() {
Array<StringFieldValue> array = new Array<>(new ArrayDataType(DataType.STRING));
List<StringFieldValue> assignFrom = new ArrayList<>(3);
assignFrom.add(new StringFieldValue("a"));
assignFrom.add(new StringFieldValue("b"));
assignFrom.add(new StringFieldValue("c"));
array.assign(assignFrom);
final StringFieldValue[] expected = new StringFieldValue[] { new StringFieldValue("a"), new StringFieldValue("b"),
new StringFieldValue("c") };
assertTrue(Arrays.equals(expected, array.toArray(new StringFieldValue[0])));
}
@Test
public void testEquals() {
Array<StringFieldValue> a = new Array<>(new ArrayDataType(DataType.STRING));
a.add(new StringFieldValue("mumbo jumbo 1"));
a.add(new StringFieldValue("mumbo jumbo 2"));
Array<StringFieldValue> b = new Array<>(new ArrayDataType(DataType.STRING));
b.add(new StringFieldValue("mumbo jumbo 1"));
b.add(new StringFieldValue("mumbo jumbo 2"));
assertEquals(a, b);
assertEquals(b, a);
assertEquals(0, a.compareTo(b));
assertEquals(0, b.compareTo(a));
b.clear();
List<String> l = new ArrayList<>();
l.add("mumbo jumbo 1");
l.add("mumbo jumbo 2");
b.assign(l);
assertEquals(a, b);
assertEquals(b, a);
assertEquals(0, a.compareTo(b));
assertEquals(0, b.compareTo(a));
}
@Test
public void testLess() {
Array<StringFieldValue> a = new Array<>(new ArrayDataType(DataType.STRING));
a.add(new StringFieldValue("mumbo jumbo 1"));
a.add(new StringFieldValue("mumbo jumbo 3"));
Array<StringFieldValue> b = new Array<>(new ArrayDataType(DataType.STRING));
b.add(new StringFieldValue("mumbo jumbo 1"));
b.add(new StringFieldValue("mumbo jumbo 2"));
assertNotEquals(a, b);
assertNotEquals(b, a);
assertEquals(1, a.compareTo(b));
assertEquals(-1, b.compareTo(a));
b.clear();
List<String> l = new ArrayList<>();
l.add("mumbo jumbo 1");
l.add("mumbo jumbo 2");
b.assign(l);
assertNotEquals(a, b);
assertNotEquals(b, a);
assertEquals(1, a.compareTo(b));
assertEquals(-1, b.compareTo(a));
}
} |
```suggestion array.removeAll(List.of(new StringFieldValue("foo"), new StringFieldValue("boo"))); ``` | public void testWrappedList() {
Array<StringFieldValue> array = new Array<>(DataType.getArray(DataType.STRING));
List<String> list = new ArrayList<>();
list.add("foo");
list.add("bar");
list.add("baz");
array.assign(list);
assertEquals(3, array.size());
assertEquals(3, list.size());
assertFalse(array.isEmpty());
assertFalse(list.isEmpty());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
array.remove(2);
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
list.remove(1);
assertEquals(1, array.size());
assertEquals(1, list.size());
assertEquals("foo", list.get(0));
assertEquals(new StringFieldValue("foo"), array.get(0));
list.add("bar");
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
array.add(new StringFieldValue("baz"));
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertTrue(array.contains(new StringFieldValue("foo")));
assertTrue(list.contains("foo"));
list.add("foo");
assertEquals(0, list.indexOf("foo"));
assertEquals(0, array.indexOf(new StringFieldValue("foo")));
assertEquals(3, list.lastIndexOf("foo"));
assertEquals(3, array.lastIndexOf(new StringFieldValue("foo")));
list.set(3, "banana");
assertEquals(4, array.size());
assertEquals(4, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals("banana", list.get(3));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertEquals(new StringFieldValue("banana"), array.get(3));
array.set(3, new StringFieldValue("apple"));
assertEquals(4, array.size());
assertEquals(4, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals("apple", list.get(3));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertEquals(new StringFieldValue("apple"), array.get(3));
list.remove("bar");
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("baz", list.get(1));
assertEquals("apple", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("baz"), array.get(1));
assertEquals(new StringFieldValue("apple"), array.get(2));
array.remove(new StringFieldValue("baz"));
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("apple", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("apple"), array.get(1));
assertNotNull(array.toArray(new StringFieldValue[5]));
try {
array.retainAll(new ArrayList<StringFieldValue>());
fail("Not implemented yet.");
} catch (UnsupportedOperationException uoe) {
}
array.add(1, new StringFieldValue("boo"));
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("boo", list.get(1));
assertEquals("apple", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("boo"), array.get(1));
assertEquals(new StringFieldValue("apple"), array.get(2));
array.toString();
List<StringFieldValue> subArray = array.subList(1, 3);
assertEquals(2, subArray.size());
assertEquals(new StringFieldValue("boo"), subArray.get(0));
assertEquals(new StringFieldValue("apple"), subArray.get(1));
assertEquals(false, array.containsAll(Arrays.asList(new StringFieldValue("bob"))));
assertEquals(true, array.containsAll(Arrays.asList(new StringFieldValue("foo"), new StringFieldValue("boo"), new StringFieldValue("apple"))));
array.removeAll(Arrays.asList(new StringFieldValue("foo"), new StringFieldValue("boo")));
assertEquals(1, array.size());
assertEquals(1, list.size());
assertEquals("apple", list.get(0));
assertEquals(new StringFieldValue("apple"), array.get(0));
array.add(new StringFieldValue("ibm"));
assertEquals(2, array.size());
assertEquals(2, list.size());
{
Iterator<StringFieldValue> it = array.iterator();
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("apple"), it.next());
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("ibm"), it.next());
assertFalse(it.hasNext());
}
{
ListIterator<StringFieldValue> it = array.listIterator();
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("apple"), it.next());
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("ibm"), it.next());
assertFalse(it.hasNext());
}
array.addAll(Arrays.asList(new StringFieldValue("microsoft"), new StringFieldValue("google")));
assertEquals(4, array.size());
assertEquals(4, list.size());
array.clear();
assertEquals(0, array.size());
assertEquals(0, list.size());
} | array.removeAll(Arrays.asList(new StringFieldValue("foo"), new StringFieldValue("boo"))); | public void testWrappedList() {
Array<StringFieldValue> array = new Array<>(DataType.getArray(DataType.STRING));
List<String> list = new ArrayList<>();
list.add("foo");
list.add("bar");
list.add("baz");
array.assign(list);
assertEquals(3, array.size());
assertEquals(3, list.size());
assertFalse(array.isEmpty());
assertFalse(list.isEmpty());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
array.remove(2);
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
list.remove(1);
assertEquals(1, array.size());
assertEquals(1, list.size());
assertEquals("foo", list.get(0));
assertEquals(new StringFieldValue("foo"), array.get(0));
list.add("bar");
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
array.add(new StringFieldValue("baz"));
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertTrue(array.contains(new StringFieldValue("foo")));
assertTrue(list.contains("foo"));
list.add("foo");
assertEquals(0, list.indexOf("foo"));
assertEquals(0, array.indexOf(new StringFieldValue("foo")));
assertEquals(3, list.lastIndexOf("foo"));
assertEquals(3, array.lastIndexOf(new StringFieldValue("foo")));
list.set(3, "banana");
assertEquals(4, array.size());
assertEquals(4, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals("banana", list.get(3));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertEquals(new StringFieldValue("banana"), array.get(3));
array.set(3, new StringFieldValue("apple"));
assertEquals(4, array.size());
assertEquals(4, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals("apple", list.get(3));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertEquals(new StringFieldValue("apple"), array.get(3));
list.remove("bar");
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("baz", list.get(1));
assertEquals("apple", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("baz"), array.get(1));
assertEquals(new StringFieldValue("apple"), array.get(2));
array.remove(new StringFieldValue("baz"));
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("apple", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("apple"), array.get(1));
assertNotNull(array.toArray(new StringFieldValue[5]));
try {
array.retainAll(new ArrayList<StringFieldValue>());
fail("Not implemented yet.");
} catch (UnsupportedOperationException uoe) {
}
array.add(1, new StringFieldValue("boo"));
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("boo", list.get(1));
assertEquals("apple", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("boo"), array.get(1));
assertEquals(new StringFieldValue("apple"), array.get(2));
array.toString();
List<StringFieldValue> subArray = array.subList(1, 3);
assertEquals(2, subArray.size());
assertEquals(new StringFieldValue("boo"), subArray.get(0));
assertEquals(new StringFieldValue("apple"), subArray.get(1));
assertEquals(false, array.containsAll(List.of(new StringFieldValue("bob"))));
assertEquals(true, array.containsAll(List.of(new StringFieldValue("foo"), new StringFieldValue("boo"), new StringFieldValue("apple"))));
array.removeAll(List.of(new StringFieldValue("foo"), new StringFieldValue("boo")));
assertEquals(1, array.size());
assertEquals(1, list.size());
assertEquals("apple", list.get(0));
assertEquals(new StringFieldValue("apple"), array.get(0));
array.add(new StringFieldValue("ibm"));
assertEquals(2, array.size());
assertEquals(2, list.size());
{
Iterator<StringFieldValue> it = array.iterator();
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("apple"), it.next());
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("ibm"), it.next());
assertFalse(it.hasNext());
}
{
ListIterator<StringFieldValue> it = array.listIterator();
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("apple"), it.next());
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("ibm"), it.next());
assertFalse(it.hasNext());
}
array.addAll(List.of(new StringFieldValue("microsoft"), new StringFieldValue("google")));
assertEquals(4, array.size());
assertEquals(4, list.size());
array.clear();
assertEquals(0, array.size());
assertEquals(0, list.size());
} | class ArrayTestCase {
@Test
public void testToArray() {
ArrayDataType dt = new ArrayDataType(DataType.STRING);
Array<StringFieldValue> arr = new Array<>(dt);
arr.add(new StringFieldValue("a"));
arr.add(new StringFieldValue("b"));
arr.add(new StringFieldValue("c"));
StringFieldValue[] tooSmall = new StringFieldValue[0];
StringFieldValue[] bigEnough = new StringFieldValue[3];
StringFieldValue[] a = arr.toArray(tooSmall);
assertNotSame(tooSmall, a);
assertEquals(new StringFieldValue("a"), a[0]);
assertEquals(new StringFieldValue("b"), a[1]);
assertEquals(new StringFieldValue("c"), a[2]);
StringFieldValue[] b = arr.toArray(bigEnough);
assertSame(bigEnough, b);
assertEquals(new StringFieldValue("a"), b[0]);
assertEquals(new StringFieldValue("b"), b[1]);
assertEquals(new StringFieldValue("c"), b[2]);
}
@Test
public void testCreateIllegalArray() {
ArrayList<FieldValue> arrayList = new ArrayList<>();
arrayList.add(new StringFieldValue("foo"));
arrayList.add(new IntegerFieldValue(1000));
DataType stringType = new ArrayDataType(DataType.STRING);
try {
Array<FieldValue> illegalArray = new Array<>(stringType, arrayList);
fail("Expected an exception");
} catch (IllegalArgumentException e) {
assertEquals("FieldValue 1000 is not compatible with datatype "
+ "Array<string> (code: -1486737430).",
e.getMessage());
}
DataType intType = new ArrayDataType(DataType.INT);
Array<IntegerFieldValue> intArray = new Array<>(intType);
intArray.add(new IntegerFieldValue(42));
Array<StringFieldValue> stringArray = new Array<>(stringType);
try {
stringArray.assign(intArray);
fail("Expected an exception");
} catch (IllegalArgumentException e) {
assertEquals("Incompatible data types. Got datatype int (code: 0),"
+ " expected datatype string (code: 2)",
e.getMessage());
}
}
@Test
@Test
public void testListWrapperToArray() {
Array<StringFieldValue> array = new Array<>(new ArrayDataType(DataType.STRING));
List<StringFieldValue> assignFrom = new ArrayList<>(3);
assignFrom.add(new StringFieldValue("a"));
assignFrom.add(new StringFieldValue("b"));
assignFrom.add(new StringFieldValue("c"));
array.assign(assignFrom);
final StringFieldValue[] expected = new StringFieldValue[] { new StringFieldValue("a"), new StringFieldValue("b"),
new StringFieldValue("c") };
assertTrue(Arrays.equals(expected, array.toArray(new StringFieldValue[0])));
}
@Test
public void testEquals() {
Array<StringFieldValue> a = new Array<>(new ArrayDataType(DataType.STRING));
a.add(new StringFieldValue("mumbo jumbo 1"));
a.add(new StringFieldValue("mumbo jumbo 2"));
Array<StringFieldValue> b = new Array<>(new ArrayDataType(DataType.STRING));
b.add(new StringFieldValue("mumbo jumbo 1"));
b.add(new StringFieldValue("mumbo jumbo 2"));
assertEquals(a, b);
assertEquals(b, a);
assertEquals(0, a.compareTo(b));
assertEquals(0, b.compareTo(a));
b.clear();
List<String> l = new ArrayList<>();
l.add("mumbo jumbo 1");
l.add("mumbo jumbo 2");
b.assign(l);
assertEquals(a, b);
assertEquals(b, a);
assertEquals(0, a.compareTo(b));
assertEquals(0, b.compareTo(a));
}
@Test
public void testLess() {
Array<StringFieldValue> a = new Array<>(new ArrayDataType(DataType.STRING));
a.add(new StringFieldValue("mumbo jumbo 1"));
a.add(new StringFieldValue("mumbo jumbo 3"));
Array<StringFieldValue> b = new Array<>(new ArrayDataType(DataType.STRING));
b.add(new StringFieldValue("mumbo jumbo 1"));
b.add(new StringFieldValue("mumbo jumbo 2"));
assertNotEquals(a, b);
assertNotEquals(b, a);
assertEquals(1, a.compareTo(b));
assertEquals(-1, b.compareTo(a));
b.clear();
List<String> l = new ArrayList<>();
l.add("mumbo jumbo 1");
l.add("mumbo jumbo 2");
b.assign(l);
assertNotEquals(a, b);
assertNotEquals(b, a);
assertEquals(1, a.compareTo(b));
assertEquals(-1, b.compareTo(a));
}
} | class ArrayTestCase {
@Test
public void testToArray() {
ArrayDataType dt = new ArrayDataType(DataType.STRING);
Array<StringFieldValue> arr = new Array<>(dt);
arr.add(new StringFieldValue("a"));
arr.add(new StringFieldValue("b"));
arr.add(new StringFieldValue("c"));
StringFieldValue[] tooSmall = new StringFieldValue[0];
StringFieldValue[] bigEnough = new StringFieldValue[3];
StringFieldValue[] a = arr.toArray(tooSmall);
assertNotSame(tooSmall, a);
assertEquals(new StringFieldValue("a"), a[0]);
assertEquals(new StringFieldValue("b"), a[1]);
assertEquals(new StringFieldValue("c"), a[2]);
StringFieldValue[] b = arr.toArray(bigEnough);
assertSame(bigEnough, b);
assertEquals(new StringFieldValue("a"), b[0]);
assertEquals(new StringFieldValue("b"), b[1]);
assertEquals(new StringFieldValue("c"), b[2]);
}
@Test
public void testCreateIllegalArray() {
ArrayList<FieldValue> arrayList = new ArrayList<>();
arrayList.add(new StringFieldValue("foo"));
arrayList.add(new IntegerFieldValue(1000));
DataType stringType = new ArrayDataType(DataType.STRING);
try {
Array<FieldValue> illegalArray = new Array<>(stringType, arrayList);
fail("Expected an exception");
} catch (IllegalArgumentException e) {
assertEquals("FieldValue 1000 is not compatible with datatype "
+ "Array<string> (code: -1486737430).",
e.getMessage());
}
DataType intType = new ArrayDataType(DataType.INT);
Array<IntegerFieldValue> intArray = new Array<>(intType);
intArray.add(new IntegerFieldValue(42));
Array<StringFieldValue> stringArray = new Array<>(stringType);
try {
stringArray.assign(intArray);
fail("Expected an exception");
} catch (IllegalArgumentException e) {
assertEquals("Incompatible data types. Got datatype int (code: 0),"
+ " expected datatype string (code: 2)",
e.getMessage());
}
}
@Test
@Test
public void testListWrapperToArray() {
Array<StringFieldValue> array = new Array<>(new ArrayDataType(DataType.STRING));
List<StringFieldValue> assignFrom = new ArrayList<>(3);
assignFrom.add(new StringFieldValue("a"));
assignFrom.add(new StringFieldValue("b"));
assignFrom.add(new StringFieldValue("c"));
array.assign(assignFrom);
final StringFieldValue[] expected = new StringFieldValue[] { new StringFieldValue("a"), new StringFieldValue("b"),
new StringFieldValue("c") };
assertTrue(Arrays.equals(expected, array.toArray(new StringFieldValue[0])));
}
@Test
public void testEquals() {
Array<StringFieldValue> a = new Array<>(new ArrayDataType(DataType.STRING));
a.add(new StringFieldValue("mumbo jumbo 1"));
a.add(new StringFieldValue("mumbo jumbo 2"));
Array<StringFieldValue> b = new Array<>(new ArrayDataType(DataType.STRING));
b.add(new StringFieldValue("mumbo jumbo 1"));
b.add(new StringFieldValue("mumbo jumbo 2"));
assertEquals(a, b);
assertEquals(b, a);
assertEquals(0, a.compareTo(b));
assertEquals(0, b.compareTo(a));
b.clear();
List<String> l = new ArrayList<>();
l.add("mumbo jumbo 1");
l.add("mumbo jumbo 2");
b.assign(l);
assertEquals(a, b);
assertEquals(b, a);
assertEquals(0, a.compareTo(b));
assertEquals(0, b.compareTo(a));
}
@Test
public void testLess() {
Array<StringFieldValue> a = new Array<>(new ArrayDataType(DataType.STRING));
a.add(new StringFieldValue("mumbo jumbo 1"));
a.add(new StringFieldValue("mumbo jumbo 3"));
Array<StringFieldValue> b = new Array<>(new ArrayDataType(DataType.STRING));
b.add(new StringFieldValue("mumbo jumbo 1"));
b.add(new StringFieldValue("mumbo jumbo 2"));
assertNotEquals(a, b);
assertNotEquals(b, a);
assertEquals(1, a.compareTo(b));
assertEquals(-1, b.compareTo(a));
b.clear();
List<String> l = new ArrayList<>();
l.add("mumbo jumbo 1");
l.add("mumbo jumbo 2");
b.assign(l);
assertNotEquals(a, b);
assertNotEquals(b, a);
assertEquals(1, a.compareTo(b));
assertEquals(-1, b.compareTo(a));
}
} |
```suggestion array.addAll(List.of(new StringFieldValue("microsoft"), new StringFieldValue("google"))); ``` | public void testWrappedList() {
Array<StringFieldValue> array = new Array<>(DataType.getArray(DataType.STRING));
List<String> list = new ArrayList<>();
list.add("foo");
list.add("bar");
list.add("baz");
array.assign(list);
assertEquals(3, array.size());
assertEquals(3, list.size());
assertFalse(array.isEmpty());
assertFalse(list.isEmpty());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
array.remove(2);
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
list.remove(1);
assertEquals(1, array.size());
assertEquals(1, list.size());
assertEquals("foo", list.get(0));
assertEquals(new StringFieldValue("foo"), array.get(0));
list.add("bar");
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
array.add(new StringFieldValue("baz"));
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertTrue(array.contains(new StringFieldValue("foo")));
assertTrue(list.contains("foo"));
list.add("foo");
assertEquals(0, list.indexOf("foo"));
assertEquals(0, array.indexOf(new StringFieldValue("foo")));
assertEquals(3, list.lastIndexOf("foo"));
assertEquals(3, array.lastIndexOf(new StringFieldValue("foo")));
list.set(3, "banana");
assertEquals(4, array.size());
assertEquals(4, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals("banana", list.get(3));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertEquals(new StringFieldValue("banana"), array.get(3));
array.set(3, new StringFieldValue("apple"));
assertEquals(4, array.size());
assertEquals(4, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals("apple", list.get(3));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertEquals(new StringFieldValue("apple"), array.get(3));
list.remove("bar");
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("baz", list.get(1));
assertEquals("apple", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("baz"), array.get(1));
assertEquals(new StringFieldValue("apple"), array.get(2));
array.remove(new StringFieldValue("baz"));
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("apple", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("apple"), array.get(1));
assertNotNull(array.toArray(new StringFieldValue[5]));
try {
array.retainAll(new ArrayList<StringFieldValue>());
fail("Not implemented yet.");
} catch (UnsupportedOperationException uoe) {
}
array.add(1, new StringFieldValue("boo"));
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("boo", list.get(1));
assertEquals("apple", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("boo"), array.get(1));
assertEquals(new StringFieldValue("apple"), array.get(2));
array.toString();
List<StringFieldValue> subArray = array.subList(1, 3);
assertEquals(2, subArray.size());
assertEquals(new StringFieldValue("boo"), subArray.get(0));
assertEquals(new StringFieldValue("apple"), subArray.get(1));
assertEquals(false, array.containsAll(Arrays.asList(new StringFieldValue("bob"))));
assertEquals(true, array.containsAll(Arrays.asList(new StringFieldValue("foo"), new StringFieldValue("boo"), new StringFieldValue("apple"))));
array.removeAll(Arrays.asList(new StringFieldValue("foo"), new StringFieldValue("boo")));
assertEquals(1, array.size());
assertEquals(1, list.size());
assertEquals("apple", list.get(0));
assertEquals(new StringFieldValue("apple"), array.get(0));
array.add(new StringFieldValue("ibm"));
assertEquals(2, array.size());
assertEquals(2, list.size());
{
Iterator<StringFieldValue> it = array.iterator();
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("apple"), it.next());
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("ibm"), it.next());
assertFalse(it.hasNext());
}
{
ListIterator<StringFieldValue> it = array.listIterator();
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("apple"), it.next());
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("ibm"), it.next());
assertFalse(it.hasNext());
}
array.addAll(Arrays.asList(new StringFieldValue("microsoft"), new StringFieldValue("google")));
assertEquals(4, array.size());
assertEquals(4, list.size());
array.clear();
assertEquals(0, array.size());
assertEquals(0, list.size());
} | array.addAll(Arrays.asList(new StringFieldValue("microsoft"), new StringFieldValue("google"))); | public void testWrappedList() {
Array<StringFieldValue> array = new Array<>(DataType.getArray(DataType.STRING));
List<String> list = new ArrayList<>();
list.add("foo");
list.add("bar");
list.add("baz");
array.assign(list);
assertEquals(3, array.size());
assertEquals(3, list.size());
assertFalse(array.isEmpty());
assertFalse(list.isEmpty());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
array.remove(2);
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
list.remove(1);
assertEquals(1, array.size());
assertEquals(1, list.size());
assertEquals("foo", list.get(0));
assertEquals(new StringFieldValue("foo"), array.get(0));
list.add("bar");
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
array.add(new StringFieldValue("baz"));
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertTrue(array.contains(new StringFieldValue("foo")));
assertTrue(list.contains("foo"));
list.add("foo");
assertEquals(0, list.indexOf("foo"));
assertEquals(0, array.indexOf(new StringFieldValue("foo")));
assertEquals(3, list.lastIndexOf("foo"));
assertEquals(3, array.lastIndexOf(new StringFieldValue("foo")));
list.set(3, "banana");
assertEquals(4, array.size());
assertEquals(4, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals("banana", list.get(3));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertEquals(new StringFieldValue("banana"), array.get(3));
array.set(3, new StringFieldValue("apple"));
assertEquals(4, array.size());
assertEquals(4, list.size());
assertEquals("foo", list.get(0));
assertEquals("bar", list.get(1));
assertEquals("baz", list.get(2));
assertEquals("apple", list.get(3));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("bar"), array.get(1));
assertEquals(new StringFieldValue("baz"), array.get(2));
assertEquals(new StringFieldValue("apple"), array.get(3));
list.remove("bar");
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("baz", list.get(1));
assertEquals("apple", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("baz"), array.get(1));
assertEquals(new StringFieldValue("apple"), array.get(2));
array.remove(new StringFieldValue("baz"));
assertEquals(2, array.size());
assertEquals(2, list.size());
assertEquals("foo", list.get(0));
assertEquals("apple", list.get(1));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("apple"), array.get(1));
assertNotNull(array.toArray(new StringFieldValue[5]));
try {
array.retainAll(new ArrayList<StringFieldValue>());
fail("Not implemented yet.");
} catch (UnsupportedOperationException uoe) {
}
array.add(1, new StringFieldValue("boo"));
assertEquals(3, array.size());
assertEquals(3, list.size());
assertEquals("foo", list.get(0));
assertEquals("boo", list.get(1));
assertEquals("apple", list.get(2));
assertEquals(new StringFieldValue("foo"), array.get(0));
assertEquals(new StringFieldValue("boo"), array.get(1));
assertEquals(new StringFieldValue("apple"), array.get(2));
array.toString();
List<StringFieldValue> subArray = array.subList(1, 3);
assertEquals(2, subArray.size());
assertEquals(new StringFieldValue("boo"), subArray.get(0));
assertEquals(new StringFieldValue("apple"), subArray.get(1));
assertEquals(false, array.containsAll(List.of(new StringFieldValue("bob"))));
assertEquals(true, array.containsAll(List.of(new StringFieldValue("foo"), new StringFieldValue("boo"), new StringFieldValue("apple"))));
array.removeAll(List.of(new StringFieldValue("foo"), new StringFieldValue("boo")));
assertEquals(1, array.size());
assertEquals(1, list.size());
assertEquals("apple", list.get(0));
assertEquals(new StringFieldValue("apple"), array.get(0));
array.add(new StringFieldValue("ibm"));
assertEquals(2, array.size());
assertEquals(2, list.size());
{
Iterator<StringFieldValue> it = array.iterator();
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("apple"), it.next());
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("ibm"), it.next());
assertFalse(it.hasNext());
}
{
ListIterator<StringFieldValue> it = array.listIterator();
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("apple"), it.next());
assertTrue(it.hasNext());
assertEquals(new StringFieldValue("ibm"), it.next());
assertFalse(it.hasNext());
}
array.addAll(List.of(new StringFieldValue("microsoft"), new StringFieldValue("google")));
assertEquals(4, array.size());
assertEquals(4, list.size());
array.clear();
assertEquals(0, array.size());
assertEquals(0, list.size());
} | class ArrayTestCase {
@Test
public void testToArray() {
ArrayDataType dt = new ArrayDataType(DataType.STRING);
Array<StringFieldValue> arr = new Array<>(dt);
arr.add(new StringFieldValue("a"));
arr.add(new StringFieldValue("b"));
arr.add(new StringFieldValue("c"));
StringFieldValue[] tooSmall = new StringFieldValue[0];
StringFieldValue[] bigEnough = new StringFieldValue[3];
StringFieldValue[] a = arr.toArray(tooSmall);
assertNotSame(tooSmall, a);
assertEquals(new StringFieldValue("a"), a[0]);
assertEquals(new StringFieldValue("b"), a[1]);
assertEquals(new StringFieldValue("c"), a[2]);
StringFieldValue[] b = arr.toArray(bigEnough);
assertSame(bigEnough, b);
assertEquals(new StringFieldValue("a"), b[0]);
assertEquals(new StringFieldValue("b"), b[1]);
assertEquals(new StringFieldValue("c"), b[2]);
}
@Test
public void testCreateIllegalArray() {
ArrayList<FieldValue> arrayList = new ArrayList<>();
arrayList.add(new StringFieldValue("foo"));
arrayList.add(new IntegerFieldValue(1000));
DataType stringType = new ArrayDataType(DataType.STRING);
try {
Array<FieldValue> illegalArray = new Array<>(stringType, arrayList);
fail("Expected an exception");
} catch (IllegalArgumentException e) {
assertEquals("FieldValue 1000 is not compatible with datatype "
+ "Array<string> (code: -1486737430).",
e.getMessage());
}
DataType intType = new ArrayDataType(DataType.INT);
Array<IntegerFieldValue> intArray = new Array<>(intType);
intArray.add(new IntegerFieldValue(42));
Array<StringFieldValue> stringArray = new Array<>(stringType);
try {
stringArray.assign(intArray);
fail("Expected an exception");
} catch (IllegalArgumentException e) {
assertEquals("Incompatible data types. Got datatype int (code: 0),"
+ " expected datatype string (code: 2)",
e.getMessage());
}
}
@Test
@Test
public void testListWrapperToArray() {
Array<StringFieldValue> array = new Array<>(new ArrayDataType(DataType.STRING));
List<StringFieldValue> assignFrom = new ArrayList<>(3);
assignFrom.add(new StringFieldValue("a"));
assignFrom.add(new StringFieldValue("b"));
assignFrom.add(new StringFieldValue("c"));
array.assign(assignFrom);
final StringFieldValue[] expected = new StringFieldValue[] { new StringFieldValue("a"), new StringFieldValue("b"),
new StringFieldValue("c") };
assertTrue(Arrays.equals(expected, array.toArray(new StringFieldValue[0])));
}
@Test
public void testEquals() {
Array<StringFieldValue> a = new Array<>(new ArrayDataType(DataType.STRING));
a.add(new StringFieldValue("mumbo jumbo 1"));
a.add(new StringFieldValue("mumbo jumbo 2"));
Array<StringFieldValue> b = new Array<>(new ArrayDataType(DataType.STRING));
b.add(new StringFieldValue("mumbo jumbo 1"));
b.add(new StringFieldValue("mumbo jumbo 2"));
assertEquals(a, b);
assertEquals(b, a);
assertEquals(0, a.compareTo(b));
assertEquals(0, b.compareTo(a));
b.clear();
List<String> l = new ArrayList<>();
l.add("mumbo jumbo 1");
l.add("mumbo jumbo 2");
b.assign(l);
assertEquals(a, b);
assertEquals(b, a);
assertEquals(0, a.compareTo(b));
assertEquals(0, b.compareTo(a));
}
@Test
public void testLess() {
Array<StringFieldValue> a = new Array<>(new ArrayDataType(DataType.STRING));
a.add(new StringFieldValue("mumbo jumbo 1"));
a.add(new StringFieldValue("mumbo jumbo 3"));
Array<StringFieldValue> b = new Array<>(new ArrayDataType(DataType.STRING));
b.add(new StringFieldValue("mumbo jumbo 1"));
b.add(new StringFieldValue("mumbo jumbo 2"));
assertNotEquals(a, b);
assertNotEquals(b, a);
assertEquals(1, a.compareTo(b));
assertEquals(-1, b.compareTo(a));
b.clear();
List<String> l = new ArrayList<>();
l.add("mumbo jumbo 1");
l.add("mumbo jumbo 2");
b.assign(l);
assertNotEquals(a, b);
assertNotEquals(b, a);
assertEquals(1, a.compareTo(b));
assertEquals(-1, b.compareTo(a));
}
} | class ArrayTestCase {
@Test
public void testToArray() {
ArrayDataType dt = new ArrayDataType(DataType.STRING);
Array<StringFieldValue> arr = new Array<>(dt);
arr.add(new StringFieldValue("a"));
arr.add(new StringFieldValue("b"));
arr.add(new StringFieldValue("c"));
StringFieldValue[] tooSmall = new StringFieldValue[0];
StringFieldValue[] bigEnough = new StringFieldValue[3];
StringFieldValue[] a = arr.toArray(tooSmall);
assertNotSame(tooSmall, a);
assertEquals(new StringFieldValue("a"), a[0]);
assertEquals(new StringFieldValue("b"), a[1]);
assertEquals(new StringFieldValue("c"), a[2]);
StringFieldValue[] b = arr.toArray(bigEnough);
assertSame(bigEnough, b);
assertEquals(new StringFieldValue("a"), b[0]);
assertEquals(new StringFieldValue("b"), b[1]);
assertEquals(new StringFieldValue("c"), b[2]);
}
@Test
public void testCreateIllegalArray() {
ArrayList<FieldValue> arrayList = new ArrayList<>();
arrayList.add(new StringFieldValue("foo"));
arrayList.add(new IntegerFieldValue(1000));
DataType stringType = new ArrayDataType(DataType.STRING);
try {
Array<FieldValue> illegalArray = new Array<>(stringType, arrayList);
fail("Expected an exception");
} catch (IllegalArgumentException e) {
assertEquals("FieldValue 1000 is not compatible with datatype "
+ "Array<string> (code: -1486737430).",
e.getMessage());
}
DataType intType = new ArrayDataType(DataType.INT);
Array<IntegerFieldValue> intArray = new Array<>(intType);
intArray.add(new IntegerFieldValue(42));
Array<StringFieldValue> stringArray = new Array<>(stringType);
try {
stringArray.assign(intArray);
fail("Expected an exception");
} catch (IllegalArgumentException e) {
assertEquals("Incompatible data types. Got datatype int (code: 0),"
+ " expected datatype string (code: 2)",
e.getMessage());
}
}
@Test
@Test
public void testListWrapperToArray() {
Array<StringFieldValue> array = new Array<>(new ArrayDataType(DataType.STRING));
List<StringFieldValue> assignFrom = new ArrayList<>(3);
assignFrom.add(new StringFieldValue("a"));
assignFrom.add(new StringFieldValue("b"));
assignFrom.add(new StringFieldValue("c"));
array.assign(assignFrom);
final StringFieldValue[] expected = new StringFieldValue[] { new StringFieldValue("a"), new StringFieldValue("b"),
new StringFieldValue("c") };
assertTrue(Arrays.equals(expected, array.toArray(new StringFieldValue[0])));
}
@Test
public void testEquals() {
Array<StringFieldValue> a = new Array<>(new ArrayDataType(DataType.STRING));
a.add(new StringFieldValue("mumbo jumbo 1"));
a.add(new StringFieldValue("mumbo jumbo 2"));
Array<StringFieldValue> b = new Array<>(new ArrayDataType(DataType.STRING));
b.add(new StringFieldValue("mumbo jumbo 1"));
b.add(new StringFieldValue("mumbo jumbo 2"));
assertEquals(a, b);
assertEquals(b, a);
assertEquals(0, a.compareTo(b));
assertEquals(0, b.compareTo(a));
b.clear();
List<String> l = new ArrayList<>();
l.add("mumbo jumbo 1");
l.add("mumbo jumbo 2");
b.assign(l);
assertEquals(a, b);
assertEquals(b, a);
assertEquals(0, a.compareTo(b));
assertEquals(0, b.compareTo(a));
}
@Test
public void testLess() {
Array<StringFieldValue> a = new Array<>(new ArrayDataType(DataType.STRING));
a.add(new StringFieldValue("mumbo jumbo 1"));
a.add(new StringFieldValue("mumbo jumbo 3"));
Array<StringFieldValue> b = new Array<>(new ArrayDataType(DataType.STRING));
b.add(new StringFieldValue("mumbo jumbo 1"));
b.add(new StringFieldValue("mumbo jumbo 2"));
assertNotEquals(a, b);
assertNotEquals(b, a);
assertEquals(1, a.compareTo(b));
assertEquals(-1, b.compareTo(a));
b.clear();
List<String> l = new ArrayList<>();
l.add("mumbo jumbo 1");
l.add("mumbo jumbo 2");
b.assign(l);
assertNotEquals(a, b);
assertNotEquals(b, a);
assertEquals(1, a.compareTo(b));
assertEquals(-1, b.compareTo(a));
}
} |
🤯 | public long getConnectedAt(TimeUnit unit) {
return parentRequest.getConnectedAt(unit);
} | return parentRequest.getConnectedAt(unit); | public long getConnectedAt(TimeUnit unit) {
return parentRequest.getConnectedAt(unit);
} | class Builder {
private final HttpRequest parent;
private com.yahoo.jdisc.http.HttpRequest jdiscRequest;
Method method = null;
Version version = null;
Map<String, String> properties = new HashMap<>();
InputStream requestData = null;
URI uri = null;
CurrentContainer container = null;
private static final String nag = " must be set before the attempted operation.";
SocketAddress remoteAddress;
private void boom(Object ref, String what) {
if (ref == null) {
throw new IllegalStateException(what + nag);
}
}
private void requireUri() {
boom(uri, "An URI");
}
private void requireContainer() {
boom(container, "A CurrentContainer instance");
}
private void ensureJdiscParent() {
if (jdiscRequest == null) {
if (parent == null) {
throw new IllegalStateException("Neither another HttpRequest nor JDisc request available.");
} else {
jdiscRequest = parent.getJDiscRequest();
}
}
}
private void ensureRequestData() {
if (requestData == null) {
if (parent == null) {
throw new IllegalStateException(
"Neither another HttpRequest nor request data input stream available.");
} else {
requestData = parent.getData();
}
}
}
/**
* Instantiate a request builder with defaults from an existing request.
* If the request is null, a JDisc request must be set explitly using
* {@link
* instantiating any HTTP request.
*
* @param request source for defaults and parent JDisc request, may be null
* @see HttpRequest
*/
public Builder(HttpRequest request) {
this(request, request.getJDiscRequest());
}
/**
* Instantiate a request builder with defaults from an existing request.
*
* @param request parent JDisc request
* @see HttpRequest
*/
public Builder(com.yahoo.jdisc.http.HttpRequest request) {
this(null, request);
}
private Builder(HttpRequest parent, com.yahoo.jdisc.http.HttpRequest jdiscRequest) {
this.parent = parent;
this.jdiscRequest = jdiscRequest;
populateProperties();
}
private void populateProperties() {
if (parent == null) return;
properties.putAll(parent.propertyMap());
}
/**
* Add a parameter to the request. Multi-value parameters are not supported.
*
* @param key parameter name
* @param value parameter value
* @return this Builder instance
*/
public Builder put(String key, String value) {
properties.put(key, value);
return this;
}
/**
* Removes the parameter from the request properties.
* If there is no such parameter, nothing will be done.
*/
public Builder removeProperty(String parameterName) {
properties.remove(parameterName);
return this;
}
/**
* Set the HTTP method for the new request.
*
* @param method the HTTP method to use for the new request
* @return this Builder instance
*/
public Builder method(Method method) {
this.method = method;
return this;
}
/**
* Define the JDisc parent request.
*
* @param request a valid JDisc request for the current container
* @return this Builder instance
*/
public Builder jdiscRequest(com.yahoo.jdisc.http.HttpRequest request) {
this.jdiscRequest = request;
return this;
}
/**
* Set an inputstream to use for the request. If not set, the data from
* the original HttpRequest is used.
*
* @param requestData data to be consumed, e.g. POST data
* @return this Builder instance
*/
public Builder requestData(InputStream requestData) {
this.requestData = requestData;
return this;
}
/**
* Set the URI of the server request created.
*
* @param uri a valid URI for a server request
* @return this Builder instance
*/
public Builder uri(URI uri) {
this.uri = uri;
return this;
}
/**
* Create a new HTTP request without creating a new JDisc request. This
* is for scenarios where another HTTP request handler is invoked
* directly without dispatching through JDisc. The parent JDisc request
* for the original HttpRequest will be passed on the new HttpRequest
* instance's JDisc request, but no properties will be propagated into
* the original JDisc request.
*
* @return a new HttpRequest instance reflecting the given request data and parameters
*/
public HttpRequest createDirectRequest() {
ensureRequestData();
ensureJdiscParent();
return new HttpRequest(jdiscRequest, requestData, properties);
}
/**
* Start of API for synchronous HTTP request dispatch. Not yet ready for use.
*
* @return a new client request
*/
public HttpRequest createClientRequest() {
ensureJdiscParent();
requireUri();
com.yahoo.jdisc.http.HttpRequest clientRequest;
if (method == null) {
clientRequest = com.yahoo.jdisc.http.HttpRequest
.newClientRequest(jdiscRequest, uri);
} else {
if (version == null) {
clientRequest = com.yahoo.jdisc.http.HttpRequest
.newClientRequest(jdiscRequest, uri, method);
} else {
clientRequest = com.yahoo.jdisc.http.HttpRequest
.newClientRequest(jdiscRequest, uri, method,
version);
}
}
setParameters(clientRequest);
return new HttpRequest(clientRequest, requestData, properties);
}
/**
* Start of API for synchronous HTTP request dispatch. Not yet ready for use.
*
* @return a new server request
*/
public HttpRequest createServerRequest() {
requireUri();
requireContainer();
com.yahoo.jdisc.http.HttpRequest serverRequest;
if (method == null) {
serverRequest = com.yahoo.jdisc.http.HttpRequest
.newServerRequest(container, uri);
} else {
if (version == null) {
serverRequest = com.yahoo.jdisc.http.HttpRequest
.newServerRequest(container, uri, method);
} else {
if (remoteAddress == null) {
serverRequest = com.yahoo.jdisc.http.HttpRequest
.newServerRequest(container, uri, method,
version);
} else {
serverRequest = com.yahoo.jdisc.http.HttpRequest
.newServerRequest(container, uri, method,
version, remoteAddress);
}
}
}
setParameters(serverRequest);
return new HttpRequest(serverRequest, requestData, properties);
}
private void setParameters(com.yahoo.jdisc.http.HttpRequest request) {
for (Map.Entry<String, String> entry : properties.entrySet()) {
request.parameters().put(entry.getKey(), wrap(entry.getValue()));
}
}
} | class Builder {
private final HttpRequest parent;
private com.yahoo.jdisc.http.HttpRequest jdiscRequest;
Method method = null;
Version version = null;
Map<String, String> properties = new HashMap<>();
InputStream requestData = null;
URI uri = null;
CurrentContainer container = null;
private static final String nag = " must be set before the attempted operation.";
SocketAddress remoteAddress;
private void boom(Object ref, String what) {
if (ref == null) {
throw new IllegalStateException(what + nag);
}
}
private void requireUri() {
boom(uri, "An URI");
}
private void requireContainer() {
boom(container, "A CurrentContainer instance");
}
private void ensureJdiscParent() {
if (jdiscRequest == null) {
if (parent == null) {
throw new IllegalStateException("Neither another HttpRequest nor JDisc request available.");
} else {
jdiscRequest = parent.getJDiscRequest();
}
}
}
private void ensureRequestData() {
if (requestData == null) {
if (parent == null) {
throw new IllegalStateException(
"Neither another HttpRequest nor request data input stream available.");
} else {
requestData = parent.getData();
}
}
}
/**
* Instantiate a request builder with defaults from an existing request.
* If the request is null, a JDisc request must be set explitly using
* {@link
* instantiating any HTTP request.
*
* @param request source for defaults and parent JDisc request, may be null
* @see HttpRequest
*/
public Builder(HttpRequest request) {
this(request, request.getJDiscRequest());
}
/**
* Instantiate a request builder with defaults from an existing request.
*
* @param request parent JDisc request
* @see HttpRequest
*/
public Builder(com.yahoo.jdisc.http.HttpRequest request) {
this(null, request);
}
private Builder(HttpRequest parent, com.yahoo.jdisc.http.HttpRequest jdiscRequest) {
this.parent = parent;
this.jdiscRequest = jdiscRequest;
populateProperties();
}
private void populateProperties() {
if (parent == null) return;
properties.putAll(parent.propertyMap());
}
/**
* Add a parameter to the request. Multi-value parameters are not supported.
*
* @param key parameter name
* @param value parameter value
* @return this Builder instance
*/
public Builder put(String key, String value) {
properties.put(key, value);
return this;
}
/**
* Removes the parameter from the request properties.
* If there is no such parameter, nothing will be done.
*/
public Builder removeProperty(String parameterName) {
properties.remove(parameterName);
return this;
}
/**
* Set the HTTP method for the new request.
*
* @param method the HTTP method to use for the new request
* @return this Builder instance
*/
public Builder method(Method method) {
this.method = method;
return this;
}
/**
* Define the JDisc parent request.
*
* @param request a valid JDisc request for the current container
* @return this Builder instance
*/
public Builder jdiscRequest(com.yahoo.jdisc.http.HttpRequest request) {
this.jdiscRequest = request;
return this;
}
/**
* Set an inputstream to use for the request. If not set, the data from
* the original HttpRequest is used.
*
* @param requestData data to be consumed, e.g. POST data
* @return this Builder instance
*/
public Builder requestData(InputStream requestData) {
this.requestData = requestData;
return this;
}
/**
* Set the URI of the server request created.
*
* @param uri a valid URI for a server request
* @return this Builder instance
*/
public Builder uri(URI uri) {
this.uri = uri;
return this;
}
/**
* Create a new HTTP request without creating a new JDisc request. This
* is for scenarios where another HTTP request handler is invoked
* directly without dispatching through JDisc. The parent JDisc request
* for the original HttpRequest will be passed on the new HttpRequest
* instance's JDisc request, but no properties will be propagated into
* the original JDisc request.
*
* @return a new HttpRequest instance reflecting the given request data and parameters
*/
public HttpRequest createDirectRequest() {
ensureRequestData();
ensureJdiscParent();
return new HttpRequest(jdiscRequest, requestData, properties);
}
/**
* Start of API for synchronous HTTP request dispatch. Not yet ready for use.
*
* @return a new client request
*/
public HttpRequest createClientRequest() {
ensureJdiscParent();
requireUri();
com.yahoo.jdisc.http.HttpRequest clientRequest;
if (method == null) {
clientRequest = com.yahoo.jdisc.http.HttpRequest
.newClientRequest(jdiscRequest, uri);
} else {
if (version == null) {
clientRequest = com.yahoo.jdisc.http.HttpRequest
.newClientRequest(jdiscRequest, uri, method);
} else {
clientRequest = com.yahoo.jdisc.http.HttpRequest
.newClientRequest(jdiscRequest, uri, method,
version);
}
}
setParameters(clientRequest);
return new HttpRequest(clientRequest, requestData, properties);
}
/**
* Start of API for synchronous HTTP request dispatch. Not yet ready for use.
*
* @return a new server request
*/
public HttpRequest createServerRequest() {
requireUri();
requireContainer();
com.yahoo.jdisc.http.HttpRequest serverRequest;
if (method == null) {
serverRequest = com.yahoo.jdisc.http.HttpRequest
.newServerRequest(container, uri);
} else {
if (version == null) {
serverRequest = com.yahoo.jdisc.http.HttpRequest
.newServerRequest(container, uri, method);
} else {
if (remoteAddress == null) {
serverRequest = com.yahoo.jdisc.http.HttpRequest
.newServerRequest(container, uri, method,
version);
} else {
serverRequest = com.yahoo.jdisc.http.HttpRequest
.newServerRequest(container, uri, method,
version, remoteAddress);
}
}
}
setParameters(serverRequest);
return new HttpRequest(serverRequest, requestData, properties);
}
private void setParameters(com.yahoo.jdisc.http.HttpRequest request) {
for (Map.Entry<String, String> entry : properties.entrySet()) {
request.parameters().put(entry.getKey(), wrap(entry.getValue()));
}
}
} |
Metrics gathering is done every 1 minute so the interval is typically set just shy of 60 seconds. Why 100 ms? | public JobRunner(Controller controller, Duration duration, ExecutorService executors, StepRunner runner) {
this(controller, duration, executors, runner, new Metrics(controller.metric(), Duration.ofMillis(100)));
} | this(controller, duration, executors, runner, new Metrics(controller.metric(), Duration.ofMillis(100))); | public JobRunner(Controller controller, Duration duration, ExecutorService executors, StepRunner runner) {
this(controller, duration, executors, runner, new Metrics(controller.metric(), Duration.ofMillis(100)));
} | class JobRunner extends ControllerMaintainer {
public static final Duration jobTimeout = Duration.ofDays(1).plusHours(1);
private static final Logger log = Logger.getLogger(JobRunner.class.getName());
private final JobController jobs;
private final ExecutorService executors;
private final StepRunner runner;
private final Metrics metrics;
public JobRunner(Controller controller, Duration duration) {
this(controller, duration, Executors.newFixedThreadPool(32, new DaemonThreadFactory("job-runner-")),
new InternalStepRunner(controller));
}
JobRunner(Controller controller, Duration duration, ExecutorService executors, StepRunner runner, Metrics metrics) {
super(controller, duration);
this.jobs = controller.jobController();
this.jobs.setRunner(this::advance);
this.executors = executors;
this.runner = runner;
this.metrics = metrics;
}
@Override
protected double maintain() {
execute(() -> jobs.active().forEach(this::advance));
jobs.collectGarbage();
return 1.0;
}
@Override
public void shutdown() {
super.shutdown();
metrics.shutdown();
executors.shutdown();
}
@Override
public void awaitShutdown() {
super.awaitShutdown();
try {
if ( ! executors.awaitTermination(40, TimeUnit.SECONDS)) {
executors.shutdownNow();
if ( ! executors.awaitTermination(10, TimeUnit.SECONDS))
throw new IllegalStateException("Failed shutting down " + JobRunner.class.getName());
}
}
catch (InterruptedException e) {
log.log(Level.WARNING, "Interrupted during shutdown of " + JobRunner.class.getName(), e);
Thread.currentThread().interrupt();
}
}
public void advance(Run run) {
if ( ! jobs.isDisabled(run.id().job())) advance(run.id());
}
/** Advances each of the ready steps for the given run, or marks it as finished, and stashes it. Public for testing. */
public void advance(RunId id) {
jobs.locked(id, run -> {
if ( ! run.hasFailed()
&& controller().clock().instant().isAfter(run.sleepUntil().orElse(run.start()).plus(jobTimeout)))
execute(() -> {
jobs.abort(run.id(), "job timeout of " + jobTimeout + " reached", false);
advance(run.id());
});
else if (run.readySteps().isEmpty())
execute(() -> finish(run.id()));
else if (run.hasFailed() || run.sleepUntil().map(sleepUntil -> ! sleepUntil.isAfter(controller().clock().instant())).orElse(true))
run.readySteps().forEach(step -> execute(() -> advance(run.id(), step)));
return null;
});
}
private void finish(RunId id) {
try {
jobs.finish(id);
if ( ! id.type().environment().isManuallyDeployed())
controller().applications().deploymentTrigger().notifyOfCompletion(id.application());
}
catch (TimeoutException e) {
}
catch (Exception e) {
log.log(Level.WARNING, "Exception finishing " + id, e);
}
}
/** Attempts to advance the status of the given step, for the given run. */
private void advance(RunId id, Step step) {
try {
AtomicBoolean changed = new AtomicBoolean(false);
jobs.locked(id.application(), id.type(), step, lockedStep -> {
jobs.locked(id, run -> {
if ( ! run.readySteps().contains(step)) {
changed.set(true);
return run;
}
if (run.stepInfo(lockedStep.get()).orElseThrow().startTime().isEmpty())
run = run.with(controller().clock().instant(), lockedStep);
return run;
});
if ( ! changed.get()) {
runner.run(lockedStep, id).ifPresent(status -> {
jobs.update(id, status, lockedStep);
changed.set(true);
});
}
});
if (changed.get())
jobs.active(id).ifPresent(this::advance);
}
catch (TimeoutException e) {
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Exception attempting to advance " + step + " of " + id, e);
}
}
private void execute(Runnable task) {
metrics.queued.incrementAndGet();
executors.execute(() -> {
metrics.queued.decrementAndGet();
metrics.active.incrementAndGet();
try { task.run(); }
finally { metrics.active.decrementAndGet(); }
});
}
static class Metrics {
private final AtomicInteger queued = new AtomicInteger();
private final AtomicInteger active = new AtomicInteger();
private final ScheduledExecutorService reporter = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("job-runner-metrics-"));
private final Metric metric;
private final Metric.Context context;
Metrics(Metric metric, Duration interval) {
this.metric = metric;
this.context = metric.createContext(Map.of());
reporter.scheduleAtFixedRate(this::report, interval.toMillis(), interval.toMillis(), TimeUnit.MILLISECONDS);
}
void report() {
metric.set(ControllerMetrics.DEPLOYMENT_JOBS_QUEUED.baseName(), queued.get(), context);
metric.set(ControllerMetrics.DEPLOYMENT_JOBS_ACTIVE.baseName(), active.get(), context);
}
void shutdown() {
reporter.shutdown();
}
}
} | class JobRunner extends ControllerMaintainer {
public static final Duration jobTimeout = Duration.ofDays(1).plusHours(1);
private static final Logger log = Logger.getLogger(JobRunner.class.getName());
private final JobController jobs;
private final ExecutorService executors;
private final StepRunner runner;
private final Metrics metrics;
public JobRunner(Controller controller, Duration duration) {
this(controller, duration, Executors.newFixedThreadPool(32, new DaemonThreadFactory("job-runner-")),
new InternalStepRunner(controller));
}
JobRunner(Controller controller, Duration duration, ExecutorService executors, StepRunner runner, Metrics metrics) {
super(controller, duration);
this.jobs = controller.jobController();
this.jobs.setRunner(this::advance);
this.executors = executors;
this.runner = runner;
this.metrics = metrics;
}
@Override
protected double maintain() {
execute(() -> jobs.active().forEach(this::advance));
jobs.collectGarbage();
return 1.0;
}
@Override
public void shutdown() {
super.shutdown();
metrics.shutdown();
executors.shutdown();
}
@Override
public void awaitShutdown() {
super.awaitShutdown();
try {
if ( ! executors.awaitTermination(40, TimeUnit.SECONDS)) {
executors.shutdownNow();
if ( ! executors.awaitTermination(10, TimeUnit.SECONDS))
throw new IllegalStateException("Failed shutting down " + JobRunner.class.getName());
}
}
catch (InterruptedException e) {
log.log(Level.WARNING, "Interrupted during shutdown of " + JobRunner.class.getName(), e);
Thread.currentThread().interrupt();
}
}
public void advance(Run run) {
if ( ! jobs.isDisabled(run.id().job())) advance(run.id());
}
/** Advances each of the ready steps for the given run, or marks it as finished, and stashes it. Public for testing. */
public void advance(RunId id) {
jobs.locked(id, run -> {
if ( ! run.hasFailed()
&& controller().clock().instant().isAfter(run.sleepUntil().orElse(run.start()).plus(jobTimeout)))
execute(() -> {
jobs.abort(run.id(), "job timeout of " + jobTimeout + " reached", false);
advance(run.id());
});
else if (run.readySteps().isEmpty())
execute(() -> finish(run.id()));
else if (run.hasFailed() || run.sleepUntil().map(sleepUntil -> ! sleepUntil.isAfter(controller().clock().instant())).orElse(true))
run.readySteps().forEach(step -> execute(() -> advance(run.id(), step)));
return null;
});
}
private void finish(RunId id) {
try {
jobs.finish(id);
if ( ! id.type().environment().isManuallyDeployed())
controller().applications().deploymentTrigger().notifyOfCompletion(id.application());
}
catch (TimeoutException e) {
}
catch (Exception e) {
log.log(Level.WARNING, "Exception finishing " + id, e);
}
}
/** Attempts to advance the status of the given step, for the given run. */
private void advance(RunId id, Step step) {
try {
AtomicBoolean changed = new AtomicBoolean(false);
jobs.locked(id.application(), id.type(), step, lockedStep -> {
jobs.locked(id, run -> {
if ( ! run.readySteps().contains(step)) {
changed.set(true);
return run;
}
if (run.stepInfo(lockedStep.get()).orElseThrow().startTime().isEmpty())
run = run.with(controller().clock().instant(), lockedStep);
return run;
});
if ( ! changed.get()) {
runner.run(lockedStep, id).ifPresent(status -> {
jobs.update(id, status, lockedStep);
changed.set(true);
});
}
});
if (changed.get())
jobs.active(id).ifPresent(this::advance);
}
catch (TimeoutException e) {
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Exception attempting to advance " + step + " of " + id, e);
}
}
private void execute(Runnable task) {
metrics.queued.incrementAndGet();
executors.execute(() -> {
metrics.queued.decrementAndGet();
metrics.active.incrementAndGet();
try { task.run(); }
finally { metrics.active.decrementAndGet(); }
});
}
static class Metrics {
private final AtomicInteger queued = new AtomicInteger();
private final AtomicInteger active = new AtomicInteger();
private final ScheduledExecutorService reporter = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("job-runner-metrics-"));
private final Metric metric;
private final Metric.Context context;
Metrics(Metric metric, Duration interval) {
this.metric = metric;
this.context = metric.createContext(Map.of());
reporter.scheduleAtFixedRate(this::report, interval.toMillis(), interval.toMillis(), TimeUnit.MILLISECONDS);
}
void report() {
metric.set(ControllerMetrics.DEPLOYMENT_JOBS_QUEUED.baseName(), queued.get(), context);
metric.set(ControllerMetrics.DEPLOYMENT_JOBS_ACTIVE.baseName(), active.get(), context);
}
void shutdown() {
reporter.shutdown();
}
}
} |
This gives our metrics framework something to chew on. A uniform sampling every 100ms, and sum and count of those samples, provides a decent measurement of the underlying metric. If we were to sample only once every 60s, we could only hope for a _very_ coarse approximation, and due to metrics collection, and job runner maintenance, happening with intervals with a huge GCD relative to the volatility of what we measure, we wouldn't even get that. | public JobRunner(Controller controller, Duration duration, ExecutorService executors, StepRunner runner) {
this(controller, duration, executors, runner, new Metrics(controller.metric(), Duration.ofMillis(100)));
} | this(controller, duration, executors, runner, new Metrics(controller.metric(), Duration.ofMillis(100))); | public JobRunner(Controller controller, Duration duration, ExecutorService executors, StepRunner runner) {
this(controller, duration, executors, runner, new Metrics(controller.metric(), Duration.ofMillis(100)));
} | class JobRunner extends ControllerMaintainer {
public static final Duration jobTimeout = Duration.ofDays(1).plusHours(1);
private static final Logger log = Logger.getLogger(JobRunner.class.getName());
private final JobController jobs;
private final ExecutorService executors;
private final StepRunner runner;
private final Metrics metrics;
public JobRunner(Controller controller, Duration duration) {
this(controller, duration, Executors.newFixedThreadPool(32, new DaemonThreadFactory("job-runner-")),
new InternalStepRunner(controller));
}
JobRunner(Controller controller, Duration duration, ExecutorService executors, StepRunner runner, Metrics metrics) {
super(controller, duration);
this.jobs = controller.jobController();
this.jobs.setRunner(this::advance);
this.executors = executors;
this.runner = runner;
this.metrics = metrics;
}
@Override
protected double maintain() {
execute(() -> jobs.active().forEach(this::advance));
jobs.collectGarbage();
return 1.0;
}
@Override
public void shutdown() {
super.shutdown();
metrics.shutdown();
executors.shutdown();
}
@Override
public void awaitShutdown() {
super.awaitShutdown();
try {
if ( ! executors.awaitTermination(40, TimeUnit.SECONDS)) {
executors.shutdownNow();
if ( ! executors.awaitTermination(10, TimeUnit.SECONDS))
throw new IllegalStateException("Failed shutting down " + JobRunner.class.getName());
}
}
catch (InterruptedException e) {
log.log(Level.WARNING, "Interrupted during shutdown of " + JobRunner.class.getName(), e);
Thread.currentThread().interrupt();
}
}
public void advance(Run run) {
if ( ! jobs.isDisabled(run.id().job())) advance(run.id());
}
/** Advances each of the ready steps for the given run, or marks it as finished, and stashes it. Public for testing. */
public void advance(RunId id) {
jobs.locked(id, run -> {
if ( ! run.hasFailed()
&& controller().clock().instant().isAfter(run.sleepUntil().orElse(run.start()).plus(jobTimeout)))
execute(() -> {
jobs.abort(run.id(), "job timeout of " + jobTimeout + " reached", false);
advance(run.id());
});
else if (run.readySteps().isEmpty())
execute(() -> finish(run.id()));
else if (run.hasFailed() || run.sleepUntil().map(sleepUntil -> ! sleepUntil.isAfter(controller().clock().instant())).orElse(true))
run.readySteps().forEach(step -> execute(() -> advance(run.id(), step)));
return null;
});
}
private void finish(RunId id) {
try {
jobs.finish(id);
if ( ! id.type().environment().isManuallyDeployed())
controller().applications().deploymentTrigger().notifyOfCompletion(id.application());
}
catch (TimeoutException e) {
}
catch (Exception e) {
log.log(Level.WARNING, "Exception finishing " + id, e);
}
}
/** Attempts to advance the status of the given step, for the given run. */
private void advance(RunId id, Step step) {
try {
AtomicBoolean changed = new AtomicBoolean(false);
jobs.locked(id.application(), id.type(), step, lockedStep -> {
jobs.locked(id, run -> {
if ( ! run.readySteps().contains(step)) {
changed.set(true);
return run;
}
if (run.stepInfo(lockedStep.get()).orElseThrow().startTime().isEmpty())
run = run.with(controller().clock().instant(), lockedStep);
return run;
});
if ( ! changed.get()) {
runner.run(lockedStep, id).ifPresent(status -> {
jobs.update(id, status, lockedStep);
changed.set(true);
});
}
});
if (changed.get())
jobs.active(id).ifPresent(this::advance);
}
catch (TimeoutException e) {
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Exception attempting to advance " + step + " of " + id, e);
}
}
private void execute(Runnable task) {
metrics.queued.incrementAndGet();
executors.execute(() -> {
metrics.queued.decrementAndGet();
metrics.active.incrementAndGet();
try { task.run(); }
finally { metrics.active.decrementAndGet(); }
});
}
static class Metrics {
private final AtomicInteger queued = new AtomicInteger();
private final AtomicInteger active = new AtomicInteger();
private final ScheduledExecutorService reporter = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("job-runner-metrics-"));
private final Metric metric;
private final Metric.Context context;
Metrics(Metric metric, Duration interval) {
this.metric = metric;
this.context = metric.createContext(Map.of());
reporter.scheduleAtFixedRate(this::report, interval.toMillis(), interval.toMillis(), TimeUnit.MILLISECONDS);
}
void report() {
metric.set(ControllerMetrics.DEPLOYMENT_JOBS_QUEUED.baseName(), queued.get(), context);
metric.set(ControllerMetrics.DEPLOYMENT_JOBS_ACTIVE.baseName(), active.get(), context);
}
void shutdown() {
reporter.shutdown();
}
}
} | class JobRunner extends ControllerMaintainer {
public static final Duration jobTimeout = Duration.ofDays(1).plusHours(1);
private static final Logger log = Logger.getLogger(JobRunner.class.getName());
private final JobController jobs;
private final ExecutorService executors;
private final StepRunner runner;
private final Metrics metrics;
public JobRunner(Controller controller, Duration duration) {
this(controller, duration, Executors.newFixedThreadPool(32, new DaemonThreadFactory("job-runner-")),
new InternalStepRunner(controller));
}
JobRunner(Controller controller, Duration duration, ExecutorService executors, StepRunner runner, Metrics metrics) {
super(controller, duration);
this.jobs = controller.jobController();
this.jobs.setRunner(this::advance);
this.executors = executors;
this.runner = runner;
this.metrics = metrics;
}
@Override
protected double maintain() {
execute(() -> jobs.active().forEach(this::advance));
jobs.collectGarbage();
return 1.0;
}
@Override
public void shutdown() {
super.shutdown();
metrics.shutdown();
executors.shutdown();
}
@Override
public void awaitShutdown() {
super.awaitShutdown();
try {
if ( ! executors.awaitTermination(40, TimeUnit.SECONDS)) {
executors.shutdownNow();
if ( ! executors.awaitTermination(10, TimeUnit.SECONDS))
throw new IllegalStateException("Failed shutting down " + JobRunner.class.getName());
}
}
catch (InterruptedException e) {
log.log(Level.WARNING, "Interrupted during shutdown of " + JobRunner.class.getName(), e);
Thread.currentThread().interrupt();
}
}
public void advance(Run run) {
if ( ! jobs.isDisabled(run.id().job())) advance(run.id());
}
/** Advances each of the ready steps for the given run, or marks it as finished, and stashes it. Public for testing. */
public void advance(RunId id) {
jobs.locked(id, run -> {
if ( ! run.hasFailed()
&& controller().clock().instant().isAfter(run.sleepUntil().orElse(run.start()).plus(jobTimeout)))
execute(() -> {
jobs.abort(run.id(), "job timeout of " + jobTimeout + " reached", false);
advance(run.id());
});
else if (run.readySteps().isEmpty())
execute(() -> finish(run.id()));
else if (run.hasFailed() || run.sleepUntil().map(sleepUntil -> ! sleepUntil.isAfter(controller().clock().instant())).orElse(true))
run.readySteps().forEach(step -> execute(() -> advance(run.id(), step)));
return null;
});
}
private void finish(RunId id) {
try {
jobs.finish(id);
if ( ! id.type().environment().isManuallyDeployed())
controller().applications().deploymentTrigger().notifyOfCompletion(id.application());
}
catch (TimeoutException e) {
}
catch (Exception e) {
log.log(Level.WARNING, "Exception finishing " + id, e);
}
}
/** Attempts to advance the status of the given step, for the given run. */
private void advance(RunId id, Step step) {
try {
AtomicBoolean changed = new AtomicBoolean(false);
jobs.locked(id.application(), id.type(), step, lockedStep -> {
jobs.locked(id, run -> {
if ( ! run.readySteps().contains(step)) {
changed.set(true);
return run;
}
if (run.stepInfo(lockedStep.get()).orElseThrow().startTime().isEmpty())
run = run.with(controller().clock().instant(), lockedStep);
return run;
});
if ( ! changed.get()) {
runner.run(lockedStep, id).ifPresent(status -> {
jobs.update(id, status, lockedStep);
changed.set(true);
});
}
});
if (changed.get())
jobs.active(id).ifPresent(this::advance);
}
catch (TimeoutException e) {
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Exception attempting to advance " + step + " of " + id, e);
}
}
private void execute(Runnable task) {
metrics.queued.incrementAndGet();
executors.execute(() -> {
metrics.queued.decrementAndGet();
metrics.active.incrementAndGet();
try { task.run(); }
finally { metrics.active.decrementAndGet(); }
});
}
static class Metrics {
private final AtomicInteger queued = new AtomicInteger();
private final AtomicInteger active = new AtomicInteger();
private final ScheduledExecutorService reporter = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("job-runner-metrics-"));
private final Metric metric;
private final Metric.Context context;
Metrics(Metric metric, Duration interval) {
this.metric = metric;
this.context = metric.createContext(Map.of());
reporter.scheduleAtFixedRate(this::report, interval.toMillis(), interval.toMillis(), TimeUnit.MILLISECONDS);
}
void report() {
metric.set(ControllerMetrics.DEPLOYMENT_JOBS_QUEUED.baseName(), queued.get(), context);
metric.set(ControllerMetrics.DEPLOYMENT_JOBS_ACTIVE.baseName(), active.get(), context);
}
void shutdown() {
reporter.shutdown();
}
}
} |
Metric.set has count, last, max, min, and sum. | public JobRunner(Controller controller, Duration duration, ExecutorService executors, StepRunner runner) {
this(controller, duration, executors, runner, new Metrics(controller.metric(), Duration.ofMillis(100)));
} | this(controller, duration, executors, runner, new Metrics(controller.metric(), Duration.ofMillis(100))); | public JobRunner(Controller controller, Duration duration, ExecutorService executors, StepRunner runner) {
this(controller, duration, executors, runner, new Metrics(controller.metric(), Duration.ofMillis(100)));
} | class JobRunner extends ControllerMaintainer {
public static final Duration jobTimeout = Duration.ofDays(1).plusHours(1);
private static final Logger log = Logger.getLogger(JobRunner.class.getName());
private final JobController jobs;
private final ExecutorService executors;
private final StepRunner runner;
private final Metrics metrics;
public JobRunner(Controller controller, Duration duration) {
this(controller, duration, Executors.newFixedThreadPool(32, new DaemonThreadFactory("job-runner-")),
new InternalStepRunner(controller));
}
JobRunner(Controller controller, Duration duration, ExecutorService executors, StepRunner runner, Metrics metrics) {
super(controller, duration);
this.jobs = controller.jobController();
this.jobs.setRunner(this::advance);
this.executors = executors;
this.runner = runner;
this.metrics = metrics;
}
@Override
protected double maintain() {
execute(() -> jobs.active().forEach(this::advance));
jobs.collectGarbage();
return 1.0;
}
@Override
public void shutdown() {
super.shutdown();
metrics.shutdown();
executors.shutdown();
}
@Override
public void awaitShutdown() {
super.awaitShutdown();
try {
if ( ! executors.awaitTermination(40, TimeUnit.SECONDS)) {
executors.shutdownNow();
if ( ! executors.awaitTermination(10, TimeUnit.SECONDS))
throw new IllegalStateException("Failed shutting down " + JobRunner.class.getName());
}
}
catch (InterruptedException e) {
log.log(Level.WARNING, "Interrupted during shutdown of " + JobRunner.class.getName(), e);
Thread.currentThread().interrupt();
}
}
public void advance(Run run) {
if ( ! jobs.isDisabled(run.id().job())) advance(run.id());
}
/** Advances each of the ready steps for the given run, or marks it as finished, and stashes it. Public for testing. */
public void advance(RunId id) {
jobs.locked(id, run -> {
if ( ! run.hasFailed()
&& controller().clock().instant().isAfter(run.sleepUntil().orElse(run.start()).plus(jobTimeout)))
execute(() -> {
jobs.abort(run.id(), "job timeout of " + jobTimeout + " reached", false);
advance(run.id());
});
else if (run.readySteps().isEmpty())
execute(() -> finish(run.id()));
else if (run.hasFailed() || run.sleepUntil().map(sleepUntil -> ! sleepUntil.isAfter(controller().clock().instant())).orElse(true))
run.readySteps().forEach(step -> execute(() -> advance(run.id(), step)));
return null;
});
}
private void finish(RunId id) {
try {
jobs.finish(id);
if ( ! id.type().environment().isManuallyDeployed())
controller().applications().deploymentTrigger().notifyOfCompletion(id.application());
}
catch (TimeoutException e) {
}
catch (Exception e) {
log.log(Level.WARNING, "Exception finishing " + id, e);
}
}
/** Attempts to advance the status of the given step, for the given run. */
private void advance(RunId id, Step step) {
try {
AtomicBoolean changed = new AtomicBoolean(false);
jobs.locked(id.application(), id.type(), step, lockedStep -> {
jobs.locked(id, run -> {
if ( ! run.readySteps().contains(step)) {
changed.set(true);
return run;
}
if (run.stepInfo(lockedStep.get()).orElseThrow().startTime().isEmpty())
run = run.with(controller().clock().instant(), lockedStep);
return run;
});
if ( ! changed.get()) {
runner.run(lockedStep, id).ifPresent(status -> {
jobs.update(id, status, lockedStep);
changed.set(true);
});
}
});
if (changed.get())
jobs.active(id).ifPresent(this::advance);
}
catch (TimeoutException e) {
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Exception attempting to advance " + step + " of " + id, e);
}
}
private void execute(Runnable task) {
metrics.queued.incrementAndGet();
executors.execute(() -> {
metrics.queued.decrementAndGet();
metrics.active.incrementAndGet();
try { task.run(); }
finally { metrics.active.decrementAndGet(); }
});
}
static class Metrics {
private final AtomicInteger queued = new AtomicInteger();
private final AtomicInteger active = new AtomicInteger();
private final ScheduledExecutorService reporter = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("job-runner-metrics-"));
private final Metric metric;
private final Metric.Context context;
Metrics(Metric metric, Duration interval) {
this.metric = metric;
this.context = metric.createContext(Map.of());
reporter.scheduleAtFixedRate(this::report, interval.toMillis(), interval.toMillis(), TimeUnit.MILLISECONDS);
}
void report() {
metric.set(ControllerMetrics.DEPLOYMENT_JOBS_QUEUED.baseName(), queued.get(), context);
metric.set(ControllerMetrics.DEPLOYMENT_JOBS_ACTIVE.baseName(), active.get(), context);
}
void shutdown() {
reporter.shutdown();
}
}
} | class JobRunner extends ControllerMaintainer {
public static final Duration jobTimeout = Duration.ofDays(1).plusHours(1);
private static final Logger log = Logger.getLogger(JobRunner.class.getName());
private final JobController jobs;
private final ExecutorService executors;
private final StepRunner runner;
private final Metrics metrics;
public JobRunner(Controller controller, Duration duration) {
this(controller, duration, Executors.newFixedThreadPool(32, new DaemonThreadFactory("job-runner-")),
new InternalStepRunner(controller));
}
JobRunner(Controller controller, Duration duration, ExecutorService executors, StepRunner runner, Metrics metrics) {
super(controller, duration);
this.jobs = controller.jobController();
this.jobs.setRunner(this::advance);
this.executors = executors;
this.runner = runner;
this.metrics = metrics;
}
@Override
protected double maintain() {
execute(() -> jobs.active().forEach(this::advance));
jobs.collectGarbage();
return 1.0;
}
@Override
public void shutdown() {
super.shutdown();
metrics.shutdown();
executors.shutdown();
}
@Override
public void awaitShutdown() {
super.awaitShutdown();
try {
if ( ! executors.awaitTermination(40, TimeUnit.SECONDS)) {
executors.shutdownNow();
if ( ! executors.awaitTermination(10, TimeUnit.SECONDS))
throw new IllegalStateException("Failed shutting down " + JobRunner.class.getName());
}
}
catch (InterruptedException e) {
log.log(Level.WARNING, "Interrupted during shutdown of " + JobRunner.class.getName(), e);
Thread.currentThread().interrupt();
}
}
public void advance(Run run) {
if ( ! jobs.isDisabled(run.id().job())) advance(run.id());
}
/** Advances each of the ready steps for the given run, or marks it as finished, and stashes it. Public for testing. */
public void advance(RunId id) {
jobs.locked(id, run -> {
if ( ! run.hasFailed()
&& controller().clock().instant().isAfter(run.sleepUntil().orElse(run.start()).plus(jobTimeout)))
execute(() -> {
jobs.abort(run.id(), "job timeout of " + jobTimeout + " reached", false);
advance(run.id());
});
else if (run.readySteps().isEmpty())
execute(() -> finish(run.id()));
else if (run.hasFailed() || run.sleepUntil().map(sleepUntil -> ! sleepUntil.isAfter(controller().clock().instant())).orElse(true))
run.readySteps().forEach(step -> execute(() -> advance(run.id(), step)));
return null;
});
}
private void finish(RunId id) {
try {
jobs.finish(id);
if ( ! id.type().environment().isManuallyDeployed())
controller().applications().deploymentTrigger().notifyOfCompletion(id.application());
}
catch (TimeoutException e) {
}
catch (Exception e) {
log.log(Level.WARNING, "Exception finishing " + id, e);
}
}
/** Attempts to advance the status of the given step, for the given run. */
private void advance(RunId id, Step step) {
try {
AtomicBoolean changed = new AtomicBoolean(false);
jobs.locked(id.application(), id.type(), step, lockedStep -> {
jobs.locked(id, run -> {
if ( ! run.readySteps().contains(step)) {
changed.set(true);
return run;
}
if (run.stepInfo(lockedStep.get()).orElseThrow().startTime().isEmpty())
run = run.with(controller().clock().instant(), lockedStep);
return run;
});
if ( ! changed.get()) {
runner.run(lockedStep, id).ifPresent(status -> {
jobs.update(id, status, lockedStep);
changed.set(true);
});
}
});
if (changed.get())
jobs.active(id).ifPresent(this::advance);
}
catch (TimeoutException e) {
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Exception attempting to advance " + step + " of " + id, e);
}
}
private void execute(Runnable task) {
metrics.queued.incrementAndGet();
executors.execute(() -> {
metrics.queued.decrementAndGet();
metrics.active.incrementAndGet();
try { task.run(); }
finally { metrics.active.decrementAndGet(); }
});
}
static class Metrics {
private final AtomicInteger queued = new AtomicInteger();
private final AtomicInteger active = new AtomicInteger();
private final ScheduledExecutorService reporter = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("job-runner-metrics-"));
private final Metric metric;
private final Metric.Context context;
Metrics(Metric metric, Duration interval) {
this.metric = metric;
this.context = metric.createContext(Map.of());
reporter.scheduleAtFixedRate(this::report, interval.toMillis(), interval.toMillis(), TimeUnit.MILLISECONDS);
}
void report() {
metric.set(ControllerMetrics.DEPLOYMENT_JOBS_QUEUED.baseName(), queued.get(), context);
metric.set(ControllerMetrics.DEPLOYMENT_JOBS_ACTIVE.baseName(), active.get(), context);
}
void shutdown() {
reporter.shutdown();
}
}
} |
consider dropping null check | public Evaluator bind(String name, Tensor value) {
if (value != null) evaluator.bind(name, value);
return this;
} | if (value != null) evaluator.bind(name, value); | public Evaluator bind(String name, Tensor value) {
evaluator.bind(name, value);
return this;
} | class SimpleEvaluator implements Evaluator {
private final FunctionEvaluator evaluator;
static Supplier<Evaluator> wrap(Supplier<FunctionEvaluator> supplier) {
return () -> new SimpleEvaluator(supplier.get());
}
SimpleEvaluator(FunctionEvaluator prototype) {
this.evaluator = prototype;
}
@Override
@Override
public double evaluateScore() {
return evaluator.evaluate().asDouble();
}
@Override
public String toString() {
var buf = new StringBuilder();
buf.append("SimpleEvaluator(");
buf.append(evaluator.function().toString());
buf.append(")[");
for (String arg : evaluator.function().arguments()) {
buf.append("{").append(arg).append("}");
}
buf.append("]");
return buf.toString();
}
} | class SimpleEvaluator implements Evaluator {
private final FunctionEvaluator evaluator;
static Supplier<Evaluator> wrap(Supplier<FunctionEvaluator> supplier) {
return () -> new SimpleEvaluator(supplier.get());
}
SimpleEvaluator(FunctionEvaluator prototype) {
this.evaluator = prototype;
}
@Override
@Override
public double evaluateScore() {
return evaluator.evaluate().asDouble();
}
@Override
public String toString() {
var buf = new StringBuilder();
buf.append("SimpleEvaluator(");
buf.append(evaluator.function().toString());
buf.append(")[");
for (String arg : evaluator.function().arguments()) {
buf.append("{").append(arg).append("}");
}
buf.append("]");
return buf.toString();
}
} |
didRerank must be true; removing the need for an if here | private void runProcessing() {
int count = 0;
for (var iterator = hitsToRescore.iterator(); count < rerankCount && iterator.hasNext(); ) {
WrappedHit wrapped = iterator.next();
double oldScore = wrapped.getScore();
boolean didRerank = hitRescorer.rescoreHit(wrapped);
if (didRerank) {
ranges.withInitialScore(oldScore);
ranges.withFinalScore(wrapped.getScore());
++count;
iterator.remove();
}
}
} | boolean didRerank = hitRescorer.rescoreHit(wrapped); | private void runProcessing() {
int count = 0;
for (var iterator = hitsToRescore.iterator(); count < rerankCount && iterator.hasNext(); ) {
WrappedHit wrapped = iterator.next();
double oldScore = wrapped.getScore();
double newScore = hitRescorer.rescoreHit(wrapped);
ranges.withInitialScore(oldScore);
ranges.withFinalScore(newScore);
++count;
iterator.remove();
}
} | class ResultReranker {
private static final Logger logger = Logger.getLogger(ResultReranker.class.getName());
private final HitRescorer hitRescorer;
private final int rerankCount;
private final List<WrappedHit> hitsToRescore = new ArrayList<>();
private final RangeAdjuster ranges = new RangeAdjuster();
ResultReranker(HitRescorer hitRescorer, int rerankCount) {
this.hitRescorer = hitRescorer;
this.rerankCount = rerankCount;
}
void rerankHits(Result result) {
gatherHits(result);
runPreProcessing();
hitRescorer.runNormalizers();
runProcessing();
runPostProcessing();
result.hits().sort();
}
private void gatherHits(Result result) {
for (var iterator = result.hits().deepIterator(); iterator.hasNext();) {
Hit hit = iterator.next();
if (hit.isMeta() || hit instanceof HitGroup) {
continue;
}
var wrapped = WrappedHit.from(hit);
if (wrapped != null) hitsToRescore.add(wrapped);
}
}
private void runPreProcessing() {
hitsToRescore.sort(Comparator.naturalOrder());
int count = 0;
for (WrappedHit hit : hitsToRescore) {
if (count == rerankCount) break;
hitRescorer.preprocess(hit);
++count;
}
}
private void runPostProcessing() {
if (ranges.rescaleNeeded() && ! hitsToRescore.isEmpty()) {
double scale = ranges.scale();
double bias = ranges.bias();
for (WrappedHit wrapped : hitsToRescore) {
double oldScore = wrapped.getScore();
wrapped.setScore(oldScore * scale + bias);
}
}
}
} | class ResultReranker {
private static final Logger logger = Logger.getLogger(ResultReranker.class.getName());
private final HitRescorer hitRescorer;
private final int rerankCount;
private final List<WrappedHit> hitsToRescore = new ArrayList<>();
private final RangeAdjuster ranges = new RangeAdjuster();
ResultReranker(HitRescorer hitRescorer, int rerankCount) {
this.hitRescorer = hitRescorer;
this.rerankCount = rerankCount;
}
void rerankHits(Result result) {
gatherHits(result);
runPreProcessing();
hitRescorer.runNormalizers();
runProcessing();
runPostProcessing();
result.hits().sort();
}
private void gatherHits(Result result) {
for (var iterator = result.hits().deepIterator(); iterator.hasNext();) {
Hit hit = iterator.next();
if (hit.isMeta() || hit instanceof HitGroup) {
continue;
}
var wrapped = WrappedHit.from(hit);
if (wrapped != null) hitsToRescore.add(wrapped);
}
}
private void runPreProcessing() {
hitsToRescore.sort(Comparator.naturalOrder());
int count = 0;
for (WrappedHit hit : hitsToRescore) {
if (count == rerankCount) break;
hitRescorer.preprocess(hit);
++count;
}
}
private void runPostProcessing() {
if (ranges.rescaleNeeded() && ! hitsToRescore.isEmpty()) {
double scale = ranges.scale();
double bias = ranges.bias();
for (WrappedHit wrapped : hitsToRescore) {
double oldScore = wrapped.getScore();
wrapped.setScore(oldScore * scale + bias);
}
}
}
} |
Should this be after the next line? | private int resolveRerankCount(GlobalPhaseSetup setup, Query query) {
if (setup == null) return 0;
Integer override = query.getRanking().getGlobalPhase().getRerankCount();
if (override != null) {
return override;
}
return setup.rerankCount;
} | if (setup == null) return 0; | private int resolveRerankCount(GlobalPhaseSetup setup, Query query) {
if (setup == null) {
return 0;
}
Integer override = query.getRanking().getGlobalPhase().getRerankCount();
if (override != null) {
return override;
}
return setup.rerankCount;
} | class GlobalPhaseRanker {
private static final Logger logger = Logger.getLogger(GlobalPhaseRanker.class.getName());
private final RankProfilesEvaluatorFactory factory;
@Inject
public GlobalPhaseRanker(RankProfilesEvaluatorFactory factory) {
this.factory = factory;
logger.fine(() -> "Using factory: " + factory);
}
public int getRerankCount(Query query, String schema) {
var setup = globalPhaseSetupFor(query, schema).orElse(null);
return resolveRerankCount(setup, query);
}
public Optional<ErrorMessage> validateNoSorting(Query query, String schema) {
var setup = globalPhaseSetupFor(query, schema).orElse(null);
if (setup == null) return Optional.empty();
var sorting = query.getRanking().getSorting();
if (sorting == null || sorting.fieldOrders() == null) return Optional.empty();
for (var fieldOrder : sorting.fieldOrders()) {
if (!fieldOrder.getSorter().getName().equals("[rank]")
|| fieldOrder.getSortOrder() != Sorting.Order.DESCENDING) {
return Optional.of(ErrorMessage.createIllegalQuery("Sorting is not supported with global phase"));
}
}
return Optional.empty();
}
public void rerankHits(Query query, Result result, String schema) {
var setup = globalPhaseSetupFor(query, schema).orElse(null);
if (setup == null) return;
var mainSpec = setup.globalPhaseEvalSpec;
var mainSrc = withQueryPrep(mainSpec.evalSource(), mainSpec.fromQuery(), query);
int rerankCount = resolveRerankCount(setup, query);
var normalizers = new ArrayList<NormalizerContext>();
for (var nSetup : setup.normalizers) {
var normSpec = nSetup.inputEvalSpec();
var normEvalSrc = withQueryPrep(normSpec.evalSource(), normSpec.fromQuery(), query);
normalizers.add(new NormalizerContext(nSetup.name(), nSetup.supplier().get(), normEvalSrc, normSpec.fromMF()));
}
var rescorer = new HitRescorer(mainSrc, mainSpec.fromMF(), normalizers);
var reranker = new ResultReranker(rescorer, rerankCount);
reranker.rerankHits(result);
hideImplicitMatchFeatures(result, setup.matchFeaturesToHide);
}
static Supplier<Evaluator> withQueryPrep(Supplier<Evaluator> evalSource, List<String> queryFeatures, Query query) {
var prepared = PreparedInput.findFromQuery(query, queryFeatures);
Supplier<Evaluator> supplier = () -> {
var evaluator = evalSource.get();
for (var entry : prepared) {
evaluator.bind(entry.name(), entry.value());
}
return evaluator;
};
return supplier;
}
private void hideImplicitMatchFeatures(Result result, Collection<String> namesToHide) {
if (namesToHide.size() == 0) return;
var filter = new MatchFeatureFilter(namesToHide);
for (var iterator = result.hits().deepIterator(); iterator.hasNext();) {
Hit hit = iterator.next();
if (hit.isMeta() || hit instanceof HitGroup) {
continue;
}
if (hit.getField("matchfeatures") instanceof FeatureData matchFeatures) {
if (matchFeatures.inspect() instanceof MatchFeatureData.HitValue hitValue) {
var newValue = hitValue.subsetFilter(filter);
if (newValue.fieldCount() == 0) {
hit.removeField("matchfeatures");
} else {
hit.setField("matchfeatures", newValue);
}
}
}
}
}
private Optional<GlobalPhaseSetup> globalPhaseSetupFor(Query query, String schema) {
return factory.evaluatorForSchema(schema)
.flatMap(evaluator -> evaluator.getGlobalPhaseSetup(query.getRanking().getProfile()));
}
} | class GlobalPhaseRanker {
private static final Logger logger = Logger.getLogger(GlobalPhaseRanker.class.getName());
private final RankProfilesEvaluatorFactory factory;
@Inject
public GlobalPhaseRanker(RankProfilesEvaluatorFactory factory) {
this.factory = factory;
logger.fine(() -> "Using factory: " + factory);
}
public int getRerankCount(Query query, String schema) {
var setup = globalPhaseSetupFor(query, schema).orElse(null);
return resolveRerankCount(setup, query);
}
public Optional<ErrorMessage> validateNoSorting(Query query, String schema) {
var setup = globalPhaseSetupFor(query, schema).orElse(null);
if (setup == null) return Optional.empty();
var sorting = query.getRanking().getSorting();
if (sorting == null || sorting.fieldOrders() == null) return Optional.empty();
for (var fieldOrder : sorting.fieldOrders()) {
if (!fieldOrder.getSorter().getName().equals("[rank]")
|| fieldOrder.getSortOrder() != Sorting.Order.DESCENDING) {
return Optional.of(ErrorMessage.createIllegalQuery("Sorting is not supported with global phase"));
}
}
return Optional.empty();
}
public void rerankHits(Query query, Result result, String schema) {
var setup = globalPhaseSetupFor(query, schema).orElse(null);
if (setup == null) return;
var mainSpec = setup.globalPhaseEvalSpec;
var mainSrc = withQueryPrep(mainSpec.evalSource(), mainSpec.fromQuery(), query);
int rerankCount = resolveRerankCount(setup, query);
var normalizers = new ArrayList<NormalizerContext>();
for (var nSetup : setup.normalizers) {
var normSpec = nSetup.inputEvalSpec();
var normEvalSrc = withQueryPrep(normSpec.evalSource(), normSpec.fromQuery(), query);
normalizers.add(new NormalizerContext(nSetup.name(), nSetup.supplier().get(), normEvalSrc, normSpec.fromMF()));
}
var rescorer = new HitRescorer(mainSrc, mainSpec.fromMF(), normalizers);
var reranker = new ResultReranker(rescorer, rerankCount);
reranker.rerankHits(result);
hideImplicitMatchFeatures(result, setup.matchFeaturesToHide);
}
static Supplier<Evaluator> withQueryPrep(Supplier<Evaluator> evalSource, List<String> queryFeatures, Query query) {
var prepared = PreparedInput.findFromQuery(query, queryFeatures);
Supplier<Evaluator> supplier = () -> {
var evaluator = evalSource.get();
for (var entry : prepared) {
evaluator.bind(entry.name(), entry.value());
}
return evaluator;
};
return supplier;
}
private void hideImplicitMatchFeatures(Result result, Collection<String> namesToHide) {
if (namesToHide.size() == 0) return;
var filter = new MatchFeatureFilter(namesToHide);
for (var iterator = result.hits().deepIterator(); iterator.hasNext();) {
Hit hit = iterator.next();
if (hit.isMeta() || hit instanceof HitGroup) {
continue;
}
if (hit.getField("matchfeatures") instanceof FeatureData matchFeatures) {
if (matchFeatures.inspect() instanceof MatchFeatureData.HitValue hitValue) {
var newValue = hitValue.subsetFilter(filter);
if (newValue.fieldCount() == 0) {
hit.removeField("matchfeatures");
} else {
hit.setField("matchfeatures", newValue);
}
}
}
}
}
private Optional<GlobalPhaseSetup> globalPhaseSetupFor(Query query, String schema) {
return factory.evaluatorForSchema(schema)
.flatMap(evaluator -> evaluator.getGlobalPhaseSetup(query.getRanking().getProfile()));
}
} |
no (added comment to clarify) | private int resolveRerankCount(GlobalPhaseSetup setup, Query query) {
if (setup == null) return 0;
Integer override = query.getRanking().getGlobalPhase().getRerankCount();
if (override != null) {
return override;
}
return setup.rerankCount;
} | if (setup == null) return 0; | private int resolveRerankCount(GlobalPhaseSetup setup, Query query) {
if (setup == null) {
return 0;
}
Integer override = query.getRanking().getGlobalPhase().getRerankCount();
if (override != null) {
return override;
}
return setup.rerankCount;
} | class GlobalPhaseRanker {
private static final Logger logger = Logger.getLogger(GlobalPhaseRanker.class.getName());
private final RankProfilesEvaluatorFactory factory;
@Inject
public GlobalPhaseRanker(RankProfilesEvaluatorFactory factory) {
this.factory = factory;
logger.fine(() -> "Using factory: " + factory);
}
public int getRerankCount(Query query, String schema) {
var setup = globalPhaseSetupFor(query, schema).orElse(null);
return resolveRerankCount(setup, query);
}
public Optional<ErrorMessage> validateNoSorting(Query query, String schema) {
var setup = globalPhaseSetupFor(query, schema).orElse(null);
if (setup == null) return Optional.empty();
var sorting = query.getRanking().getSorting();
if (sorting == null || sorting.fieldOrders() == null) return Optional.empty();
for (var fieldOrder : sorting.fieldOrders()) {
if (!fieldOrder.getSorter().getName().equals("[rank]")
|| fieldOrder.getSortOrder() != Sorting.Order.DESCENDING) {
return Optional.of(ErrorMessage.createIllegalQuery("Sorting is not supported with global phase"));
}
}
return Optional.empty();
}
public void rerankHits(Query query, Result result, String schema) {
var setup = globalPhaseSetupFor(query, schema).orElse(null);
if (setup == null) return;
var mainSpec = setup.globalPhaseEvalSpec;
var mainSrc = withQueryPrep(mainSpec.evalSource(), mainSpec.fromQuery(), query);
int rerankCount = resolveRerankCount(setup, query);
var normalizers = new ArrayList<NormalizerContext>();
for (var nSetup : setup.normalizers) {
var normSpec = nSetup.inputEvalSpec();
var normEvalSrc = withQueryPrep(normSpec.evalSource(), normSpec.fromQuery(), query);
normalizers.add(new NormalizerContext(nSetup.name(), nSetup.supplier().get(), normEvalSrc, normSpec.fromMF()));
}
var rescorer = new HitRescorer(mainSrc, mainSpec.fromMF(), normalizers);
var reranker = new ResultReranker(rescorer, rerankCount);
reranker.rerankHits(result);
hideImplicitMatchFeatures(result, setup.matchFeaturesToHide);
}
static Supplier<Evaluator> withQueryPrep(Supplier<Evaluator> evalSource, List<String> queryFeatures, Query query) {
var prepared = PreparedInput.findFromQuery(query, queryFeatures);
Supplier<Evaluator> supplier = () -> {
var evaluator = evalSource.get();
for (var entry : prepared) {
evaluator.bind(entry.name(), entry.value());
}
return evaluator;
};
return supplier;
}
private void hideImplicitMatchFeatures(Result result, Collection<String> namesToHide) {
if (namesToHide.size() == 0) return;
var filter = new MatchFeatureFilter(namesToHide);
for (var iterator = result.hits().deepIterator(); iterator.hasNext();) {
Hit hit = iterator.next();
if (hit.isMeta() || hit instanceof HitGroup) {
continue;
}
if (hit.getField("matchfeatures") instanceof FeatureData matchFeatures) {
if (matchFeatures.inspect() instanceof MatchFeatureData.HitValue hitValue) {
var newValue = hitValue.subsetFilter(filter);
if (newValue.fieldCount() == 0) {
hit.removeField("matchfeatures");
} else {
hit.setField("matchfeatures", newValue);
}
}
}
}
}
private Optional<GlobalPhaseSetup> globalPhaseSetupFor(Query query, String schema) {
return factory.evaluatorForSchema(schema)
.flatMap(evaluator -> evaluator.getGlobalPhaseSetup(query.getRanking().getProfile()));
}
} | class GlobalPhaseRanker {
private static final Logger logger = Logger.getLogger(GlobalPhaseRanker.class.getName());
private final RankProfilesEvaluatorFactory factory;
@Inject
public GlobalPhaseRanker(RankProfilesEvaluatorFactory factory) {
this.factory = factory;
logger.fine(() -> "Using factory: " + factory);
}
public int getRerankCount(Query query, String schema) {
var setup = globalPhaseSetupFor(query, schema).orElse(null);
return resolveRerankCount(setup, query);
}
public Optional<ErrorMessage> validateNoSorting(Query query, String schema) {
var setup = globalPhaseSetupFor(query, schema).orElse(null);
if (setup == null) return Optional.empty();
var sorting = query.getRanking().getSorting();
if (sorting == null || sorting.fieldOrders() == null) return Optional.empty();
for (var fieldOrder : sorting.fieldOrders()) {
if (!fieldOrder.getSorter().getName().equals("[rank]")
|| fieldOrder.getSortOrder() != Sorting.Order.DESCENDING) {
return Optional.of(ErrorMessage.createIllegalQuery("Sorting is not supported with global phase"));
}
}
return Optional.empty();
}
public void rerankHits(Query query, Result result, String schema) {
var setup = globalPhaseSetupFor(query, schema).orElse(null);
if (setup == null) return;
var mainSpec = setup.globalPhaseEvalSpec;
var mainSrc = withQueryPrep(mainSpec.evalSource(), mainSpec.fromQuery(), query);
int rerankCount = resolveRerankCount(setup, query);
var normalizers = new ArrayList<NormalizerContext>();
for (var nSetup : setup.normalizers) {
var normSpec = nSetup.inputEvalSpec();
var normEvalSrc = withQueryPrep(normSpec.evalSource(), normSpec.fromQuery(), query);
normalizers.add(new NormalizerContext(nSetup.name(), nSetup.supplier().get(), normEvalSrc, normSpec.fromMF()));
}
var rescorer = new HitRescorer(mainSrc, mainSpec.fromMF(), normalizers);
var reranker = new ResultReranker(rescorer, rerankCount);
reranker.rerankHits(result);
hideImplicitMatchFeatures(result, setup.matchFeaturesToHide);
}
static Supplier<Evaluator> withQueryPrep(Supplier<Evaluator> evalSource, List<String> queryFeatures, Query query) {
var prepared = PreparedInput.findFromQuery(query, queryFeatures);
Supplier<Evaluator> supplier = () -> {
var evaluator = evalSource.get();
for (var entry : prepared) {
evaluator.bind(entry.name(), entry.value());
}
return evaluator;
};
return supplier;
}
private void hideImplicitMatchFeatures(Result result, Collection<String> namesToHide) {
if (namesToHide.size() == 0) return;
var filter = new MatchFeatureFilter(namesToHide);
for (var iterator = result.hits().deepIterator(); iterator.hasNext();) {
Hit hit = iterator.next();
if (hit.isMeta() || hit instanceof HitGroup) {
continue;
}
if (hit.getField("matchfeatures") instanceof FeatureData matchFeatures) {
if (matchFeatures.inspect() instanceof MatchFeatureData.HitValue hitValue) {
var newValue = hitValue.subsetFilter(filter);
if (newValue.fieldCount() == 0) {
hit.removeField("matchfeatures");
} else {
hit.setField("matchfeatures", newValue);
}
}
}
}
}
private Optional<GlobalPhaseSetup> globalPhaseSetupFor(Query query, String schema) {
return factory.evaluatorForSchema(schema)
.flatMap(evaluator -> evaluator.getGlobalPhaseSetup(query.getRanking().getProfile()));
}
} |
```suggestion ``` | public PriceInformation priceForApplications(List<ApplicationResources> applicationResources, PricingInfo pricingInfo, Plan plan) {
ApplicationResources resources = applicationResources.get(0);
System.out.println(resources);
BigDecimal listPrice = resources.vcpu().multiply(valueOf(1000))
.add(resources.memoryGb().multiply(valueOf(100)))
.add(resources.diskGb().multiply(valueOf(10)))
.add(resources.enclaveVcpu().multiply(valueOf(1000))
.add(resources.enclaveMemoryGb().multiply(valueOf(100)))
.add(resources.enclaveDiskGb().multiply(valueOf(10))));
BigDecimal supportLevelCost = pricingInfo.supportLevel() == BASIC ? new BigDecimal("-160.00") : new BigDecimal("800.00");
BigDecimal listPriceWithSupport = listPrice.add(supportLevelCost);
BigDecimal enclaveDiscount = (resources.enclaveVcpu().compareTo(ZERO) > 0) ? new BigDecimal("-15.1234") : BigDecimal.ZERO;
BigDecimal volumeDiscount = new BigDecimal("-5.64315634");
BigDecimal committedAmountDiscount = new BigDecimal("-1.23");
BigDecimal totalAmount = listPrice.add(supportLevelCost).add(enclaveDiscount).add(volumeDiscount).add(committedAmountDiscount);
return new PriceInformation(listPriceWithSupport, volumeDiscount, committedAmountDiscount, enclaveDiscount, totalAmount);
} | System.out.println(resources); | public PriceInformation priceForApplications(List<ApplicationResources> applicationResources, PricingInfo pricingInfo, Plan plan) {
ApplicationResources resources = applicationResources.get(0);
BigDecimal listPrice = resources.vcpu().multiply(valueOf(1000))
.add(resources.memoryGb().multiply(valueOf(100)))
.add(resources.diskGb().multiply(valueOf(10)))
.add(resources.enclaveVcpu().multiply(valueOf(1000))
.add(resources.enclaveMemoryGb().multiply(valueOf(100)))
.add(resources.enclaveDiskGb().multiply(valueOf(10))));
BigDecimal supportLevelCost = pricingInfo.supportLevel() == BASIC ? new BigDecimal("-160.00") : new BigDecimal("800.00");
BigDecimal listPriceWithSupport = listPrice.add(supportLevelCost);
BigDecimal enclaveDiscount = (resources.enclaveVcpu().compareTo(ZERO) > 0) ? new BigDecimal("-15.1234") : BigDecimal.ZERO;
BigDecimal volumeDiscount = new BigDecimal("-5.64315634");
BigDecimal committedAmountDiscount = new BigDecimal("-1.23");
BigDecimal totalAmount = listPrice.add(supportLevelCost).add(enclaveDiscount).add(volumeDiscount).add(committedAmountDiscount);
return new PriceInformation(listPriceWithSupport, volumeDiscount, committedAmountDiscount, enclaveDiscount, totalAmount);
} | class MockPricingController implements PricingController {
@Override
public PriceInformation price(List<ClusterResources> clusterResources, PricingInfo pricingInfo, Plan plan) {
BigDecimal listPrice = valueOf(clusterResources.stream()
.mapToDouble(resources -> resources.nodes() *
(resources.nodeResources().vcpu() * 1000 +
resources.nodeResources().memoryGb() * 100 +
resources.nodeResources().diskGb() * 10))
.sum());
BigDecimal supportLevelCost = pricingInfo.supportLevel() == BASIC ? new BigDecimal("-160.00") : new BigDecimal("800.00");
BigDecimal listPriceWithSupport = listPrice.add(supportLevelCost);
BigDecimal enclaveDiscount = pricingInfo.enclave() ? new BigDecimal("-15.1234") : BigDecimal.ZERO;
BigDecimal volumeDiscount = new BigDecimal("-5.64315634");
BigDecimal committedAmountDiscount = new BigDecimal("-1.23");
BigDecimal totalAmount = listPrice.add(supportLevelCost).add(enclaveDiscount).add(volumeDiscount).add(committedAmountDiscount);
return new PriceInformation(listPriceWithSupport, volumeDiscount, committedAmountDiscount, enclaveDiscount, totalAmount);
}
@Override
} | class MockPricingController implements PricingController {
@Override
public PriceInformation price(List<ClusterResources> clusterResources, PricingInfo pricingInfo, Plan plan) {
BigDecimal listPrice = valueOf(clusterResources.stream()
.mapToDouble(resources -> resources.nodes() *
(resources.nodeResources().vcpu() * 1000 +
resources.nodeResources().memoryGb() * 100 +
resources.nodeResources().diskGb() * 10))
.sum());
BigDecimal supportLevelCost = pricingInfo.supportLevel() == BASIC ? new BigDecimal("-160.00") : new BigDecimal("800.00");
BigDecimal listPriceWithSupport = listPrice.add(supportLevelCost);
BigDecimal enclaveDiscount = pricingInfo.enclave() ? new BigDecimal("-15.1234") : BigDecimal.ZERO;
BigDecimal volumeDiscount = new BigDecimal("-5.64315634");
BigDecimal committedAmountDiscount = new BigDecimal("-1.23");
BigDecimal totalAmount = listPrice.add(supportLevelCost).add(enclaveDiscount).add(volumeDiscount).add(committedAmountDiscount);
return new PriceInformation(listPriceWithSupport, volumeDiscount, committedAmountDiscount, enclaveDiscount, totalAmount);
}
@Override
} |
Do we need to go via `double` here` or can this be parsed straight to `BigDecimal`? | private ApplicationResources applicationResources(String appResourcesString) {
List<String> elements = Arrays.stream(appResourcesString.split(",")).toList();
var applicationName = "default";
var vcpu = 0d;
var memoryGb = 0d;
var diskGb = 0d;
var gpuMemoryGb = 0d;
var enclaveVcpu = 0d;
var enclaveMemoryGb = 0d;
var enclaveDiskGb = 0d;
var enclaveGpuMemoryGb = 0d;
for (var element : keysAndValues(elements)) {
switch (element.getFirst().toLowerCase()) {
case "name" -> applicationName = element.getSecond();
case "vcpu" -> vcpu = parseDouble(element.getSecond());
case "memorygb" -> memoryGb = parseDouble(element.getSecond());
case "diskgb" -> diskGb = parseDouble(element.getSecond());
case "gpumemorygb" -> gpuMemoryGb = parseDouble(element.getSecond());
case "enclavevcpu" -> enclaveVcpu = parseDouble(element.getSecond());
case "enclavememorygb" -> enclaveMemoryGb = parseDouble(element.getSecond());
case "enclavediskgb" -> enclaveDiskGb = parseDouble(element.getSecond());
case "enclavegpumemorygb" -> enclaveGpuMemoryGb = parseDouble(element.getSecond());
default -> throw new IllegalArgumentException("Unknown key '" + element.getFirst() + '\'');
}
}
return new ApplicationResources(applicationName,
valueOf(vcpu), valueOf(memoryGb), valueOf(diskGb), valueOf(gpuMemoryGb),
valueOf(enclaveVcpu), valueOf(enclaveMemoryGb), valueOf(enclaveDiskGb), valueOf(enclaveGpuMemoryGb));
} | case "vcpu" -> vcpu = parseDouble(element.getSecond()); | private ApplicationResources applicationResources(String appResourcesString) {
List<String> elements = Arrays.stream(appResourcesString.split(",")).toList();
var applicationName = "default";
var vcpu = 0d;
var memoryGb = 0d;
var diskGb = 0d;
var gpuMemoryGb = 0d;
var enclaveVcpu = 0d;
var enclaveMemoryGb = 0d;
var enclaveDiskGb = 0d;
var enclaveGpuMemoryGb = 0d;
for (var element : keysAndValues(elements)) {
switch (element.getFirst().toLowerCase()) {
case "name" -> applicationName = element.getSecond();
case "vcpu" -> vcpu = parseDouble(element.getSecond());
case "memorygb" -> memoryGb = parseDouble(element.getSecond());
case "diskgb" -> diskGb = parseDouble(element.getSecond());
case "gpumemorygb" -> gpuMemoryGb = parseDouble(element.getSecond());
case "enclavevcpu" -> enclaveVcpu = parseDouble(element.getSecond());
case "enclavememorygb" -> enclaveMemoryGb = parseDouble(element.getSecond());
case "enclavediskgb" -> enclaveDiskGb = parseDouble(element.getSecond());
case "enclavegpumemorygb" -> enclaveGpuMemoryGb = parseDouble(element.getSecond());
default -> throw new IllegalArgumentException("Unknown key '" + element.getFirst() + '\'');
}
}
return new ApplicationResources(applicationName,
valueOf(vcpu), valueOf(memoryGb), valueOf(diskGb), valueOf(gpuMemoryGb),
valueOf(enclaveVcpu), valueOf(enclaveMemoryGb), valueOf(enclaveDiskGb), valueOf(enclaveGpuMemoryGb));
} | class PricingApiHandler extends ThreadedHttpRequestHandler {
private static final Logger log = Logger.getLogger(PricingApiHandler.class.getName());
private final Controller controller;
@Inject
public PricingApiHandler(Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
if (request.getMethod() != GET)
return methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
try {
return handleGET(request);
} catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/pricing/v1/pricing")) return pricing(request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse pricing(HttpRequest request) {
String rawQuery = request.getUri().getRawQuery();
var priceParameters = parseQuery(rawQuery);
PriceInformation price = calculatePrice(priceParameters);
return response(price, priceParameters);
}
private PriceInformation calculatePrice(PriceParameters priceParameters) {
var priceCalculator = controller.serviceRegistry().pricingController();
if (priceParameters.appResources == null)
return priceCalculator.price(priceParameters.clusterResources, priceParameters.pricingInfo, priceParameters.plan);
else
return priceCalculator.priceForApplications(priceParameters.appResources, priceParameters.pricingInfo, priceParameters.plan);
}
private PriceParameters parseQuery(String rawQuery) {
if (rawQuery == null) throw new IllegalArgumentException("No price information found in query");
List<String> elements = Arrays.stream(URLDecoder.decode(rawQuery, UTF_8).split("&")).toList();
if (keysAndValues(elements).stream().map(Pair::getFirst).toList().contains("resources"))
return parseQueryLegacy(elements);
else
return parseQuery(elements);
}
private PriceParameters parseQueryLegacy(List<String> elements) {
var supportLevel = SupportLevel.BASIC;
var enclave = false;
var committedSpend = 0d;
var plan = controller.serviceRegistry().planRegistry().defaultPlan();
List<ClusterResources> clusterResources = new ArrayList<>();
for (Pair<String, String> entry : keysAndValues(elements)) {
switch (entry.getFirst().toLowerCase()) {
case "committedspend" -> committedSpend = parseDouble(entry.getSecond());
case "enclave" -> enclave = Boolean.parseBoolean(entry.getSecond());
case "planid" -> plan = plan(entry.getSecond())
.orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
case "supportlevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
case "resources" -> clusterResources.add(clusterResources(entry.getSecond()));
default -> throw new IllegalArgumentException("Unknown query parameter '" + entry.getFirst() + '\'');
}
}
if (clusterResources.isEmpty()) throw new IllegalArgumentException("No cluster resources found in query");
PricingInfo pricingInfo = new PricingInfo(enclave, supportLevel, committedSpend);
return new PriceParameters(clusterResources, pricingInfo, plan, null);
}
private PriceParameters parseQuery(List<String> elements) {
var supportLevel = SupportLevel.BASIC;
var enclave = false;
var committedSpend = 0d;
var applicationName = "default";
var plan = controller.serviceRegistry().planRegistry().defaultPlan();
List<ApplicationResources> appResources = new ArrayList<>();
for (Pair<String, String> entry : keysAndValues(elements)) {
switch (entry.getFirst().toLowerCase()) {
case "committedspend" -> committedSpend = parseDouble(entry.getSecond());
case "planid" -> plan = plan(entry.getSecond())
.orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
case "supportlevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
case "application" -> appResources.add(applicationResources(entry.getSecond()));
default -> throw new IllegalArgumentException("Unknown query parameter '" + entry.getFirst() + '\'');
}
}
if (appResources.isEmpty()) throw new IllegalArgumentException("No application resources found in query");
PricingInfo pricingInfo = new PricingInfo(false, supportLevel, committedSpend);
return new PriceParameters(List.of(), pricingInfo, plan, appResources);
}
private ClusterResources clusterResources(String resourcesString) {
List<String> elements = Arrays.stream(resourcesString.split(",")).toList();
var nodes = 0;
var vcpu = 0d;
var memoryGb = 0d;
var diskGb = 0d;
var gpuMemoryGb = 0d;
for (var element : keysAndValues(elements)) {
switch (element.getFirst().toLowerCase()) {
case "nodes" -> nodes = parseInt(element.getSecond());
case "vcpu" -> vcpu = parseDouble(element.getSecond());
case "memorygb" -> memoryGb = parseDouble(element.getSecond());
case "diskgb" -> diskGb = parseDouble(element.getSecond());
case "gpumemorygb" -> gpuMemoryGb = parseDouble(element.getSecond());
default -> throw new IllegalArgumentException("Unknown resource type '" + element.getFirst() + '\'');
}
}
var nodeResources = new NodeResources(vcpu, memoryGb, diskGb, 0);
if (gpuMemoryGb > 0)
nodeResources = nodeResources.with(new NodeResources.GpuResources(1, gpuMemoryGb));
return new ClusterResources(nodes, 1, nodeResources);
}
private List<Pair<String, String>> keysAndValues(List<String> elements) {
return elements.stream().map(element -> {
var index = element.indexOf("=");
if (index <= 0 || index == element.length() - 1)
throw new IllegalArgumentException("Error in query parameter, expected '=' between key and value: '" + element + '\'');
return new Pair<>(element.substring(0, index), element.substring(index + 1));
})
.toList();
}
private Optional<Plan> plan(String element) {
return controller.serviceRegistry().planRegistry().plan(element);
}
private static SlimeJsonResponse response(PriceInformation priceInfo, PriceParameters priceParameters) {
var slime = new Slime();
Cursor cursor = slime.setObject();
var array = cursor.setArray("priceInfo");
addItem(array, supportLevelDescription(priceParameters), priceInfo.listPriceWithSupport());
addItem(array, "Enclave discount", priceInfo.enclaveDiscount());
addItem(array, "Volume discount", priceInfo.volumeDiscount());
addItem(array, "Committed spend", priceInfo.committedAmountDiscount());
setBigDecimal(cursor, "totalAmount", priceInfo.totalAmount());
return new SlimeJsonResponse(slime);
}
private static String supportLevelDescription(PriceParameters priceParameters) {
String supportLevel = priceParameters.pricingInfo.supportLevel().name();
return supportLevel.substring(0,1).toUpperCase() + supportLevel.substring(1).toLowerCase() + " support unit price";
}
private static void addItem(Cursor array, String name, BigDecimal amount) {
if (amount.compareTo(BigDecimal.ZERO) != 0) {
var o = array.addObject();
o.setString("description", name);
setBigDecimal(o, "amount", amount);
}
}
private static void setBigDecimal(Cursor cursor, String name, BigDecimal value) {
cursor.setString(name, value.setScale(2, RoundingMode.HALF_UP).toPlainString());
}
private record PriceParameters(List<ClusterResources> clusterResources, PricingInfo pricingInfo, Plan plan,
List<ApplicationResources> appResources) {
}
} | class PricingApiHandler extends ThreadedHttpRequestHandler {
private static final Logger log = Logger.getLogger(PricingApiHandler.class.getName());
private final Controller controller;
@Inject
public PricingApiHandler(Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
if (request.getMethod() != GET)
return methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
try {
return handleGET(request);
} catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/pricing/v1/pricing")) return pricing(request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse pricing(HttpRequest request) {
String rawQuery = request.getUri().getRawQuery();
var priceParameters = parseQuery(rawQuery);
PriceInformation price = calculatePrice(priceParameters);
return response(price, priceParameters);
}
private PriceInformation calculatePrice(PriceParameters priceParameters) {
var priceCalculator = controller.serviceRegistry().pricingController();
if (priceParameters.appResources == null)
return priceCalculator.price(priceParameters.clusterResources, priceParameters.pricingInfo, priceParameters.plan);
else
return priceCalculator.priceForApplications(priceParameters.appResources, priceParameters.pricingInfo, priceParameters.plan);
}
private PriceParameters parseQuery(String rawQuery) {
if (rawQuery == null) throw new IllegalArgumentException("No price information found in query");
List<String> elements = Arrays.stream(URLDecoder.decode(rawQuery, UTF_8).split("&")).toList();
if (keysAndValues(elements).stream().map(Pair::getFirst).toList().contains("resources"))
return parseQueryLegacy(elements);
else
return parseQuery(elements);
}
private PriceParameters parseQueryLegacy(List<String> elements) {
var supportLevel = SupportLevel.BASIC;
var enclave = false;
var committedSpend = 0d;
var plan = controller.serviceRegistry().planRegistry().defaultPlan();
List<ClusterResources> clusterResources = new ArrayList<>();
for (Pair<String, String> entry : keysAndValues(elements)) {
switch (entry.getFirst().toLowerCase()) {
case "committedspend" -> committedSpend = parseDouble(entry.getSecond());
case "enclave" -> enclave = Boolean.parseBoolean(entry.getSecond());
case "planid" -> plan = plan(entry.getSecond())
.orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
case "supportlevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
case "resources" -> clusterResources.add(clusterResources(entry.getSecond()));
default -> throw new IllegalArgumentException("Unknown query parameter '" + entry.getFirst() + '\'');
}
}
if (clusterResources.isEmpty()) throw new IllegalArgumentException("No cluster resources found in query");
PricingInfo pricingInfo = new PricingInfo(enclave, supportLevel, committedSpend);
return new PriceParameters(clusterResources, pricingInfo, plan, null);
}
private PriceParameters parseQuery(List<String> elements) {
var supportLevel = SupportLevel.BASIC;
var enclave = false;
var committedSpend = 0d;
var applicationName = "default";
var plan = controller.serviceRegistry().planRegistry().defaultPlan();
List<ApplicationResources> appResources = new ArrayList<>();
for (Pair<String, String> entry : keysAndValues(elements)) {
switch (entry.getFirst().toLowerCase()) {
case "committedspend" -> committedSpend = parseDouble(entry.getSecond());
case "planid" -> plan = plan(entry.getSecond())
.orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
case "supportlevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
case "application" -> appResources.add(applicationResources(entry.getSecond()));
default -> throw new IllegalArgumentException("Unknown query parameter '" + entry.getFirst() + '\'');
}
}
if (appResources.isEmpty()) throw new IllegalArgumentException("No application resources found in query");
PricingInfo pricingInfo = new PricingInfo(false, supportLevel, committedSpend);
return new PriceParameters(List.of(), pricingInfo, plan, appResources);
}
private ClusterResources clusterResources(String resourcesString) {
List<String> elements = Arrays.stream(resourcesString.split(",")).toList();
var nodes = 0;
var vcpu = 0d;
var memoryGb = 0d;
var diskGb = 0d;
var gpuMemoryGb = 0d;
for (var element : keysAndValues(elements)) {
switch (element.getFirst().toLowerCase()) {
case "nodes" -> nodes = parseInt(element.getSecond());
case "vcpu" -> vcpu = parseDouble(element.getSecond());
case "memorygb" -> memoryGb = parseDouble(element.getSecond());
case "diskgb" -> diskGb = parseDouble(element.getSecond());
case "gpumemorygb" -> gpuMemoryGb = parseDouble(element.getSecond());
default -> throw new IllegalArgumentException("Unknown resource type '" + element.getFirst() + '\'');
}
}
var nodeResources = new NodeResources(vcpu, memoryGb, diskGb, 0);
if (gpuMemoryGb > 0)
nodeResources = nodeResources.with(new NodeResources.GpuResources(1, gpuMemoryGb));
return new ClusterResources(nodes, 1, nodeResources);
}
private List<Pair<String, String>> keysAndValues(List<String> elements) {
return elements.stream().map(element -> {
var index = element.indexOf("=");
if (index <= 0 || index == element.length() - 1)
throw new IllegalArgumentException("Error in query parameter, expected '=' between key and value: '" + element + '\'');
return new Pair<>(element.substring(0, index), element.substring(index + 1));
})
.toList();
}
private Optional<Plan> plan(String element) {
return controller.serviceRegistry().planRegistry().plan(element);
}
private static SlimeJsonResponse response(PriceInformation priceInfo, PriceParameters priceParameters) {
var slime = new Slime();
Cursor cursor = slime.setObject();
var array = cursor.setArray("priceInfo");
addItem(array, supportLevelDescription(priceParameters), priceInfo.listPriceWithSupport());
addItem(array, "Enclave discount", priceInfo.enclaveDiscount());
addItem(array, "Volume discount", priceInfo.volumeDiscount());
addItem(array, "Committed spend", priceInfo.committedAmountDiscount());
setBigDecimal(cursor, "totalAmount", priceInfo.totalAmount());
return new SlimeJsonResponse(slime);
}
private static String supportLevelDescription(PriceParameters priceParameters) {
String supportLevel = priceParameters.pricingInfo.supportLevel().name();
return supportLevel.substring(0,1).toUpperCase() + supportLevel.substring(1).toLowerCase() + " support unit price";
}
private static void addItem(Cursor array, String name, BigDecimal amount) {
if (amount.compareTo(BigDecimal.ZERO) != 0) {
var o = array.addObject();
o.setString("description", name);
setBigDecimal(o, "amount", amount);
}
}
private static void setBigDecimal(Cursor cursor, String name, BigDecimal value) {
cursor.setString(name, value.setScale(2, RoundingMode.HALF_UP).toPlainString());
}
private record PriceParameters(List<ClusterResources> clusterResources, PricingInfo pricingInfo, Plan plan,
List<ApplicationResources> appResources) {
}
} |
Yeah, I thought about that as well earlier today, will look into it in a later PR | private ApplicationResources applicationResources(String appResourcesString) {
List<String> elements = Arrays.stream(appResourcesString.split(",")).toList();
var applicationName = "default";
var vcpu = 0d;
var memoryGb = 0d;
var diskGb = 0d;
var gpuMemoryGb = 0d;
var enclaveVcpu = 0d;
var enclaveMemoryGb = 0d;
var enclaveDiskGb = 0d;
var enclaveGpuMemoryGb = 0d;
for (var element : keysAndValues(elements)) {
switch (element.getFirst().toLowerCase()) {
case "name" -> applicationName = element.getSecond();
case "vcpu" -> vcpu = parseDouble(element.getSecond());
case "memorygb" -> memoryGb = parseDouble(element.getSecond());
case "diskgb" -> diskGb = parseDouble(element.getSecond());
case "gpumemorygb" -> gpuMemoryGb = parseDouble(element.getSecond());
case "enclavevcpu" -> enclaveVcpu = parseDouble(element.getSecond());
case "enclavememorygb" -> enclaveMemoryGb = parseDouble(element.getSecond());
case "enclavediskgb" -> enclaveDiskGb = parseDouble(element.getSecond());
case "enclavegpumemorygb" -> enclaveGpuMemoryGb = parseDouble(element.getSecond());
default -> throw new IllegalArgumentException("Unknown key '" + element.getFirst() + '\'');
}
}
return new ApplicationResources(applicationName,
valueOf(vcpu), valueOf(memoryGb), valueOf(diskGb), valueOf(gpuMemoryGb),
valueOf(enclaveVcpu), valueOf(enclaveMemoryGb), valueOf(enclaveDiskGb), valueOf(enclaveGpuMemoryGb));
} | case "vcpu" -> vcpu = parseDouble(element.getSecond()); | private ApplicationResources applicationResources(String appResourcesString) {
List<String> elements = Arrays.stream(appResourcesString.split(",")).toList();
var applicationName = "default";
var vcpu = 0d;
var memoryGb = 0d;
var diskGb = 0d;
var gpuMemoryGb = 0d;
var enclaveVcpu = 0d;
var enclaveMemoryGb = 0d;
var enclaveDiskGb = 0d;
var enclaveGpuMemoryGb = 0d;
for (var element : keysAndValues(elements)) {
switch (element.getFirst().toLowerCase()) {
case "name" -> applicationName = element.getSecond();
case "vcpu" -> vcpu = parseDouble(element.getSecond());
case "memorygb" -> memoryGb = parseDouble(element.getSecond());
case "diskgb" -> diskGb = parseDouble(element.getSecond());
case "gpumemorygb" -> gpuMemoryGb = parseDouble(element.getSecond());
case "enclavevcpu" -> enclaveVcpu = parseDouble(element.getSecond());
case "enclavememorygb" -> enclaveMemoryGb = parseDouble(element.getSecond());
case "enclavediskgb" -> enclaveDiskGb = parseDouble(element.getSecond());
case "enclavegpumemorygb" -> enclaveGpuMemoryGb = parseDouble(element.getSecond());
default -> throw new IllegalArgumentException("Unknown key '" + element.getFirst() + '\'');
}
}
return new ApplicationResources(applicationName,
valueOf(vcpu), valueOf(memoryGb), valueOf(diskGb), valueOf(gpuMemoryGb),
valueOf(enclaveVcpu), valueOf(enclaveMemoryGb), valueOf(enclaveDiskGb), valueOf(enclaveGpuMemoryGb));
} | class PricingApiHandler extends ThreadedHttpRequestHandler {
private static final Logger log = Logger.getLogger(PricingApiHandler.class.getName());
private final Controller controller;
@Inject
public PricingApiHandler(Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
if (request.getMethod() != GET)
return methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
try {
return handleGET(request);
} catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/pricing/v1/pricing")) return pricing(request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse pricing(HttpRequest request) {
String rawQuery = request.getUri().getRawQuery();
var priceParameters = parseQuery(rawQuery);
PriceInformation price = calculatePrice(priceParameters);
return response(price, priceParameters);
}
private PriceInformation calculatePrice(PriceParameters priceParameters) {
var priceCalculator = controller.serviceRegistry().pricingController();
if (priceParameters.appResources == null)
return priceCalculator.price(priceParameters.clusterResources, priceParameters.pricingInfo, priceParameters.plan);
else
return priceCalculator.priceForApplications(priceParameters.appResources, priceParameters.pricingInfo, priceParameters.plan);
}
private PriceParameters parseQuery(String rawQuery) {
if (rawQuery == null) throw new IllegalArgumentException("No price information found in query");
List<String> elements = Arrays.stream(URLDecoder.decode(rawQuery, UTF_8).split("&")).toList();
if (keysAndValues(elements).stream().map(Pair::getFirst).toList().contains("resources"))
return parseQueryLegacy(elements);
else
return parseQuery(elements);
}
private PriceParameters parseQueryLegacy(List<String> elements) {
var supportLevel = SupportLevel.BASIC;
var enclave = false;
var committedSpend = 0d;
var plan = controller.serviceRegistry().planRegistry().defaultPlan();
List<ClusterResources> clusterResources = new ArrayList<>();
for (Pair<String, String> entry : keysAndValues(elements)) {
switch (entry.getFirst().toLowerCase()) {
case "committedspend" -> committedSpend = parseDouble(entry.getSecond());
case "enclave" -> enclave = Boolean.parseBoolean(entry.getSecond());
case "planid" -> plan = plan(entry.getSecond())
.orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
case "supportlevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
case "resources" -> clusterResources.add(clusterResources(entry.getSecond()));
default -> throw new IllegalArgumentException("Unknown query parameter '" + entry.getFirst() + '\'');
}
}
if (clusterResources.isEmpty()) throw new IllegalArgumentException("No cluster resources found in query");
PricingInfo pricingInfo = new PricingInfo(enclave, supportLevel, committedSpend);
return new PriceParameters(clusterResources, pricingInfo, plan, null);
}
/**
 * Parses the current query format, where resources are given per application with 'application'
 * parameters. Enclave pricing is not supported in this format, so the PricingInfo is created
 * with enclave = false.
 *
 * @throws IllegalArgumentException on unknown parameters, unknown plan ids,
 *                                  or if no application resources are present
 */
private PriceParameters parseQuery(List<String> elements) {
    // Removed unused locals 'enclave' and 'applicationName': enclave was declared but the
    // PricingInfo below deliberately hard-codes false, and applicationName was never read.
    var supportLevel = SupportLevel.BASIC;
    var committedSpend = 0d;
    var plan = controller.serviceRegistry().planRegistry().defaultPlan();
    List<ApplicationResources> appResources = new ArrayList<>();
    for (Pair<String, String> entry : keysAndValues(elements)) {
        switch (entry.getFirst().toLowerCase()) {
            case "committedspend" -> committedSpend = parseDouble(entry.getSecond());
            case "planid" -> plan = plan(entry.getSecond())
                    .orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
            case "supportlevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
            case "application" -> appResources.add(applicationResources(entry.getSecond()));
            default -> throw new IllegalArgumentException("Unknown query parameter '" + entry.getFirst() + '\'');
        }
    }
    if (appResources.isEmpty()) throw new IllegalArgumentException("No application resources found in query");
    PricingInfo pricingInfo = new PricingInfo(false, supportLevel, committedSpend);
    return new PriceParameters(List.of(), pricingInfo, plan, appResources);
}
/**
 * Parses one comma-separated resources element, e.g. "nodes=4,vcpu=2,memoryGb=8,diskGb=100",
 * into a ClusterResources with a single group. A positive gpuMemoryGb adds one GPU.
 *
 * @throws IllegalArgumentException on unknown resource keys
 */
private ClusterResources clusterResources(String resourcesString) {
    int nodes = 0;
    double vcpu = 0d;
    double memoryGb = 0d;
    double diskGb = 0d;
    double gpuMemoryGb = 0d;
    for (Pair<String, String> resource : keysAndValues(List.of(resourcesString.split(",")))) {
        String value = resource.getSecond();
        switch (resource.getFirst().toLowerCase()) {
            case "nodes" -> nodes = parseInt(value);
            case "vcpu" -> vcpu = parseDouble(value);
            case "memorygb" -> memoryGb = parseDouble(value);
            case "diskgb" -> diskGb = parseDouble(value);
            case "gpumemorygb" -> gpuMemoryGb = parseDouble(value);
            default -> throw new IllegalArgumentException("Unknown resource type '" + resource.getFirst() + '\'');
        }
    }
    NodeResources nodeResources = new NodeResources(vcpu, memoryGb, diskGb, 0);
    if (gpuMemoryGb > 0)
        nodeResources = nodeResources.with(new NodeResources.GpuResources(1, gpuMemoryGb));
    return new ClusterResources(nodes, 1, nodeResources);
}
/**
 * Splits each element on its first '=' into a (key, value) pair.
 *
 * @throws IllegalArgumentException if an element has no '=', or an empty key or value
 */
private List<Pair<String, String>> keysAndValues(List<String> elements) {
    List<Pair<String, String>> pairs = new ArrayList<>();
    for (String element : elements) {
        int separator = element.indexOf("=");
        if (separator <= 0 || separator == element.length() - 1)
            throw new IllegalArgumentException("Error in query parameter, expected '=' between key and value: '" + element + '\'');
        pairs.add(new Pair<>(element.substring(0, separator), element.substring(separator + 1)));
    }
    return List.copyOf(pairs);
}
/** Looks up the plan with the given id in the controller's plan registry; empty if unknown. */
private Optional<Plan> plan(String element) {
return controller.serviceRegistry().planRegistry().plan(element);
}
/**
 * Renders the price information as JSON: a 'priceInfo' array with one {description, amount}
 * entry per non-zero amount (zero amounts are skipped by addItem), plus a 'totalAmount' field.
 */
private static SlimeJsonResponse response(PriceInformation priceInfo, PriceParameters priceParameters) {
var slime = new Slime();
Cursor cursor = slime.setObject();
var array = cursor.setArray("priceInfo");
addItem(array, supportLevelDescription(priceParameters), priceInfo.listPriceWithSupport());
addItem(array, "Enclave discount", priceInfo.enclaveDiscount());
addItem(array, "Volume discount", priceInfo.volumeDiscount());
addItem(array, "Committed spend", priceInfo.committedAmountDiscount());
setBigDecimal(cursor, "totalAmount", priceInfo.totalAmount());
return new SlimeJsonResponse(slime);
}
/** Returns e.g. "Basic support unit price" for support level BASIC. */
private static String supportLevelDescription(PriceParameters priceParameters) {
    String levelName = priceParameters.pricingInfo.supportLevel().name();
    String first = levelName.substring(0, 1).toUpperCase();
    String rest = levelName.substring(1).toLowerCase();
    return first + rest + " support unit price";
}
/** Adds a {description, amount} object to the array, unless the amount is zero. */
private static void addItem(Cursor array, String name, BigDecimal amount) {
    if (amount.compareTo(BigDecimal.ZERO) == 0) return;  // zero items are omitted from the response
    Cursor item = array.addObject();
    item.setString("description", name);
    setBigDecimal(item, "amount", amount);
}
/** Writes the value under the given name as a plain string with two decimals, rounding half up. */
private static void setBigDecimal(Cursor cursor, String name, BigDecimal value) {
cursor.setString(name, value.setScale(2, RoundingMode.HALF_UP).toPlainString());
}
/**
 * Parsed price query parameters. Exactly one of clusterResources (legacy format) and
 * appResources (current format) carries the resources: appResources is null for legacy
 * queries, and clusterResources is an empty list for current-format queries.
 */
private record PriceParameters(List<ClusterResources> clusterResources, PricingInfo pricingInfo, Plan plan,
List<ApplicationResources> appResources) {
}
} | class PricingApiHandler extends ThreadedHttpRequestHandler {
private static final Logger log = Logger.getLogger(PricingApiHandler.class.getName());
private final Controller controller;
@Inject
public PricingApiHandler(Context parentCtx, Controller controller) {
super(parentCtx);
this.controller = controller;
}
@Override
public HttpResponse handle(HttpRequest request) {
if (request.getMethod() != GET)
return methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
try {
return handleGET(request);
} catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
} catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/pricing/v1/pricing")) return pricing(request);
return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse pricing(HttpRequest request) {
String rawQuery = request.getUri().getRawQuery();
var priceParameters = parseQuery(rawQuery);
PriceInformation price = calculatePrice(priceParameters);
return response(price, priceParameters);
}
/** Delegates to the pricing controller, choosing the legacy (per-cluster) or current (per-application) calculation. */
private PriceInformation calculatePrice(PriceParameters priceParameters) {
    var calculator = controller.serviceRegistry().pricingController();
    if (priceParameters.appResources != null)
        return calculator.priceForApplications(priceParameters.appResources, priceParameters.pricingInfo, priceParameters.plan);
    return calculator.price(priceParameters.clusterResources, priceParameters.pricingInfo, priceParameters.plan);
}
private PriceParameters parseQuery(String rawQuery) {
if (rawQuery == null) throw new IllegalArgumentException("No price information found in query");
List<String> elements = Arrays.stream(URLDecoder.decode(rawQuery, UTF_8).split("&")).toList();
if (keysAndValues(elements).stream().map(Pair::getFirst).toList().contains("resources"))
return parseQueryLegacy(elements);
else
return parseQuery(elements);
}
private PriceParameters parseQueryLegacy(List<String> elements) {
var supportLevel = SupportLevel.BASIC;
var enclave = false;
var committedSpend = 0d;
var plan = controller.serviceRegistry().planRegistry().defaultPlan();
List<ClusterResources> clusterResources = new ArrayList<>();
for (Pair<String, String> entry : keysAndValues(elements)) {
switch (entry.getFirst().toLowerCase()) {
case "committedspend" -> committedSpend = parseDouble(entry.getSecond());
case "enclave" -> enclave = Boolean.parseBoolean(entry.getSecond());
case "planid" -> plan = plan(entry.getSecond())
.orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
case "supportlevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
case "resources" -> clusterResources.add(clusterResources(entry.getSecond()));
default -> throw new IllegalArgumentException("Unknown query parameter '" + entry.getFirst() + '\'');
}
}
if (clusterResources.isEmpty()) throw new IllegalArgumentException("No cluster resources found in query");
PricingInfo pricingInfo = new PricingInfo(enclave, supportLevel, committedSpend);
return new PriceParameters(clusterResources, pricingInfo, plan, null);
}
private PriceParameters parseQuery(List<String> elements) {
var supportLevel = SupportLevel.BASIC;
var enclave = false;
var committedSpend = 0d;
var applicationName = "default";
var plan = controller.serviceRegistry().planRegistry().defaultPlan();
List<ApplicationResources> appResources = new ArrayList<>();
for (Pair<String, String> entry : keysAndValues(elements)) {
switch (entry.getFirst().toLowerCase()) {
case "committedspend" -> committedSpend = parseDouble(entry.getSecond());
case "planid" -> plan = plan(entry.getSecond())
.orElseThrow(() -> new IllegalArgumentException("Unknown plan id " + entry.getSecond()));
case "supportlevel" -> supportLevel = SupportLevel.valueOf(entry.getSecond().toUpperCase());
case "application" -> appResources.add(applicationResources(entry.getSecond()));
default -> throw new IllegalArgumentException("Unknown query parameter '" + entry.getFirst() + '\'');
}
}
if (appResources.isEmpty()) throw new IllegalArgumentException("No application resources found in query");
PricingInfo pricingInfo = new PricingInfo(false, supportLevel, committedSpend);
return new PriceParameters(List.of(), pricingInfo, plan, appResources);
}
private ClusterResources clusterResources(String resourcesString) {
List<String> elements = Arrays.stream(resourcesString.split(",")).toList();
var nodes = 0;
var vcpu = 0d;
var memoryGb = 0d;
var diskGb = 0d;
var gpuMemoryGb = 0d;
for (var element : keysAndValues(elements)) {
switch (element.getFirst().toLowerCase()) {
case "nodes" -> nodes = parseInt(element.getSecond());
case "vcpu" -> vcpu = parseDouble(element.getSecond());
case "memorygb" -> memoryGb = parseDouble(element.getSecond());
case "diskgb" -> diskGb = parseDouble(element.getSecond());
case "gpumemorygb" -> gpuMemoryGb = parseDouble(element.getSecond());
default -> throw new IllegalArgumentException("Unknown resource type '" + element.getFirst() + '\'');
}
}
var nodeResources = new NodeResources(vcpu, memoryGb, diskGb, 0);
if (gpuMemoryGb > 0)
nodeResources = nodeResources.with(new NodeResources.GpuResources(1, gpuMemoryGb));
return new ClusterResources(nodes, 1, nodeResources);
}
private List<Pair<String, String>> keysAndValues(List<String> elements) {
return elements.stream().map(element -> {
var index = element.indexOf("=");
if (index <= 0 || index == element.length() - 1)
throw new IllegalArgumentException("Error in query parameter, expected '=' between key and value: '" + element + '\'');
return new Pair<>(element.substring(0, index), element.substring(index + 1));
})
.toList();
}
private Optional<Plan> plan(String element) {
return controller.serviceRegistry().planRegistry().plan(element);
}
private static SlimeJsonResponse response(PriceInformation priceInfo, PriceParameters priceParameters) {
var slime = new Slime();
Cursor cursor = slime.setObject();
var array = cursor.setArray("priceInfo");
addItem(array, supportLevelDescription(priceParameters), priceInfo.listPriceWithSupport());
addItem(array, "Enclave discount", priceInfo.enclaveDiscount());
addItem(array, "Volume discount", priceInfo.volumeDiscount());
addItem(array, "Committed spend", priceInfo.committedAmountDiscount());
setBigDecimal(cursor, "totalAmount", priceInfo.totalAmount());
return new SlimeJsonResponse(slime);
}
private static String supportLevelDescription(PriceParameters priceParameters) {
String supportLevel = priceParameters.pricingInfo.supportLevel().name();
return supportLevel.substring(0,1).toUpperCase() + supportLevel.substring(1).toLowerCase() + " support unit price";
}
private static void addItem(Cursor array, String name, BigDecimal amount) {
if (amount.compareTo(BigDecimal.ZERO) != 0) {
var o = array.addObject();
o.setString("description", name);
setBigDecimal(o, "amount", amount);
}
}
private static void setBigDecimal(Cursor cursor, String name, BigDecimal value) {
cursor.setString(name, value.setScale(2, RoundingMode.HALF_UP).toPlainString());
}
private record PriceParameters(List<ClusterResources> clusterResources, PricingInfo pricingInfo, Plan plan,
List<ApplicationResources> appResources) {
}
} |
This is called for _all_ tasks, regardless of whether they are executed. This means that the messenger thread would wipe the message, including the call stack, before the RPC layer was able to read it. | public void handleReply(Reply reply) {
if (destroyed.get()) {
reply.discard();
return;
}
long seqId = (Long)reply.getContext();
if (reply.getTrace().shouldTrace(TraceLevel.COMPONENT)) {
reply.getTrace().trace(TraceLevel.COMPONENT,
"Sequencer received reply with sequence id '" + seqId + "'.");
}
Message msg = null;
synchronized (this) {
Queue<Message> queue = seqMap.get(seqId);
if (queue == null || queue.isEmpty()) {
seqMap.remove(seqId);
} else {
msg = queue.remove();
}
}
if (msg != null) {
sequencedSend(msg);
}
ReplyHandler handler = reply.popHandler();
handler.handleReply(reply);
} | } | public void handleReply(Reply reply) {
if (destroyed.get()) {
reply.discard();
return;
}
long seqId = (Long)reply.getContext();
if (reply.getTrace().shouldTrace(TraceLevel.COMPONENT)) {
reply.getTrace().trace(TraceLevel.COMPONENT,
"Sequencer received reply with sequence id '" + seqId + "'.");
}
Message msg = null;
synchronized (this) {
Queue<Message> queue = seqMap.get(seqId);
if (queue == null || queue.isEmpty()) {
seqMap.remove(seqId);
} else {
msg = queue.remove();
}
}
if (msg != null) {
sequencedSend(msg);
}
ReplyHandler handler = reply.popHandler();
handler.handleReply(reply);
} | class Sequencer implements MessageHandler, ReplyHandler {
private final AtomicBoolean destroyed = new AtomicBoolean(false);
private final MessageHandler sender;
private final Map<Long, Queue<Message>> seqMap = new HashMap<>();
/**
* Constructs a new sequencer on top of the given async sender.
*
* @param sender The underlying sender.
*/
public Sequencer(MessageHandler sender) {
this.sender = sender;
}
/**
* Sets the destroyed flag to true. The very first time this method is called, it cleans up all its dependencies.
* Even if you retain a reference to this object, all of its content is allowed to be garbage collected.
*
* @return True if content existed and was destroyed.
*/
public boolean destroy() {
// getAndSet ensures only the very first caller performs the cleanup below.
if (!destroyed.getAndSet(true)) {
synchronized (this) {
// Discard all messages still queued behind an in-flight message. A null queue
// means the sequence id is in flight with nothing queued behind it (see filter()).
for (Queue<Message> queue : seqMap.values()) {
if (queue != null) {
for (Message msg : queue) {
msg.discard();
}
}
}
seqMap.clear();
}
return true;
}
return false;
}
/**
* Filter a message against the current sequencing state. If this method returns true, the message has been cleared
* for sending and its sequencing information has been added to the state. If this method returns false, it has been
* queued for later sending due to sequencing restrictions. This method also sets the sequence id as message
* context.
*
* @param msg the message to filter
* @return true if the message was consumed
*/
private boolean filter(Message msg) {
long seqId = msg.getSequenceId();
// The sequence id is stored as context so handleReply() can find the queue for this id.
msg.setContext(seqId);
synchronized (this) {
if (seqMap.containsKey(seqId)) {
// Another message with this id is already in flight: queue this one instead of sending.
Queue<Message> queue = seqMap.get(seqId);
if (queue == null) {
// The map held only the in-flight marker (a null value); create the queue lazily.
queue = new LinkedList<>();
seqMap.put(seqId, queue);
}
if (msg.getTrace().shouldTrace(TraceLevel.COMPONENT)) {
msg.getTrace().trace(TraceLevel.COMPONENT,
"Sequencer queued message with sequence id '" + seqId + "'.");
}
queue.add(msg);
return false;
}
// Nothing in flight for this id: mark it as in flight (null = empty queue) and clear for sending.
seqMap.put(seqId, null);
}
return true;
}
/**
 * Forwards a message that has been cleared for sending to the underlying sender,
 * first pushing this sequencer as reply handler so that handleReply() can release
 * the next queued message for the same sequence id when the reply arrives.
 *
 * @param msg The message to forward.
 */
private void sequencedSend(Message msg) {
if (msg.getTrace().shouldTrace(TraceLevel.COMPONENT)) {
msg.getTrace().trace(TraceLevel.COMPONENT,
"Sequencer sending message with sequence id '" + msg.getContext() + "'.");
}
msg.pushHandler(this);
sender.handleMessage(msg);
}
/**
 * Entry point for all messages sent through the owning source session. Messages without a
 * sequence id bypass sequencing and go straight to the underlying sender; sequenced messages
 * are sent immediately only when no other message with the same id is in flight, and are
 * queued otherwise. Messages arriving after destroy() are discarded.
 *
 * @param msg the message to send.
 */
@Override
public void handleMessage(Message msg) {
    if (destroyed.get()) {
        msg.discard();
        return;
    }
    if (!msg.hasSequenceId()) {
        sender.handleMessage(msg);
        return;
    }
    if (filter(msg)) {
        sequencedSend(msg);
    }
}
/**
* Lookup the sequencing id of an incoming reply to pop the front of the corresponding queue, and then send the next
* message in line, if any.
*
* @param reply The reply received.
*/
@Override
} | class Sequencer implements MessageHandler, ReplyHandler {
private final AtomicBoolean destroyed = new AtomicBoolean(false);
private final MessageHandler sender;
private final Map<Long, Queue<Message>> seqMap = new HashMap<>();
/**
* Constructs a new sequencer on top of the given async sender.
*
* @param sender The underlying sender.
*/
public Sequencer(MessageHandler sender) {
this.sender = sender;
}
/**
* Sets the destroyed flag to true. The very first time this method is called, it cleans up all its dependencies.
* Even if you retain a reference to this object, all of its content is allowed to be garbage collected.
*
* @return True if content existed and was destroyed.
*/
public boolean destroy() {
if (!destroyed.getAndSet(true)) {
synchronized (this) {
for (Queue<Message> queue : seqMap.values()) {
if (queue != null) {
for (Message msg : queue) {
msg.discard();
}
}
}
seqMap.clear();
}
return true;
}
return false;
}
/**
* Filter a message against the current sequencing state. If this method returns true, the message has been cleared
* for sending and its sequencing information has been added to the state. If this method returns false, it has been
* queued for later sending due to sequencing restrictions. This method also sets the sequence id as message
* context.
*
* @param msg the message to filter
* @return true if the message was consumed
*/
private boolean filter(Message msg) {
long seqId = msg.getSequenceId();
msg.setContext(seqId);
synchronized (this) {
if (seqMap.containsKey(seqId)) {
Queue<Message> queue = seqMap.get(seqId);
if (queue == null) {
queue = new LinkedList<>();
seqMap.put(seqId, queue);
}
if (msg.getTrace().shouldTrace(TraceLevel.COMPONENT)) {
msg.getTrace().trace(TraceLevel.COMPONENT,
"Sequencer queued message with sequence id '" + seqId + "'.");
}
queue.add(msg);
return false;
}
seqMap.put(seqId, null);
}
return true;
}
/**
* Internal method for forwarding a sequenced message to the underlying sender.
*
* @param msg The message to forward.
*/
private void sequencedSend(Message msg) {
if (msg.getTrace().shouldTrace(TraceLevel.COMPONENT)) {
msg.getTrace().trace(TraceLevel.COMPONENT,
"Sequencer sending message with sequence id '" + msg.getContext() + "'.");
}
msg.pushHandler(this);
sender.handleMessage(msg);
}
/**
* All messages pass through this handler when being sent by the owning source session. In case the message has no
* sequencing-id, it is simply passed through to the next handler in the chain. Sequenced messages are sent only if
* there is no queue for their id, otherwise they are queued.
*
* @param msg the message to send.
*/
@Override
public void handleMessage(Message msg) {
if (destroyed.get()) {
msg.discard();
return;
}
if (msg.hasSequenceId()) {
if (filter(msg)) {
sequencedSend(msg);
}
} else {
sender.handleMessage(msg);
}
}
/**
* Lookup the sequencing id of an incoming reply to pop the front of the corresponding queue, and then send the next
* message in line, if any.
*
* @param reply The reply received.
*/
@Override
} |
Removing method as instructed by comment, @bratseth | public ClusterSpec withExclusivity(boolean exclusive) {
return new ClusterSpec(type, id, groupId, vespaVersion, exclusive, provisionForApplication, combinedId, dockerImageRepo, zoneEndpoint, stateful);
} | return new ClusterSpec(type, id, groupId, vespaVersion, exclusive, provisionForApplication, combinedId, dockerImageRepo, zoneEndpoint, stateful); | public ClusterSpec withExclusivity(boolean exclusive) {
return new ClusterSpec(type, id, groupId, vespaVersion, exclusive, combinedId, dockerImageRepo, zoneEndpoint, stateful);
} | class ClusterSpec {
private final Type type;
private final Id id;
/** The group id of these hosts, or empty if this represents a request for hosts */
private final Optional<Group> groupId;
private final Version vespaVersion;
private final boolean exclusive;
private final boolean provisionForApplication;
private final Optional<Id> combinedId;
private final Optional<DockerImage> dockerImageRepo;
private final ZoneEndpoint zoneEndpoint;
private final boolean stateful;
private ClusterSpec(Type type, Id id, Optional<Group> groupId, Version vespaVersion, boolean exclusive,
boolean provisionForApplication, Optional<Id> combinedId, Optional<DockerImage> dockerImageRepo,
ZoneEndpoint zoneEndpoint, boolean stateful) {
this.type = type;
this.id = id;
this.groupId = groupId;
this.vespaVersion = Objects.requireNonNull(vespaVersion, "vespaVersion cannot be null");
this.exclusive = exclusive;
this.provisionForApplication = provisionForApplication;
if (type == Type.combined) {
if (combinedId.isEmpty()) throw new IllegalArgumentException("combinedId must be set for cluster of type " + type);
} else {
if (combinedId.isPresent()) throw new IllegalArgumentException("combinedId must be empty for cluster of type " + type);
}
this.combinedId = combinedId;
if (dockerImageRepo.isPresent() && dockerImageRepo.get().tag().isPresent())
throw new IllegalArgumentException("dockerImageRepo is not allowed to have a tag");
this.dockerImageRepo = dockerImageRepo;
if (type.isContent() && !stateful) {
throw new IllegalArgumentException("Cluster of type " + type + " must be stateful");
}
this.zoneEndpoint = Objects.requireNonNull(zoneEndpoint);
this.stateful = stateful;
}
/** Returns the cluster type */
public Type type() { return type; }
/** Returns the cluster id */
public Id id() { return id; }
/** Returns the docker image repository part of a docker image we want this cluster to run */
public Optional<DockerImage> dockerImageRepo() { return dockerImageRepo; }
/** Returns the docker image (repository + vespa version) we want this cluster to run */
public Optional<String> dockerImage() { return dockerImageRepo.map(repo -> repo.withTag(vespaVersion).asString()); }
/** Returns any additional zone endpoint settings for application container clusters. */
public ZoneEndpoint zoneEndpoint() { return zoneEndpoint; }
/** Returns the version of Vespa that we want this cluster to run */
public Version vespaVersion() { return vespaVersion; }
/** Returns the group within the cluster this specifies, or empty to specify the whole cluster */
public Optional<Group> group() { return groupId; }
/** Returns the ID of the container cluster that is combined with this. This is only present for combined clusters */
public Optional<Id> combinedId() {
return combinedId;
}
/**
* Returns whether the physical hosts running the nodes of this application can
* also run nodes of other applications. Using exclusive nodes for containers increases security and cost.
*/
public boolean isExclusive() { return exclusive; }
/** Returns whether the physical hosts must be provisioned specifically for this application. */
public boolean provisionForApplication() { return provisionForApplication; }
/** Returns whether this cluster has state */
public boolean isStateful() { return stateful; }
public ClusterSpec with(Optional<Group> newGroup) {
return new ClusterSpec(type, id, newGroup, vespaVersion, exclusive, provisionForApplication, combinedId, dockerImageRepo, zoneEndpoint, stateful);
}
public ClusterSpec withProvisionForApplication(boolean provisionForApplication) {
return new ClusterSpec(type, id, groupId, vespaVersion, exclusive, provisionForApplication, combinedId, dockerImageRepo, zoneEndpoint, stateful);
}
/** Creates a ClusterSpec when requesting a cluster */
public static Builder request(Type type, Id id) {
return new Builder(type, id);
}
/** Creates a ClusterSpec for an existing cluster, group id and Vespa version needs to be set */
public static Builder specification(Type type, Id id) {
return new Builder(type, id);
}
public static class Builder {
private final Type type;
private final Id id;
private Optional<Group> groupId = Optional.empty();
private Optional<DockerImage> dockerImageRepo = Optional.empty();
private Version vespaVersion;
private boolean exclusive = false;
private boolean provisionForApplication = false;
private Optional<Id> combinedId = Optional.empty();
private ZoneEndpoint zoneEndpoint = ZoneEndpoint.defaultEndpoint;
private boolean stateful;
private Builder(Type type, Id id) {
this.type = type;
this.id = id;
this.stateful = type.isContent();
}
public ClusterSpec build() {
return new ClusterSpec(type, id, groupId, vespaVersion, exclusive, provisionForApplication, combinedId, dockerImageRepo, zoneEndpoint, stateful);
}
public Builder group(Group groupId) {
this.groupId = Optional.ofNullable(groupId);
return this;
}
public Builder vespaVersion(Version vespaVersion) {
this.vespaVersion = vespaVersion;
return this;
}
public Builder vespaVersion(String vespaVersion) {
this.vespaVersion = Version.fromString(vespaVersion);
return this;
}
public Builder exclusive(boolean exclusive) {
this.exclusive = exclusive;
return this;
}
public Builder provisionForApplication(boolean provisionForApplication) {
this.provisionForApplication = provisionForApplication;
return this;
}
public Builder combinedId(Optional<Id> combinedId) {
this.combinedId = combinedId;
return this;
}
public Builder dockerImageRepository(Optional<DockerImage> dockerImageRepo) {
this.dockerImageRepo = dockerImageRepo;
return this;
}
public Builder loadBalancerSettings(ZoneEndpoint zoneEndpoint) {
this.zoneEndpoint = zoneEndpoint;
return this;
}
public Builder stateful(boolean stateful) {
this.stateful = stateful;
return this;
}
}
@Override
public String toString() {
return type + " " + id + " " + groupId.map(group -> group + " ").orElse("") + vespaVersion + (dockerImageRepo.map(repo -> " " + repo).orElse(""));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ClusterSpec that = (ClusterSpec) o;
return exclusive == that.exclusive &&
provisionForApplication == that.provisionForApplication &&
stateful == that.stateful &&
type == that.type &&
id.equals(that.id) &&
groupId.equals(that.groupId) &&
vespaVersion.equals(that.vespaVersion) &&
combinedId.equals(that.combinedId) &&
dockerImageRepo.equals(that.dockerImageRepo) &&
zoneEndpoint.equals(that.zoneEndpoint);
}
@Override
public int hashCode() {
return Objects.hash(type, id, groupId, vespaVersion, exclusive, provisionForApplication, combinedId, dockerImageRepo, zoneEndpoint, stateful);
}
/**
* Returns whether this satisfies other for allocation purposes. Only considers cluster ID and type, other fields
* are ignored.
*/
public boolean satisfies(ClusterSpec other) {
if ( ! other.id.equals(this.id)) return false;
// Content-bearing types (content, combined) are interchangeable with each other,
// but never with non-content types; non-content types must match exactly.
if (other.type.isContent() || this.type.isContent())
return other.type.isContent() == this.type.isContent();
return other.type.equals(this.type);
}
/** A cluster type */
public enum Type {
admin,
container,
content,
combined;
/** Returns whether this runs a content cluster */
public boolean isContent() {
return this == content || this == combined;
}
/** Returns whether this runs a container cluster */
public boolean isContainer() {
return this == container || this == combined;
}
public static Type from(String typeName) {
return switch (typeName) {
case "admin" -> admin;
case "container" -> container;
case "content" -> content;
case "combined" -> combined;
default -> throw new IllegalArgumentException("Illegal cluster type '" + typeName + "'");
};
}
}
public static final class Id {
private final String id;
public Id(String id) {
this.id = Objects.requireNonNull(id, "Id cannot be null");
}
public static Id from(String id) {
return new Id(id);
}
public String value() { return id; }
@Override
public String toString() { return "cluster '" + id + "'"; }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
return ((Id)o).id.equals(this.id);
}
@Override
public int hashCode() {
return id.hashCode();
}
}
/** Identifier of a group within a cluster */
public static final class Group {
private final int index;
private Group(int index) {
this.index = index;
}
public static Group from(int index) { return new Group(index); }
public int index() { return index; }
@Override
public String toString() { return "group " + index; }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
return ((Group)o).index == this.index;
}
@Override
public int hashCode() { return index; }
}
} | class ClusterSpec {
private final Type type;
private final Id id;
/** The group id of these hosts, or empty if this represents a request for hosts */
private final Optional<Group> groupId;
private final Version vespaVersion;
private final boolean exclusive;
private final Optional<Id> combinedId;
private final Optional<DockerImage> dockerImageRepo;
private final ZoneEndpoint zoneEndpoint;
private final boolean stateful;
private ClusterSpec(Type type, Id id, Optional<Group> groupId, Version vespaVersion, boolean exclusive,
Optional<Id> combinedId, Optional<DockerImage> dockerImageRepo,
ZoneEndpoint zoneEndpoint, boolean stateful) {
this.type = type;
this.id = id;
this.groupId = groupId;
this.vespaVersion = Objects.requireNonNull(vespaVersion, "vespaVersion cannot be null");
this.exclusive = exclusive;
if (type == Type.combined) {
if (combinedId.isEmpty()) throw new IllegalArgumentException("combinedId must be set for cluster of type " + type);
} else {
if (combinedId.isPresent()) throw new IllegalArgumentException("combinedId must be empty for cluster of type " + type);
}
this.combinedId = combinedId;
if (dockerImageRepo.isPresent() && dockerImageRepo.get().tag().isPresent())
throw new IllegalArgumentException("dockerImageRepo is not allowed to have a tag");
this.dockerImageRepo = dockerImageRepo;
if (type.isContent() && !stateful) {
throw new IllegalArgumentException("Cluster of type " + type + " must be stateful");
}
this.zoneEndpoint = Objects.requireNonNull(zoneEndpoint);
this.stateful = stateful;
}
/** Returns the cluster type */
public Type type() { return type; }
/** Returns the cluster id */
public Id id() { return id; }
/** Returns the docker image repository part of a docker image we want this cluster to run */
public Optional<DockerImage> dockerImageRepo() { return dockerImageRepo; }
/** Returns the docker image (repository + vespa version) we want this cluster to run */
public Optional<String> dockerImage() { return dockerImageRepo.map(repo -> repo.withTag(vespaVersion).asString()); }
/** Returns any additional zone endpoint settings for application container clusters. */
public ZoneEndpoint zoneEndpoint() { return zoneEndpoint; }
/** Returns the version of Vespa that we want this cluster to run */
public Version vespaVersion() { return vespaVersion; }
/** Returns the group within the cluster this specifies, or empty to specify the whole cluster */
public Optional<Group> group() { return groupId; }
/** Returns the ID of the container cluster that is combined with this. This is only present for combined clusters */
public Optional<Id> combinedId() {
return combinedId;
}
/**
* Returns whether the physical hosts running the nodes of this application can
* also run nodes of other applications. Using exclusive nodes for containers increases security and cost.
*/
public boolean isExclusive() { return exclusive; }
/** Returns whether this cluster has state */
public boolean isStateful() { return stateful; }
public ClusterSpec with(Optional<Group> newGroup) {
return new ClusterSpec(type, id, newGroup, vespaVersion, exclusive, combinedId, dockerImageRepo, zoneEndpoint, stateful);
}
/** Creates a ClusterSpec when requesting a cluster */
public static Builder request(Type type, Id id) {
return new Builder(type, id);
}
/** Creates a ClusterSpec for an existing cluster, group id and Vespa version needs to be set */
public static Builder specification(Type type, Id id) {
return new Builder(type, id);
}
/** Builder for {@link ClusterSpec}; obtain via {@code ClusterSpec.request} or {@code ClusterSpec.specification}. */
public static class Builder {
private final Type type;
private final Id id;
private Optional<Group> groupId = Optional.empty();
private Optional<DockerImage> dockerImageRepo = Optional.empty();
// No default: if never set, build() passes null to the ClusterSpec constructor.
private Version vespaVersion;
private boolean exclusive = false;
// NOTE(review): written by provisionForApplication(...) but never read — build() does not pass
// it to the constructor. Looks like a dead field; confirm before removing.
private boolean provisionForApplication = false;
private Optional<Id> combinedId = Optional.empty();
private ZoneEndpoint zoneEndpoint = ZoneEndpoint.defaultEndpoint;
private boolean stateful;
private Builder(Type type, Id id) {
this.type = type;
this.id = id;
// Content (and combined) clusters are stateful by default.
this.stateful = type.isContent();
}
public ClusterSpec build() {
return new ClusterSpec(type, id, groupId, vespaVersion, exclusive, combinedId, dockerImageRepo, zoneEndpoint, stateful);
}
// A null group is tolerated and maps to "no group".
public Builder group(Group groupId) {
this.groupId = Optional.ofNullable(groupId);
return this;
}
public Builder vespaVersion(Version vespaVersion) {
this.vespaVersion = vespaVersion;
return this;
}
public Builder vespaVersion(String vespaVersion) {
this.vespaVersion = Version.fromString(vespaVersion);
return this;
}
public Builder exclusive(boolean exclusive) {
this.exclusive = exclusive;
return this;
}
public Builder provisionForApplication(boolean provisionForApplication) {
this.provisionForApplication = provisionForApplication;
return this;
}
public Builder combinedId(Optional<Id> combinedId) {
this.combinedId = combinedId;
return this;
}
public Builder dockerImageRepository(Optional<DockerImage> dockerImageRepo) {
this.dockerImageRepo = dockerImageRepo;
return this;
}
public Builder loadBalancerSettings(ZoneEndpoint zoneEndpoint) {
this.zoneEndpoint = zoneEndpoint;
return this;
}
public Builder stateful(boolean stateful) {
this.stateful = stateful;
return this;
}
}
/** Human-readable summary: type, id, optional group, version, and optional docker image. */
@Override
public String toString() {
    String groupPart = groupId.map(group -> group + " ").orElse("");
    String imagePart = dockerImageRepo.map(repo -> " " + repo).orElse("");
    return type + " " + id + " " + groupPart + vespaVersion + imagePart;
}
/** Two ClusterSpecs are equal when every field matches, including version and zone endpoint settings. */
@Override
public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    ClusterSpec other = (ClusterSpec) o;
    if (exclusive != other.exclusive) return false;
    if (stateful != other.stateful) return false;
    if (type != other.type) return false;
    if ( ! id.equals(other.id)) return false;
    if ( ! groupId.equals(other.groupId)) return false;
    if ( ! vespaVersion.equals(other.vespaVersion)) return false;
    if ( ! combinedId.equals(other.combinedId)) return false;
    if ( ! dockerImageRepo.equals(other.dockerImageRepo)) return false;
    return zoneEndpoint.equals(other.zoneEndpoint);
}
/** Hashes the same fields, in the same order, as compared by equals(). */
@Override
public int hashCode() {
    // Objects.hash(varargs) is Arrays.hashCode over the argument array, so passing
    // the array directly yields the exact same value as listing the fields inline.
    Object[] fields = { type, id, groupId, vespaVersion, exclusive, combinedId, dockerImageRepo, zoneEndpoint, stateful };
    return Objects.hash(fields);
}
/**
 * Returns whether this satisfies other for allocation purposes. Only considers cluster ID and type, other fields
 * are ignored: the ids must be equal, content-ness (content or combined) must agree, and for
 * non-content clusters the exact type must match.
 */
public boolean satisfies(ClusterSpec other) {
    if ( ! this.id.equals(other.id)) return false;
    boolean thisIsContent = this.type.isContent();
    boolean otherIsContent = other.type.isContent();
    if (thisIsContent || otherIsContent) return thisIsContent == otherIsContent;
    return this.type.equals(other.type);
}
/** A cluster type */
public enum Type {
    admin,
    container,
    content,
    combined;

    /** Returns whether this runs a content cluster */
    public boolean isContent() {
        return this == content || this == combined;
    }

    /** Returns whether this runs a container cluster */
    public boolean isContainer() {
        return this == container || this == combined;
    }

    /** Parses a type from its lowercase name; throws IllegalArgumentException for unknown names. */
    public static Type from(String typeName) {
        for (Type candidate : values())
            if (candidate.name().equals(typeName))
                return candidate;
        throw new IllegalArgumentException("Illegal cluster type '" + typeName + "'");
    }
}
/** Identifier of a cluster; a non-null string wrapper with value semantics. */
public static final class Id {
    private final String id;
    public Id(String id) {
        this.id = Objects.requireNonNull(id, "Id cannot be null");
    }
    public static Id from(String id) {
        return new Id(id);
    }
    /** The raw string value of this id. */
    public String value() { return id; }
    @Override
    public String toString() { return "cluster '" + id + "'"; }
    @Override
    public boolean equals(Object o) {
        if (o == this) return true;
        // Id is final, so instanceof is equivalent to the exact-class check.
        if ( ! (o instanceof Id)) return false;
        return ((Id) o).id.equals(id);
    }
    @Override
    public int hashCode() { return id.hashCode(); }
}
/** Identifier of a group within a cluster */
public static final class Group {
    private final int index;
    private Group(int index) { this.index = index; }
    public static Group from(int index) { return new Group(index); }
    /** The numeric index of this group. */
    public int index() { return index; }
    @Override
    public String toString() { return "group " + index; }
    @Override
    public boolean equals(Object o) {
        if (o == this) return true;
        // Group is final, so instanceof is equivalent to the exact-class check.
        return (o instanceof Group) && ((Group) o).index == index;
    }
    @Override
    public int hashCode() { return index; }
}
} |
Consider adding timeout value to message (handy when user relies on default timeout value). | public void dispatch(HttpRequest req, CompletableFuture<HttpResponse> vessel) {
client.getExecutor().execute(() -> {
Endpoint endpoint = findLeastBusyEndpoint(endpoints);
try {
endpoint.inflight.incrementAndGet();
long reqTimeoutMillis = req.timeLeft().toMillis();
if (reqTimeoutMillis <= 0) {
vessel.completeExceptionally(new TimeoutException("Operation timed out"));
return;
}
Request jettyReq = client.newRequest(URI.create(endpoint.uri + req.path()))
.version(HttpVersion.HTTP_2)
.method(HttpMethod.fromString(req.method()))
.headers(hs -> req.headers().forEach((k, v) -> hs.add(k, v.get())))
.idleTimeout(IDLE_TIMEOUT.toMillis(), MILLISECONDS)
.timeout(reqTimeoutMillis, MILLISECONDS);
if (req.body() != null) {
boolean shouldCompress = compression == gzip || compression == auto && req.body().length > 512;
byte[] bytes;
if (shouldCompress) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream(1 << 10);
try (GZIPOutputStream zip = new GZIPOutputStream(buffer)) {
zip.write(req.body());
} catch (IOException e) { throw new UncheckedIOException(e); }
bytes = buffer.toByteArray();
jettyReq.headers(hs -> hs.add(HttpHeader.CONTENT_ENCODING, "gzip"));
} else {
bytes = req.body();
}
jettyReq.body(new BytesRequestContent(APPLICATION_JSON.asString(), bytes));
}
jettyReq.send(new BufferingResponseListener() {
@Override
public void onComplete(Result result) {
endpoint.inflight.decrementAndGet();
if (result.isFailed()) vessel.completeExceptionally(result.getFailure());
else vessel.complete(new JettyResponse(result.getResponse(), getContent()));
}
});
} catch (Exception e) {
endpoint.inflight.decrementAndGet();
vessel.completeExceptionally(e);
}
});
} | vessel.completeExceptionally(new TimeoutException("Operation timed out")); | public void dispatch(HttpRequest req, CompletableFuture<HttpResponse> vessel) {
client.getExecutor().execute(() -> {
Endpoint endpoint = findLeastBusyEndpoint(endpoints);
try {
endpoint.inflight.incrementAndGet();
long reqTimeoutMillis = req.timeLeft().toMillis();
if (reqTimeoutMillis <= 0) {
vessel.completeExceptionally(new TimeoutException("operation timed out after '" + req.timeout() + "'"));
return;
}
Request jettyReq = client.newRequest(URI.create(endpoint.uri + req.path()))
.version(HttpVersion.HTTP_2)
.method(HttpMethod.fromString(req.method()))
.headers(hs -> req.headers().forEach((k, v) -> hs.add(k, v.get())))
.idleTimeout(IDLE_TIMEOUT.toMillis(), MILLISECONDS)
.timeout(reqTimeoutMillis, MILLISECONDS);
if (req.body() != null) {
boolean shouldCompress = compression == gzip || compression == auto && req.body().length > 512;
byte[] bytes;
if (shouldCompress) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream(1 << 10);
try (GZIPOutputStream zip = new GZIPOutputStream(buffer)) {
zip.write(req.body());
} catch (IOException e) { throw new UncheckedIOException(e); }
bytes = buffer.toByteArray();
jettyReq.headers(hs -> hs.add(HttpHeader.CONTENT_ENCODING, "gzip"));
} else {
bytes = req.body();
}
jettyReq.body(new BytesRequestContent(APPLICATION_JSON.asString(), bytes));
}
jettyReq.send(new BufferingResponseListener() {
@Override
public void onComplete(Result result) {
endpoint.inflight.decrementAndGet();
if (result.isFailed()) vessel.completeExceptionally(result.getFailure());
else vessel.complete(new JettyResponse(result.getResponse(), getContent()));
}
});
} catch (Exception e) {
endpoint.inflight.decrementAndGet();
vessel.completeExceptionally(e);
}
});
} | class JettyCluster implements Cluster {
// 15-minute idle timeout; also applied to the client connector in createHttpClient below.
private static final Duration IDLE_TIMEOUT = Duration.ofMinutes(15);
private final HttpClient client;
// One Endpoint per configured feed URI, each tracking its own in-flight counter for load balancing.
private final List<Endpoint> endpoints;
private final Compression compression;
JettyCluster(FeedClientBuilderImpl b) throws IOException {
this.client = createHttpClient(b);
this.endpoints = b.endpoints.stream().map(Endpoint::new).collect(Collectors.toList());
this.compression = b.compression;
}
/** Stops the underlying Jetty client; any failure during stop is rethrown unchecked. */
@Override
public void close() {
    try {
        client.stop();
    } catch (Exception e) { throw new RuntimeException(e); }
}
// Builds and starts the Jetty HttpClient used for feeding: TLS from the builder, HTTP/2 preferred
// (dynamic transport falls back to HTTP/1.1), multiplexed connection pools, and optional proxying.
private static HttpClient createHttpClient(FeedClientBuilderImpl b) throws IOException {
SslContextFactory.Client clientSslCtxFactory = new SslContextFactory.Client();
clientSslCtxFactory.setSslContext(b.constructSslContext());
if (b.hostnameVerifier != null) {
clientSslCtxFactory.setHostnameVerifier(b.hostnameVerifier);
// A custom verifier replaces the built-in endpoint identification, so disable the latter.
clientSslCtxFactory.setEndpointIdentificationAlgorithm(null);
}
ClientConnector connector = new ClientConnector();
// Between 8 and 32 threads, scaled by available processors.
int threads = Math.max(Math.min(Runtime.getRuntime().availableProcessors(), 32), 8);
connector.setExecutor(new QueuedThreadPool(threads));
connector.setSslContextFactory(clientSslCtxFactory);
connector.setIdleTimeout(IDLE_TIMEOUT);
boolean secureProxy = b.proxy != null && b.proxy.getScheme().equals("https");
// TLS proxies get a much longer connect timeout (120s vs 30s).
connector.setConnectTimeout(Duration.ofSeconds(secureProxy ? 120 : 30));
HTTP2Client h2Client = new HTTP2Client(connector);
h2Client.setMaxConcurrentPushedStreams(b.maxStreamsPerConnection);
// Effectively unbounded flow-control windows for both session and stream.
int initialWindow = Integer.MAX_VALUE;
h2Client.setInitialSessionRecvWindow(initialWindow);
h2Client.setInitialStreamRecvWindow(initialWindow);
ClientConnectionFactory.Info h1 = HttpClientConnectionFactory.HTTP11;
ClientConnectionFactory.Info http2 = new ClientConnectionFactoryOverHTTP2.HTTP2(h2Client);
HttpClientTransportDynamic transport = new HttpClientTransportDynamic(connector, http2, h1);
int connectionsPerEndpoint = b.connectionsPerEndpoint;
transport.setConnectionPoolFactory(dest -> {
MultiplexConnectionPool pool = new MultiplexConnectionPool(
dest, Pool.StrategyType.RANDOM, connectionsPerEndpoint, false, dest, Integer.MAX_VALUE);
pool.preCreateConnections(connectionsPerEndpoint);
// Secure proxies get short-lived connections (1 min); direct connections use the configured TTL.
if (secureProxy) pool.setMaxDuration(Duration.ofMinutes(1).toMillis());
else {
pool.setMaximizeConnections(true);
pool.setMaxDuration(b.connectionTtl.toMillis());
}
return pool;
});
HttpClient httpClient = new HttpClient(transport);
httpClient.setMaxRequestsQueuedPerDestination(Integer.MAX_VALUE);
httpClient.setFollowRedirects(false);
httpClient.setUserAgentField(
new HttpField(HttpHeader.USER_AGENT, String.format("vespa-feed-client/%s (Jetty:%s)", Vespa.VERSION, Jetty.VERSION)));
// Prefer IPv4 addresses when a host resolves to both families; see Ipv4PreferringResolver.
httpClient.setSocketAddressResolver(new Ipv4PreferringResolver(httpClient, Duration.ofSeconds(10)));
// Feeding is stateless: never retain cookies.
httpClient.setCookieStore(new HttpCookieStore.Empty());
if (b.proxy != null) addProxyConfiguration(b, httpClient);
try { httpClient.start(); } catch (Exception e) { throw new IOException(e); }
// Auth challenges are handled by the caller, not by Jetty's WWW-Authenticate handler.
httpClient.getProtocolHandlers().remove(WWWAuthenticationProtocolHandler.NAME);
return httpClient;
}
// Registers the configured proxy with the client and arranges for the builder's proxy request
// headers to be attached: via the authentication store for https (h2) proxies, or on the
// CONNECT request for plain http proxies.
private static void addProxyConfiguration(FeedClientBuilderImpl b, HttpClient httpClient) throws IOException {
Origin.Address address = new Origin.Address(b.proxy.getHost(), b.proxy.getPort());
// Snapshot the header suppliers so later mutation of the builder does not affect us.
Map<String, Supplier<String>> proxyHeadersCopy = new TreeMap<>(b.proxyRequestHeaders);
if (b.proxy.getScheme().equals("https")) {
SslContextFactory.Client proxySslCtxFactory = new SslContextFactory.Client();
if (b.proxyHostnameVerifier != null) {
proxySslCtxFactory.setHostnameVerifier(b.proxyHostnameVerifier);
// A custom verifier replaces built-in endpoint identification.
proxySslCtxFactory.setEndpointIdentificationAlgorithm(null);
}
proxySslCtxFactory.setSslContext(b.constructProxySslContext());
try { proxySslCtxFactory.start(); } catch (Exception e) { throw new IOException(e); }
httpClient.getProxyConfiguration().addProxy(
new HttpProxy(address, proxySslCtxFactory, new Origin.Protocol(List.of("h2"), false)));
URI proxyUri = URI.create(endpointUri(b.proxy));
// Piggy-back the proxy headers on every request to the proxy via a pre-registered auth result.
httpClient.getAuthenticationStore().addAuthenticationResult(new Authentication.Result() {
@Override public URI getURI() { return proxyUri; }
@Override public void apply(Request r) {
r.headers(hs -> proxyHeadersCopy.forEach((k, v) -> hs.add(k, v.get())));
}
});
} else {
httpClient.getProxyConfiguration().addProxy(
new HttpProxy(address, false, new Origin.Protocol(List.of("http/1.1"), false)));
// For plain proxies the headers go on the CONNECT request only.
httpClient.getRequestListeners().add(new Request.Listener() {
@Override
public void onHeaders(Request r) {
if (HttpMethod.CONNECT.is(r.getMethod()))
r.headers(hs -> proxyHeadersCopy.forEach((k, v) -> hs.add(k, v.get())));
}
});
}
}
// Picks the endpoint with the fewest requests currently in flight; the earliest such endpoint wins ties.
private static Endpoint findLeastBusyEndpoint(List<Endpoint> endpoints) {
    Endpoint best = endpoints.get(0);
    int bestInflight = best.inflight.get();
    for (Endpoint candidate : endpoints) {
        int candidateInflight = candidate.inflight.get();
        if (candidateInflight < bestInflight) {
            best = candidate;
            bestInflight = candidateInflight;
        }
    }
    return best;
}
// Returns the URI's explicit port, or the scheme default: 80 for http, 443 otherwise.
private static int portOf(URI u) {
    int explicit = u.getPort();
    if (explicit != -1) return explicit;
    return u.getScheme().equals("http") ? 80 : 443;
}
private static String endpointUri(URI uri) {
return String.format("%s:
}
// Immutable view over a completed Jetty response plus its fully buffered body.
private static class JettyResponse implements HttpResponse {
final Response response;
final byte[] content;
JettyResponse(Response response, byte[] content) { this.response = response; this.content = content; }
@Override public int code() { return response.getStatus(); }
@Override public byte[] body() { return content; }
@Override public String contentType() { return response.getHeaders().get(HttpHeader.CONTENT_TYPE); }
}
// A normalized feed endpoint URI paired with a counter of requests currently in flight to it,
// used by findLeastBusyEndpoint for load balancing.
private static class Endpoint {
final AtomicInteger inflight = new AtomicInteger();
final String uri;
Endpoint(URI uri) { this.uri = endpointUri(uri); }
}
// Wraps Jetty's async DNS resolver and, when a host resolves to both IPv4 and IPv6 addresses,
// keeps only the IPv4 ones. Falls back to the full result for IPv6-only hosts.
private static class Ipv4PreferringResolver extends AbstractLifeCycle implements SocketAddressResolver {
final HttpClient client;
final Duration timeout;
SocketAddressResolver.Async instance;
Ipv4PreferringResolver(HttpClient client, Duration timeout) { this.client = client; this.timeout = timeout; }
@Override
protected void doStart() {
// Created lazily in doStart() so the client's executor and scheduler are available.
this.instance = new SocketAddressResolver.Async(client.getExecutor(), client.getScheduler(), timeout.toMillis());
}
@Override
public void resolve(String host, int port, Promise<List<InetSocketAddress>> promise) {
instance.resolve(host, port, new Promise.Wrapper<List<InetSocketAddress>>(promise) {
@Override
public void succeeded(List<InetSocketAddress> result) {
// Zero or one address: nothing to filter.
if (result.size() <= 1) {
getPromise().succeeded(result);
return;
}
List<InetSocketAddress> ipv4Addresses = result.stream()
.filter(addr -> addr.getAddress() instanceof Inet4Address).collect(Collectors.toList());
// IPv6-only host: hand back the unfiltered result rather than nothing.
if (ipv4Addresses.isEmpty()) {
getPromise().succeeded(result);
return;
}
getPromise().succeeded(ipv4Addresses);
}
});
}
}
} | class JettyCluster implements Cluster {
private static final Duration IDLE_TIMEOUT = Duration.ofMinutes(15);
private final HttpClient client;
private final List<Endpoint> endpoints;
private final Compression compression;
JettyCluster(FeedClientBuilderImpl b) throws IOException {
this.client = createHttpClient(b);
this.endpoints = b.endpoints.stream().map(Endpoint::new).collect(Collectors.toList());
this.compression = b.compression;
}
/** Stops the underlying Jetty client; any failure during stop is rethrown unchecked. */
@Override
public void close() {
    try {
        client.stop();
    } catch (Exception e) { throw new RuntimeException(e); }
}
private static HttpClient createHttpClient(FeedClientBuilderImpl b) throws IOException {
SslContextFactory.Client clientSslCtxFactory = new SslContextFactory.Client();
clientSslCtxFactory.setSslContext(b.constructSslContext());
if (b.hostnameVerifier != null) {
clientSslCtxFactory.setHostnameVerifier(b.hostnameVerifier);
clientSslCtxFactory.setEndpointIdentificationAlgorithm(null);
}
ClientConnector connector = new ClientConnector();
int threads = Math.max(Math.min(Runtime.getRuntime().availableProcessors(), 32), 8);
connector.setExecutor(new QueuedThreadPool(threads));
connector.setSslContextFactory(clientSslCtxFactory);
connector.setIdleTimeout(IDLE_TIMEOUT);
boolean secureProxy = b.proxy != null && b.proxy.getScheme().equals("https");
connector.setConnectTimeout(Duration.ofSeconds(secureProxy ? 120 : 30));
HTTP2Client h2Client = new HTTP2Client(connector);
h2Client.setMaxConcurrentPushedStreams(b.maxStreamsPerConnection);
int initialWindow = Integer.MAX_VALUE;
h2Client.setInitialSessionRecvWindow(initialWindow);
h2Client.setInitialStreamRecvWindow(initialWindow);
ClientConnectionFactory.Info h1 = HttpClientConnectionFactory.HTTP11;
ClientConnectionFactory.Info http2 = new ClientConnectionFactoryOverHTTP2.HTTP2(h2Client);
HttpClientTransportDynamic transport = new HttpClientTransportDynamic(connector, http2, h1);
int connectionsPerEndpoint = b.connectionsPerEndpoint;
transport.setConnectionPoolFactory(dest -> {
MultiplexConnectionPool pool = new MultiplexConnectionPool(
dest, Pool.StrategyType.RANDOM, connectionsPerEndpoint, false, dest, Integer.MAX_VALUE);
pool.preCreateConnections(connectionsPerEndpoint);
if (secureProxy) pool.setMaxDuration(Duration.ofMinutes(1).toMillis());
else {
pool.setMaximizeConnections(true);
pool.setMaxDuration(b.connectionTtl.toMillis());
}
return pool;
});
HttpClient httpClient = new HttpClient(transport);
httpClient.setMaxRequestsQueuedPerDestination(Integer.MAX_VALUE);
httpClient.setFollowRedirects(false);
httpClient.setUserAgentField(
new HttpField(HttpHeader.USER_AGENT, String.format("vespa-feed-client/%s (Jetty:%s)", Vespa.VERSION, Jetty.VERSION)));
httpClient.setSocketAddressResolver(new Ipv4PreferringResolver(httpClient, Duration.ofSeconds(10)));
httpClient.setCookieStore(new HttpCookieStore.Empty());
if (b.proxy != null) addProxyConfiguration(b, httpClient);
try { httpClient.start(); } catch (Exception e) { throw new IOException(e); }
httpClient.getProtocolHandlers().remove(WWWAuthenticationProtocolHandler.NAME);
return httpClient;
}
private static void addProxyConfiguration(FeedClientBuilderImpl b, HttpClient httpClient) throws IOException {
Origin.Address address = new Origin.Address(b.proxy.getHost(), b.proxy.getPort());
Map<String, Supplier<String>> proxyHeadersCopy = new TreeMap<>(b.proxyRequestHeaders);
if (b.proxy.getScheme().equals("https")) {
SslContextFactory.Client proxySslCtxFactory = new SslContextFactory.Client();
if (b.proxyHostnameVerifier != null) {
proxySslCtxFactory.setHostnameVerifier(b.proxyHostnameVerifier);
proxySslCtxFactory.setEndpointIdentificationAlgorithm(null);
}
proxySslCtxFactory.setSslContext(b.constructProxySslContext());
try { proxySslCtxFactory.start(); } catch (Exception e) { throw new IOException(e); }
httpClient.getProxyConfiguration().addProxy(
new HttpProxy(address, proxySslCtxFactory, new Origin.Protocol(List.of("h2"), false)));
URI proxyUri = URI.create(endpointUri(b.proxy));
httpClient.getAuthenticationStore().addAuthenticationResult(new Authentication.Result() {
@Override public URI getURI() { return proxyUri; }
@Override public void apply(Request r) {
r.headers(hs -> proxyHeadersCopy.forEach((k, v) -> hs.add(k, v.get())));
}
});
} else {
httpClient.getProxyConfiguration().addProxy(
new HttpProxy(address, false, new Origin.Protocol(List.of("http/1.1"), false)));
httpClient.getRequestListeners().add(new Request.Listener() {
@Override
public void onHeaders(Request r) {
if (HttpMethod.CONNECT.is(r.getMethod()))
r.headers(hs -> proxyHeadersCopy.forEach((k, v) -> hs.add(k, v.get())));
}
});
}
}
private static Endpoint findLeastBusyEndpoint(List<Endpoint> endpoints) {
Endpoint leastBusy = endpoints.get(0);
int minInflight = leastBusy.inflight.get();
for (int i = 1; i < endpoints.size(); i++) {
Endpoint endpoint = endpoints.get(i);
int inflight = endpoint.inflight.get();
if (inflight < minInflight) {
leastBusy = endpoint;
minInflight = inflight;
}
}
return leastBusy;
}
private static int portOf(URI u) {
return u.getPort() == -1 ? u.getScheme().equals("http") ? 80 : 443 : u.getPort();
}
private static String endpointUri(URI uri) {
return String.format("%s:
}
private static class JettyResponse implements HttpResponse {
final Response response;
final byte[] content;
JettyResponse(Response response, byte[] content) { this.response = response; this.content = content; }
@Override public int code() { return response.getStatus(); }
@Override public byte[] body() { return content; }
@Override public String contentType() { return response.getHeaders().get(HttpHeader.CONTENT_TYPE); }
}
private static class Endpoint {
final AtomicInteger inflight = new AtomicInteger();
final String uri;
Endpoint(URI uri) { this.uri = endpointUri(uri); }
}
private static class Ipv4PreferringResolver extends AbstractLifeCycle implements SocketAddressResolver {
final HttpClient client;
final Duration timeout;
SocketAddressResolver.Async instance;
Ipv4PreferringResolver(HttpClient client, Duration timeout) { this.client = client; this.timeout = timeout; }
@Override
protected void doStart() {
this.instance = new SocketAddressResolver.Async(client.getExecutor(), client.getScheduler(), timeout.toMillis());
}
@Override
public void resolve(String host, int port, Promise<List<InetSocketAddress>> promise) {
instance.resolve(host, port, new Promise.Wrapper<List<InetSocketAddress>>(promise) {
@Override
public void succeeded(List<InetSocketAddress> result) {
if (result.size() <= 1) {
getPromise().succeeded(result);
return;
}
List<InetSocketAddress> ipv4Addresses = result.stream()
.filter(addr -> addr.getAddress() instanceof Inet4Address).collect(Collectors.toList());
if (ipv4Addresses.isEmpty()) {
getPromise().succeeded(result);
return;
}
getPromise().succeeded(ipv4Addresses);
}
});
}
}
} |
To be demoted or removed later? | private void checkAndRedeploy(InfrastructureApplication application) {
log.log(INFO, () -> "Checking if " + application.name() + " should be redeployed");
if ( ! readiedTypes.remove(application)) return;
log.log(INFO, () -> "Trying to redeploy " + application.id() + " after completing provisioning of " + application.name());
try (Mutex lock = locks.apply(application.id())) {
if (application.nodeType().isHost() && nodes.get().state(State.ready).nodeType(application.nodeType()).isEmpty()) return;
log.log(INFO, () -> "Redeploying " + application.id() + " after completing provisioning of " + application.name());
try {
deployer.getDeployment(application.id()).ifPresent(Deployment::activate);
readied(childOf(application));
}
catch (RuntimeException e) {
log.log(WARNING, "Failed redeploying " + application.id() + ", will be retried by maintainer", e);
}
}
catch (UncheckedTimeoutException collision) {
readied(application);
}
} | log.log(INFO, () -> "Checking if " + application.name() + " should be redeployed"); | private void checkAndRedeploy(InfrastructureApplication application) {
if ( ! readiedTypes.remove(application)) return;
try (Mutex lock = locks.apply(application.id())) {
if (application.nodeType().isHost() && nodes.get().state(State.ready).nodeType(application.nodeType()).isEmpty()) return;
log.log(FINE, () -> "Redeploying " + application.id() + " after completing provisioning for " + application.name());
try {
deployer.getDeployment(application.id()).ifPresent(Deployment::activate);
childOf(application).ifPresent(this::readied);
}
catch (RuntimeException e) {
log.log(INFO, "Failed redeploying " + application.id() + ", will be retried by maintainer", e);
}
}
catch (UncheckedTimeoutException collision) {
readied(application);
}
} | class InfraApplicationRedeployer extends AbstractComponent {
private static final Logger log = Logger.getLogger(InfraApplicationRedeployer.class.getName());
// Single thread: redeploy checks for readied applications run serialized, one at a time.
private final ExecutorService executor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("infra-application-redeployer-"));
// Applications with a pending redeploy check; concurrent because readied() may be called from any thread.
private final Set<InfrastructureApplication> readiedTypes = new ConcurrentSkipListSet<>();
private final InfraDeployer deployer;
private final Function<ApplicationId, Mutex> locks;
private final Supplier<NodeList> nodes;
@Inject
public InfraApplicationRedeployer(InfraDeployer deployer, NodeRepository nodes) {
this(deployer, nodes.applications()::lockMaintenance, nodes.nodes()::list);
}
// Package-private — presumably for tests, letting them inject the lock function and node listing directly.
InfraApplicationRedeployer(InfraDeployer deployer, Function<ApplicationId, Mutex> locks, Supplier<NodeList> nodes) {
this.deployer = deployer;
this.locks = locks;
this.nodes = nodes;
}
/** Signals that nodes of the given type have been readied; may schedule a redeploy of the owning application. */
public void readied(NodeType type) {
readied(applicationOf(type));
}
private void readied(InfrastructureApplication application) {
// applicationOf/childOf return null for types without an owning application — nothing to do then.
if (application == null) return;
// The set de-duplicates: only the first readied() for a not-yet-checked application schedules a check.
if (readiedTypes.add(application)) executor.execute(() -> checkAndRedeploy(application));
}
// Maps a host node type to the infrastructure application that owns it, or null when none does.
private static InfrastructureApplication applicationOf(NodeType type) {
    switch (type) {
        case host: return InfrastructureApplication.TENANT_HOST;
        case confighost: return InfrastructureApplication.CONFIG_SERVER_HOST;
        case controllerhost: return InfrastructureApplication.CONTROLLER_HOST;
        case proxyhost: return InfrastructureApplication.PROXY_HOST;
        default: return null;
    }
}
// Maps a host application to the application running on top of it, or null when there is none.
private static InfrastructureApplication childOf(InfrastructureApplication application) {
    switch (application) {
        case CONFIG_SERVER_HOST: return InfrastructureApplication.CONFIG_SERVER;
        case CONTROLLER_HOST: return InfrastructureApplication.CONTROLLER;
        default: return null;
    }
}
@Override
public void deconstruct() {
executor.shutdown();
try {
// Give any in-flight redeploy check up to 10 seconds to finish; on clean termination we are done.
if (executor.awaitTermination(10, TimeUnit.SECONDS)) return;
log.log(WARNING, "Redeployer did not shut down within 10 seconds");
}
catch (InterruptedException e) {
// Restore the interrupt flag and fall through to the forced shutdown below.
Thread.currentThread().interrupt();
}
// Timed out or interrupted: cancel whatever is still queued or running.
executor.shutdownNow();
}
} | class InfraApplicationRedeployer implements AutoCloseable {
private static final Logger log = Logger.getLogger(InfraApplicationRedeployer.class.getName());
private final ExecutorService executor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("infra-application-redeployer-"));
private final Set<InfrastructureApplication> readiedTypes = new ConcurrentSkipListSet<>();
private final InfraDeployer deployer;
private final Function<ApplicationId, Mutex> locks;
private final Supplier<NodeList> nodes;
@Inject
public InfraApplicationRedeployer(InfraDeployer deployer, NodeRepository nodes) {
this(deployer, nodes.applications()::lockMaintenance, nodes.nodes()::list);
}
InfraApplicationRedeployer(InfraDeployer deployer, Function<ApplicationId, Mutex> locks, Supplier<NodeList> nodes) {
this.deployer = deployer;
this.locks = locks;
this.nodes = nodes;
}
public void readied(NodeType type) {
applicationOf(type).ifPresent(this::readied);
}
private void readied(InfrastructureApplication application) {
if (application == null) return;
if (readiedTypes.add(application)) executor.execute(() -> checkAndRedeploy(application));
}
private static Optional<InfrastructureApplication> applicationOf(NodeType type) {
return switch (type) {
case host -> Optional.of(InfrastructureApplication.TENANT_HOST);
case confighost -> Optional.of(InfrastructureApplication.CONFIG_SERVER_HOST);
case config -> Optional.of(InfrastructureApplication.CONFIG_SERVER);
case controllerhost -> Optional.of(InfrastructureApplication.CONTROLLER_HOST);
case controller -> Optional.of(InfrastructureApplication.CONTROLLER);
case proxyhost -> Optional.of(InfrastructureApplication.PROXY_HOST);
default -> Optional.empty();
};
}
private static Optional<InfrastructureApplication> childOf(InfrastructureApplication application) {
return switch (application) {
case CONFIG_SERVER_HOST -> Optional.of(InfrastructureApplication.CONFIG_SERVER);
case CONTROLLER_HOST -> Optional.of(InfrastructureApplication.CONTROLLER);
default -> Optional.empty();
};
}
@Override
public void close() {
executor.shutdown();
try {
if (executor.awaitTermination(10, TimeUnit.SECONDS)) return;
log.log(WARNING, "Redeployer did not shut down within 10 seconds");
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
executor.shutdownNow();
}
} |
Test for `application.nodeType().isHost()` is wrong since this code should work for child types too. | private void checkAndRedeploy(InfrastructureApplication application) {
log.log(INFO, () -> "Checking if " + application.name() + " should be redeployed");
if ( ! readiedTypes.remove(application)) return;
log.log(INFO, () -> "Trying to redeploy " + application.id() + " after completing provisioning of " + application.name());
try (Mutex lock = locks.apply(application.id())) {
if (application.nodeType().isHost() && nodes.get().state(State.ready).nodeType(application.nodeType()).isEmpty()) return;
log.log(INFO, () -> "Redeploying " + application.id() + " after completing provisioning of " + application.name());
try {
deployer.getDeployment(application.id()).ifPresent(Deployment::activate);
readied(childOf(application));
}
catch (RuntimeException e) {
log.log(WARNING, "Failed redeploying " + application.id() + ", will be retried by maintainer", e);
}
}
catch (UncheckedTimeoutException collision) {
readied(application);
}
} | log.log(INFO, () -> "Trying to redeploy " + application.id() + " after completing provisioning of " + application.name()); | private void checkAndRedeploy(InfrastructureApplication application) {
if ( ! readiedTypes.remove(application)) return;
try (Mutex lock = locks.apply(application.id())) {
if (application.nodeType().isHost() && nodes.get().state(State.ready).nodeType(application.nodeType()).isEmpty()) return;
log.log(FINE, () -> "Redeploying " + application.id() + " after completing provisioning for " + application.name());
try {
deployer.getDeployment(application.id()).ifPresent(Deployment::activate);
childOf(application).ifPresent(this::readied);
}
catch (RuntimeException e) {
log.log(INFO, "Failed redeploying " + application.id() + ", will be retried by maintainer", e);
}
}
catch (UncheckedTimeoutException collision) {
readied(application);
}
} | class InfraApplicationRedeployer extends AbstractComponent {
private static final Logger log = Logger.getLogger(InfraApplicationRedeployer.class.getName());
private final ExecutorService executor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("infra-application-redeployer-"));
private final Set<InfrastructureApplication> readiedTypes = new ConcurrentSkipListSet<>();
private final InfraDeployer deployer;
private final Function<ApplicationId, Mutex> locks;
private final Supplier<NodeList> nodes;
@Inject
public InfraApplicationRedeployer(InfraDeployer deployer, NodeRepository nodes) {
this(deployer, nodes.applications()::lockMaintenance, nodes.nodes()::list);
}
InfraApplicationRedeployer(InfraDeployer deployer, Function<ApplicationId, Mutex> locks, Supplier<NodeList> nodes) {
this.deployer = deployer;
this.locks = locks;
this.nodes = nodes;
}
public void readied(NodeType type) {
readied(applicationOf(type));
}
private void readied(InfrastructureApplication application) {
if (application == null) return;
if (readiedTypes.add(application)) executor.execute(() -> checkAndRedeploy(application));
}
private static InfrastructureApplication applicationOf(NodeType type) {
return switch (type) {
case host -> InfrastructureApplication.TENANT_HOST;
case confighost -> InfrastructureApplication.CONFIG_SERVER_HOST;
case controllerhost -> InfrastructureApplication.CONTROLLER_HOST;
case proxyhost -> InfrastructureApplication.PROXY_HOST;
default -> null;
};
}
private static InfrastructureApplication childOf(InfrastructureApplication application) {
return switch (application) {
case CONFIG_SERVER_HOST -> InfrastructureApplication.CONFIG_SERVER;
case CONTROLLER_HOST -> InfrastructureApplication.CONTROLLER;
default -> null;
};
}
@Override
public void deconstruct() {
executor.shutdown();
try {
if (executor.awaitTermination(10, TimeUnit.SECONDS)) return;
log.log(WARNING, "Redeployer did not shut down within 10 seconds");
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
executor.shutdownNow();
}
} | class InfraApplicationRedeployer implements AutoCloseable {
private static final Logger log = Logger.getLogger(InfraApplicationRedeployer.class.getName());
private final ExecutorService executor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("infra-application-redeployer-"));
private final Set<InfrastructureApplication> readiedTypes = new ConcurrentSkipListSet<>();
private final InfraDeployer deployer;
private final Function<ApplicationId, Mutex> locks;
private final Supplier<NodeList> nodes;
@Inject
public InfraApplicationRedeployer(InfraDeployer deployer, NodeRepository nodes) {
this(deployer, nodes.applications()::lockMaintenance, nodes.nodes()::list);
}
InfraApplicationRedeployer(InfraDeployer deployer, Function<ApplicationId, Mutex> locks, Supplier<NodeList> nodes) {
this.deployer = deployer;
this.locks = locks;
this.nodes = nodes;
}
public void readied(NodeType type) {
applicationOf(type).ifPresent(this::readied);
}
private void readied(InfrastructureApplication application) {
if (application == null) return;
if (readiedTypes.add(application)) executor.execute(() -> checkAndRedeploy(application));
}
private static Optional<InfrastructureApplication> applicationOf(NodeType type) {
return switch (type) {
case host -> Optional.of(InfrastructureApplication.TENANT_HOST);
case confighost -> Optional.of(InfrastructureApplication.CONFIG_SERVER_HOST);
case config -> Optional.of(InfrastructureApplication.CONFIG_SERVER);
case controllerhost -> Optional.of(InfrastructureApplication.CONTROLLER_HOST);
case controller -> Optional.of(InfrastructureApplication.CONTROLLER);
case proxyhost -> Optional.of(InfrastructureApplication.PROXY_HOST);
default -> Optional.empty();
};
}
private static Optional<InfrastructureApplication> childOf(InfrastructureApplication application) {
return switch (application) {
case CONFIG_SERVER_HOST -> Optional.of(InfrastructureApplication.CONFIG_SERVER);
case CONTROLLER_HOST -> Optional.of(InfrastructureApplication.CONTROLLER);
default -> Optional.empty();
};
}
@Override
public void close() {
executor.shutdown();
try {
if (executor.awaitTermination(10, TimeUnit.SECONDS)) return;
log.log(WARNING, "Redeployer did not shut down within 10 seconds");
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
executor.shutdownNow();
}
} |
Ah, forgot to remove these. | private void checkAndRedeploy(InfrastructureApplication application) {
log.log(INFO, () -> "Checking if " + application.name() + " should be redeployed");
if ( ! readiedTypes.remove(application)) return;
log.log(INFO, () -> "Trying to redeploy " + application.id() + " after completing provisioning of " + application.name());
try (Mutex lock = locks.apply(application.id())) {
if (application.nodeType().isHost() && nodes.get().state(State.ready).nodeType(application.nodeType()).isEmpty()) return;
log.log(INFO, () -> "Redeploying " + application.id() + " after completing provisioning of " + application.name());
try {
deployer.getDeployment(application.id()).ifPresent(Deployment::activate);
readied(childOf(application));
}
catch (RuntimeException e) {
log.log(WARNING, "Failed redeploying " + application.id() + ", will be retried by maintainer", e);
}
}
catch (UncheckedTimeoutException collision) {
readied(application);
}
} | log.log(INFO, () -> "Checking if " + application.name() + " should be redeployed"); | private void checkAndRedeploy(InfrastructureApplication application) {
if ( ! readiedTypes.remove(application)) return;
try (Mutex lock = locks.apply(application.id())) {
if (application.nodeType().isHost() && nodes.get().state(State.ready).nodeType(application.nodeType()).isEmpty()) return;
log.log(FINE, () -> "Redeploying " + application.id() + " after completing provisioning for " + application.name());
try {
deployer.getDeployment(application.id()).ifPresent(Deployment::activate);
childOf(application).ifPresent(this::readied);
}
catch (RuntimeException e) {
log.log(INFO, "Failed redeploying " + application.id() + ", will be retried by maintainer", e);
}
}
catch (UncheckedTimeoutException collision) {
readied(application);
}
} | class InfraApplicationRedeployer extends AbstractComponent {
private static final Logger log = Logger.getLogger(InfraApplicationRedeployer.class.getName());
private final ExecutorService executor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("infra-application-redeployer-"));
private final Set<InfrastructureApplication> readiedTypes = new ConcurrentSkipListSet<>();
private final InfraDeployer deployer;
private final Function<ApplicationId, Mutex> locks;
private final Supplier<NodeList> nodes;
@Inject
public InfraApplicationRedeployer(InfraDeployer deployer, NodeRepository nodes) {
this(deployer, nodes.applications()::lockMaintenance, nodes.nodes()::list);
}
InfraApplicationRedeployer(InfraDeployer deployer, Function<ApplicationId, Mutex> locks, Supplier<NodeList> nodes) {
this.deployer = deployer;
this.locks = locks;
this.nodes = nodes;
}
public void readied(NodeType type) {
readied(applicationOf(type));
}
private void readied(InfrastructureApplication application) {
if (application == null) return;
if (readiedTypes.add(application)) executor.execute(() -> checkAndRedeploy(application));
}
private static InfrastructureApplication applicationOf(NodeType type) {
return switch (type) {
case host -> InfrastructureApplication.TENANT_HOST;
case confighost -> InfrastructureApplication.CONFIG_SERVER_HOST;
case controllerhost -> InfrastructureApplication.CONTROLLER_HOST;
case proxyhost -> InfrastructureApplication.PROXY_HOST;
default -> null;
};
}
private static InfrastructureApplication childOf(InfrastructureApplication application) {
return switch (application) {
case CONFIG_SERVER_HOST -> InfrastructureApplication.CONFIG_SERVER;
case CONTROLLER_HOST -> InfrastructureApplication.CONTROLLER;
default -> null;
};
}
@Override
public void deconstruct() {
executor.shutdown();
try {
if (executor.awaitTermination(10, TimeUnit.SECONDS)) return;
log.log(WARNING, "Redeployer did not shut down within 10 seconds");
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
executor.shutdownNow();
}
} | class InfraApplicationRedeployer implements AutoCloseable {
private static final Logger log = Logger.getLogger(InfraApplicationRedeployer.class.getName());
private final ExecutorService executor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("infra-application-redeployer-"));
private final Set<InfrastructureApplication> readiedTypes = new ConcurrentSkipListSet<>();
private final InfraDeployer deployer;
private final Function<ApplicationId, Mutex> locks;
private final Supplier<NodeList> nodes;
@Inject
public InfraApplicationRedeployer(InfraDeployer deployer, NodeRepository nodes) {
this(deployer, nodes.applications()::lockMaintenance, nodes.nodes()::list);
}
InfraApplicationRedeployer(InfraDeployer deployer, Function<ApplicationId, Mutex> locks, Supplier<NodeList> nodes) {
this.deployer = deployer;
this.locks = locks;
this.nodes = nodes;
}
public void readied(NodeType type) {
applicationOf(type).ifPresent(this::readied);
}
private void readied(InfrastructureApplication application) {
if (application == null) return;
if (readiedTypes.add(application)) executor.execute(() -> checkAndRedeploy(application));
}
private static Optional<InfrastructureApplication> applicationOf(NodeType type) {
return switch (type) {
case host -> Optional.of(InfrastructureApplication.TENANT_HOST);
case confighost -> Optional.of(InfrastructureApplication.CONFIG_SERVER_HOST);
case config -> Optional.of(InfrastructureApplication.CONFIG_SERVER);
case controllerhost -> Optional.of(InfrastructureApplication.CONTROLLER_HOST);
case controller -> Optional.of(InfrastructureApplication.CONTROLLER);
case proxyhost -> Optional.of(InfrastructureApplication.PROXY_HOST);
default -> Optional.empty();
};
}
private static Optional<InfrastructureApplication> childOf(InfrastructureApplication application) {
return switch (application) {
case CONFIG_SERVER_HOST -> Optional.of(InfrastructureApplication.CONFIG_SERVER);
case CONTROLLER_HOST -> Optional.of(InfrastructureApplication.CONTROLLER);
default -> Optional.empty();
};
}
@Override
public void close() {
executor.shutdown();
try {
if (executor.awaitTermination(10, TimeUnit.SECONDS)) return;
log.log(WARNING, "Redeployer did not shut down within 10 seconds");
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
executor.shutdownNow();
}
} |
It skips deployment if it's a host type without ready nodes. I don't think containers have an equivalent ready state? So they're never skipped, but only triggered (unconditionally) after we actually redeploy their host applications. | private void checkAndRedeploy(InfrastructureApplication application) {
log.log(INFO, () -> "Checking if " + application.name() + " should be redeployed");
if ( ! readiedTypes.remove(application)) return;
log.log(INFO, () -> "Trying to redeploy " + application.id() + " after completing provisioning of " + application.name());
try (Mutex lock = locks.apply(application.id())) {
if (application.nodeType().isHost() && nodes.get().state(State.ready).nodeType(application.nodeType()).isEmpty()) return;
log.log(INFO, () -> "Redeploying " + application.id() + " after completing provisioning of " + application.name());
try {
deployer.getDeployment(application.id()).ifPresent(Deployment::activate);
readied(childOf(application));
}
catch (RuntimeException e) {
log.log(WARNING, "Failed redeploying " + application.id() + ", will be retried by maintainer", e);
}
}
catch (UncheckedTimeoutException collision) {
readied(application);
}
} | log.log(INFO, () -> "Trying to redeploy " + application.id() + " after completing provisioning of " + application.name()); | private void checkAndRedeploy(InfrastructureApplication application) {
if ( ! readiedTypes.remove(application)) return;
try (Mutex lock = locks.apply(application.id())) {
if (application.nodeType().isHost() && nodes.get().state(State.ready).nodeType(application.nodeType()).isEmpty()) return;
log.log(FINE, () -> "Redeploying " + application.id() + " after completing provisioning for " + application.name());
try {
deployer.getDeployment(application.id()).ifPresent(Deployment::activate);
childOf(application).ifPresent(this::readied);
}
catch (RuntimeException e) {
log.log(INFO, "Failed redeploying " + application.id() + ", will be retried by maintainer", e);
}
}
catch (UncheckedTimeoutException collision) {
readied(application);
}
} | class InfraApplicationRedeployer extends AbstractComponent {
private static final Logger log = Logger.getLogger(InfraApplicationRedeployer.class.getName());
private final ExecutorService executor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("infra-application-redeployer-"));
private final Set<InfrastructureApplication> readiedTypes = new ConcurrentSkipListSet<>();
private final InfraDeployer deployer;
private final Function<ApplicationId, Mutex> locks;
private final Supplier<NodeList> nodes;
@Inject
public InfraApplicationRedeployer(InfraDeployer deployer, NodeRepository nodes) {
this(deployer, nodes.applications()::lockMaintenance, nodes.nodes()::list);
}
InfraApplicationRedeployer(InfraDeployer deployer, Function<ApplicationId, Mutex> locks, Supplier<NodeList> nodes) {
this.deployer = deployer;
this.locks = locks;
this.nodes = nodes;
}
public void readied(NodeType type) {
readied(applicationOf(type));
}
private void readied(InfrastructureApplication application) {
if (application == null) return;
if (readiedTypes.add(application)) executor.execute(() -> checkAndRedeploy(application));
}
private static InfrastructureApplication applicationOf(NodeType type) {
return switch (type) {
case host -> InfrastructureApplication.TENANT_HOST;
case confighost -> InfrastructureApplication.CONFIG_SERVER_HOST;
case controllerhost -> InfrastructureApplication.CONTROLLER_HOST;
case proxyhost -> InfrastructureApplication.PROXY_HOST;
default -> null;
};
}
private static InfrastructureApplication childOf(InfrastructureApplication application) {
return switch (application) {
case CONFIG_SERVER_HOST -> InfrastructureApplication.CONFIG_SERVER;
case CONTROLLER_HOST -> InfrastructureApplication.CONTROLLER;
default -> null;
};
}
@Override
public void deconstruct() {
executor.shutdown();
try {
if (executor.awaitTermination(10, TimeUnit.SECONDS)) return;
log.log(WARNING, "Redeployer did not shut down within 10 seconds");
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
executor.shutdownNow();
}
} | class InfraApplicationRedeployer implements AutoCloseable {
private static final Logger log = Logger.getLogger(InfraApplicationRedeployer.class.getName());
private final ExecutorService executor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("infra-application-redeployer-"));
private final Set<InfrastructureApplication> readiedTypes = new ConcurrentSkipListSet<>();
private final InfraDeployer deployer;
private final Function<ApplicationId, Mutex> locks;
private final Supplier<NodeList> nodes;
@Inject
public InfraApplicationRedeployer(InfraDeployer deployer, NodeRepository nodes) {
this(deployer, nodes.applications()::lockMaintenance, nodes.nodes()::list);
}
InfraApplicationRedeployer(InfraDeployer deployer, Function<ApplicationId, Mutex> locks, Supplier<NodeList> nodes) {
this.deployer = deployer;
this.locks = locks;
this.nodes = nodes;
}
public void readied(NodeType type) {
applicationOf(type).ifPresent(this::readied);
}
private void readied(InfrastructureApplication application) {
if (application == null) return;
if (readiedTypes.add(application)) executor.execute(() -> checkAndRedeploy(application));
}
private static Optional<InfrastructureApplication> applicationOf(NodeType type) {
return switch (type) {
case host -> Optional.of(InfrastructureApplication.TENANT_HOST);
case confighost -> Optional.of(InfrastructureApplication.CONFIG_SERVER_HOST);
case config -> Optional.of(InfrastructureApplication.CONFIG_SERVER);
case controllerhost -> Optional.of(InfrastructureApplication.CONTROLLER_HOST);
case controller -> Optional.of(InfrastructureApplication.CONTROLLER);
case proxyhost -> Optional.of(InfrastructureApplication.PROXY_HOST);
default -> Optional.empty();
};
}
private static Optional<InfrastructureApplication> childOf(InfrastructureApplication application) {
return switch (application) {
case CONFIG_SERVER_HOST -> Optional.of(InfrastructureApplication.CONFIG_SERVER);
case CONTROLLER_HOST -> Optional.of(InfrastructureApplication.CONTROLLER);
default -> Optional.empty();
};
}
@Override
public void close() {
executor.shutdown();
try {
if (executor.awaitTermination(10, TimeUnit.SECONDS)) return;
log.log(WARNING, "Redeployer did not shut down within 10 seconds");
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
executor.shutdownNow();
}
} |
Fixed, thanks! | private void checkAndRedeploy(InfrastructureApplication application) {
log.log(INFO, () -> "Checking if " + application.name() + " should be redeployed");
if ( ! readiedTypes.remove(application)) return;
log.log(INFO, () -> "Trying to redeploy " + application.id() + " after completing provisioning of " + application.name());
try (Mutex lock = locks.apply(application.id())) {
if (application.nodeType().isHost() && nodes.get().state(State.ready).nodeType(application.nodeType()).isEmpty()) return;
log.log(INFO, () -> "Redeploying " + application.id() + " after completing provisioning of " + application.name());
try {
deployer.getDeployment(application.id()).ifPresent(Deployment::activate);
readied(childOf(application));
}
catch (RuntimeException e) {
log.log(WARNING, "Failed redeploying " + application.id() + ", will be retried by maintainer", e);
}
}
catch (UncheckedTimeoutException collision) {
readied(application);
}
} | log.log(INFO, () -> "Checking if " + application.name() + " should be redeployed"); | private void checkAndRedeploy(InfrastructureApplication application) {
if ( ! readiedTypes.remove(application)) return;
try (Mutex lock = locks.apply(application.id())) {
if (application.nodeType().isHost() && nodes.get().state(State.ready).nodeType(application.nodeType()).isEmpty()) return;
log.log(FINE, () -> "Redeploying " + application.id() + " after completing provisioning for " + application.name());
try {
deployer.getDeployment(application.id()).ifPresent(Deployment::activate);
childOf(application).ifPresent(this::readied);
}
catch (RuntimeException e) {
log.log(INFO, "Failed redeploying " + application.id() + ", will be retried by maintainer", e);
}
}
catch (UncheckedTimeoutException collision) {
readied(application);
}
} | class InfraApplicationRedeployer extends AbstractComponent {
private static final Logger log = Logger.getLogger(InfraApplicationRedeployer.class.getName());
private final ExecutorService executor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("infra-application-redeployer-"));
private final Set<InfrastructureApplication> readiedTypes = new ConcurrentSkipListSet<>();
private final InfraDeployer deployer;
private final Function<ApplicationId, Mutex> locks;
private final Supplier<NodeList> nodes;
@Inject
public InfraApplicationRedeployer(InfraDeployer deployer, NodeRepository nodes) {
this(deployer, nodes.applications()::lockMaintenance, nodes.nodes()::list);
}
InfraApplicationRedeployer(InfraDeployer deployer, Function<ApplicationId, Mutex> locks, Supplier<NodeList> nodes) {
this.deployer = deployer;
this.locks = locks;
this.nodes = nodes;
}
public void readied(NodeType type) {
readied(applicationOf(type));
}
private void readied(InfrastructureApplication application) {
if (application == null) return;
if (readiedTypes.add(application)) executor.execute(() -> checkAndRedeploy(application));
}
private static InfrastructureApplication applicationOf(NodeType type) {
return switch (type) {
case host -> InfrastructureApplication.TENANT_HOST;
case confighost -> InfrastructureApplication.CONFIG_SERVER_HOST;
case controllerhost -> InfrastructureApplication.CONTROLLER_HOST;
case proxyhost -> InfrastructureApplication.PROXY_HOST;
default -> null;
};
}
private static InfrastructureApplication childOf(InfrastructureApplication application) {
return switch (application) {
case CONFIG_SERVER_HOST -> InfrastructureApplication.CONFIG_SERVER;
case CONTROLLER_HOST -> InfrastructureApplication.CONTROLLER;
default -> null;
};
}
@Override
public void deconstruct() {
executor.shutdown();
try {
if (executor.awaitTermination(10, TimeUnit.SECONDS)) return;
log.log(WARNING, "Redeployer did not shut down within 10 seconds");
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
executor.shutdownNow();
}
} | class InfraApplicationRedeployer implements AutoCloseable {
private static final Logger log = Logger.getLogger(InfraApplicationRedeployer.class.getName());
private final ExecutorService executor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("infra-application-redeployer-"));
private final Set<InfrastructureApplication> readiedTypes = new ConcurrentSkipListSet<>();
private final InfraDeployer deployer;
private final Function<ApplicationId, Mutex> locks;
private final Supplier<NodeList> nodes;
@Inject
public InfraApplicationRedeployer(InfraDeployer deployer, NodeRepository nodes) {
this(deployer, nodes.applications()::lockMaintenance, nodes.nodes()::list);
}
InfraApplicationRedeployer(InfraDeployer deployer, Function<ApplicationId, Mutex> locks, Supplier<NodeList> nodes) {
this.deployer = deployer;
this.locks = locks;
this.nodes = nodes;
}
public void readied(NodeType type) {
applicationOf(type).ifPresent(this::readied);
}
private void readied(InfrastructureApplication application) {
if (application == null) return;
if (readiedTypes.add(application)) executor.execute(() -> checkAndRedeploy(application));
}
private static Optional<InfrastructureApplication> applicationOf(NodeType type) {
return switch (type) {
case host -> Optional.of(InfrastructureApplication.TENANT_HOST);
case confighost -> Optional.of(InfrastructureApplication.CONFIG_SERVER_HOST);
case config -> Optional.of(InfrastructureApplication.CONFIG_SERVER);
case controllerhost -> Optional.of(InfrastructureApplication.CONTROLLER_HOST);
case controller -> Optional.of(InfrastructureApplication.CONTROLLER);
case proxyhost -> Optional.of(InfrastructureApplication.PROXY_HOST);
default -> Optional.empty();
};
}
private static Optional<InfrastructureApplication> childOf(InfrastructureApplication application) {
return switch (application) {
case CONFIG_SERVER_HOST -> Optional.of(InfrastructureApplication.CONFIG_SERVER);
case CONTROLLER_HOST -> Optional.of(InfrastructureApplication.CONTROLLER);
default -> Optional.empty();
};
}
@Override
public void close() {
executor.shutdown();
try {
if (executor.awaitTermination(10, TimeUnit.SECONDS)) return;
log.log(WARNING, "Redeployer did not shut down within 10 seconds");
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
executor.shutdownNow();
}
} |
isHost() can now be removed? | private void checkAndRedeploy(InfrastructureApplication application) {
log.log(INFO, () -> "Checking if " + application.name() + " should be redeployed");
if ( ! readiedTypes.remove(application)) return;
log.log(INFO, () -> "Trying to redeploy " + application.id() + " after completing provisioning of " + application.name());
try (Mutex lock = locks.apply(application.id())) {
if (application.nodeType().isHost() && nodes.get().state(State.ready).nodeType(application.nodeType()).isEmpty()) return;
log.log(INFO, () -> "Redeploying " + application.id() + " after completing provisioning of " + application.name());
try {
deployer.getDeployment(application.id()).ifPresent(Deployment::activate);
readied(childOf(application));
}
catch (RuntimeException e) {
log.log(WARNING, "Failed redeploying " + application.id() + ", will be retried by maintainer", e);
}
}
catch (UncheckedTimeoutException collision) {
readied(application);
}
} | log.log(INFO, () -> "Trying to redeploy " + application.id() + " after completing provisioning of " + application.name()); | private void checkAndRedeploy(InfrastructureApplication application) {
if ( ! readiedTypes.remove(application)) return;
try (Mutex lock = locks.apply(application.id())) {
if (application.nodeType().isHost() && nodes.get().state(State.ready).nodeType(application.nodeType()).isEmpty()) return;
log.log(FINE, () -> "Redeploying " + application.id() + " after completing provisioning for " + application.name());
try {
deployer.getDeployment(application.id()).ifPresent(Deployment::activate);
childOf(application).ifPresent(this::readied);
}
catch (RuntimeException e) {
log.log(INFO, "Failed redeploying " + application.id() + ", will be retried by maintainer", e);
}
}
catch (UncheckedTimeoutException collision) {
readied(application);
}
} | class InfraApplicationRedeployer extends AbstractComponent {
private static final Logger log = Logger.getLogger(InfraApplicationRedeployer.class.getName());
private final ExecutorService executor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("infra-application-redeployer-"));
private final Set<InfrastructureApplication> readiedTypes = new ConcurrentSkipListSet<>();
private final InfraDeployer deployer;
private final Function<ApplicationId, Mutex> locks;
private final Supplier<NodeList> nodes;
@Inject
public InfraApplicationRedeployer(InfraDeployer deployer, NodeRepository nodes) {
this(deployer, nodes.applications()::lockMaintenance, nodes.nodes()::list);
}
InfraApplicationRedeployer(InfraDeployer deployer, Function<ApplicationId, Mutex> locks, Supplier<NodeList> nodes) {
this.deployer = deployer;
this.locks = locks;
this.nodes = nodes;
}
public void readied(NodeType type) {
readied(applicationOf(type));
}
private void readied(InfrastructureApplication application) {
if (application == null) return;
if (readiedTypes.add(application)) executor.execute(() -> checkAndRedeploy(application));
}
private static InfrastructureApplication applicationOf(NodeType type) {
return switch (type) {
case host -> InfrastructureApplication.TENANT_HOST;
case confighost -> InfrastructureApplication.CONFIG_SERVER_HOST;
case controllerhost -> InfrastructureApplication.CONTROLLER_HOST;
case proxyhost -> InfrastructureApplication.PROXY_HOST;
default -> null;
};
}
private static InfrastructureApplication childOf(InfrastructureApplication application) {
return switch (application) {
case CONFIG_SERVER_HOST -> InfrastructureApplication.CONFIG_SERVER;
case CONTROLLER_HOST -> InfrastructureApplication.CONTROLLER;
default -> null;
};
}
@Override
public void deconstruct() {
executor.shutdown();
try {
if (executor.awaitTermination(10, TimeUnit.SECONDS)) return;
log.log(WARNING, "Redeployer did not shut down within 10 seconds");
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
executor.shutdownNow();
}
} | class InfraApplicationRedeployer implements AutoCloseable {
private static final Logger log = Logger.getLogger(InfraApplicationRedeployer.class.getName());
private final ExecutorService executor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("infra-application-redeployer-"));
private final Set<InfrastructureApplication> readiedTypes = new ConcurrentSkipListSet<>();
private final InfraDeployer deployer;
private final Function<ApplicationId, Mutex> locks;
private final Supplier<NodeList> nodes;
@Inject
public InfraApplicationRedeployer(InfraDeployer deployer, NodeRepository nodes) {
this(deployer, nodes.applications()::lockMaintenance, nodes.nodes()::list);
}
InfraApplicationRedeployer(InfraDeployer deployer, Function<ApplicationId, Mutex> locks, Supplier<NodeList> nodes) {
this.deployer = deployer;
this.locks = locks;
this.nodes = nodes;
}
public void readied(NodeType type) {
applicationOf(type).ifPresent(this::readied);
}
private void readied(InfrastructureApplication application) {
if (application == null) return;
if (readiedTypes.add(application)) executor.execute(() -> checkAndRedeploy(application));
}
private static Optional<InfrastructureApplication> applicationOf(NodeType type) {
return switch (type) {
case host -> Optional.of(InfrastructureApplication.TENANT_HOST);
case confighost -> Optional.of(InfrastructureApplication.CONFIG_SERVER_HOST);
case config -> Optional.of(InfrastructureApplication.CONFIG_SERVER);
case controllerhost -> Optional.of(InfrastructureApplication.CONTROLLER_HOST);
case controller -> Optional.of(InfrastructureApplication.CONTROLLER);
case proxyhost -> Optional.of(InfrastructureApplication.PROXY_HOST);
default -> Optional.empty();
};
}
private static Optional<InfrastructureApplication> childOf(InfrastructureApplication application) {
return switch (application) {
case CONFIG_SERVER_HOST -> Optional.of(InfrastructureApplication.CONFIG_SERVER);
case CONTROLLER_HOST -> Optional.of(InfrastructureApplication.CONTROLLER);
default -> Optional.empty();
};
}
@Override
public void close() {
executor.shutdown();
try {
if (executor.awaitTermination(10, TimeUnit.SECONDS)) return;
log.log(WARNING, "Redeployer did not shut down within 10 seconds");
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
executor.shutdownNow();
}
} |
Why not use Text.substringByCodepoints ? That is much easier to reason over for the user. | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.safeSubstring(text.getString(), config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | : Text.safeSubstring(text.getString(), config.getMaxTokenizeLength()); | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.substringByCodepoints(text.getString(), 0, config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} |
It's a larger change, and slower for long fields, although probably not by much. I agree it's a better end state. | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.safeSubstring(text.getString(), config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | : Text.safeSubstring(text.getString(), config.getMaxTokenizeLength()); | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.substringByCodepoints(text.getString(), 0, config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} |
Not sure I dare do that with `truncate(...)`, though. It specifically states "characters" in its documentation. WDYT? | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.safeSubstring(text.getString(), config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | : Text.safeSubstring(text.getString(), config.getMaxTokenizeLength()); | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.substringByCodepoints(text.getString(), 0, config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} |
For everybody, except the Java developers who came up with the high/low surrogate trick, a character is the whole character and not only the part of a character that fits inside a Java character. | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.safeSubstring(text.getString(), config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | : Text.safeSubstring(text.getString(), config.getMaxTokenizeLength()); | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.substringByCodepoints(text.getString(), 0, config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} |
I agree with that, but this is a public-API method that specifies characters, and it could potentially be used in code which assumes the returned length in `char` is not greater than the argument. I'm not saying it's _very_ likely, but we'd break anyone who made that assumption. | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.safeSubstring(text.getString(), config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | : Text.safeSubstring(text.getString(), config.getMaxTokenizeLength()); | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.substringByCodepoints(text.getString(), 0, config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} |
Check usages of truncate, and see what they think they expect.... | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.safeSubstring(text.getString(), config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | : Text.safeSubstring(text.getString(), config.getMaxTokenizeLength()); | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.substringByCodepoints(text.getString(), 0, config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} |
In the only usage I found, you can safely treat a character as a human would, and not as a well-aged Java core developer... `ParseException p = new ParseException("Could not parse '" + Text.truncate(expression, 50) + "'");` | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.safeSubstring(text.getString(), config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | : Text.safeSubstring(text.getString(), config.getMaxTokenizeLength()); | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.substringByCodepoints(text.getString(), 0, config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} |
If you look at the comments you see this cannot be used for anything machine-readable. * If length is 4 or less the string will be truncated to length. * If length is longer than 4, it will be truncated at length-4 with " ..." added at the end. We have a lot of public methods that we have only added for our own convenience, and this looks like one of them. And the change we are making here will already change something, but that is just a theoretical difference. The only value it adds over s.substring() is the optional padding with "...". | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.safeSubstring(text.getString(), config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | : Text.safeSubstring(text.getString(), config.getMaxTokenizeLength()); | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.substringByCodepoints(text.getString(), 0, config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} |
Sold! | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.safeSubstring(text.getString(), config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | : Text.safeSubstring(text.getString(), config.getMaxTokenizeLength()); | public boolean annotate(StringFieldValue text) {
if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true;
Tokenizer tokenizer = factory.getTokenizer();
String input = (text.getString().length() <= config.getMaxTokenizeLength())
? text.getString()
: Text.substringByCodepoints(text.getString(), 0, config.getMaxTokenizeLength());
Iterable<Token> tokens = tokenizer.tokenize(input, config.getLanguage(), config.getStemMode(),
config.getRemoveAccents());
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
return true;
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} | class TermOccurrences {
final Map<String, Integer> termOccurrences = new HashMap<>();
final int maxOccurrences;
public TermOccurrences(int maxOccurences) {
this.maxOccurrences = maxOccurences;
}
boolean termCountBelowLimit(String term) {
String lowerCasedTerm = toLowerCase(term);
int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0);
if (occurrences >= maxOccurrences) return false;
termOccurrences.put(lowerCasedTerm, occurrences + 1);
return true;
}
} |
👍 | private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) {
if (candidate.parentHostname().isEmpty()) return false;
return nodes.values().stream().anyMatch(acceptedNode -> acceptedNode.parentHostname().equals(candidate.parentHostname()));
} | return nodes.values().stream().anyMatch(acceptedNode -> acceptedNode.parentHostname().equals(candidate.parentHostname())); | private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) {
if (candidate.parentHostname().isEmpty()) return false;
return nodes.values().stream().anyMatch(acceptedNode -> acceptedNode.parentHostname().equals(candidate.parentHostname()));
} | class NodeAllocation {
private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName());
/** List of all nodes in node-repository */
private final NodeList allNodes;
/** The application this list is for */
private final ApplicationId application;
/** The cluster this list is for */
private final ClusterSpec cluster;
/** The requested nodes of this list */
private final NodeSpec requested;
/** The node candidates this has accepted so far, keyed on hostname */
private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>();
/** The number of already allocated nodes of compatible size */
private int acceptedAndCompatible = 0;
/** The number of already allocated nodes which can be made compatible */
private int acceptedAndCompatibleOrResizable = 0;
/** The number of nodes rejected because of clashing parentHostname */
private int rejectedDueToClashingParentHost = 0;
/** The number of nodes rejected due to exclusivity constraints */
private int rejectedDueToExclusivity = 0;
/** The number of nodes rejected because they were outside the real resource limits */
private int rejectedDueToInsufficientRealResources = 0;
/** The number of nodes that just now was changed to retired */
private int wasRetiredJustNow = 0;
/** The number of nodes that just now was changed to retired to upgrade its host flavor */
private int wasRetiredDueToFlavorUpgrade = 0;
/** The node indexes to verify uniqueness of each member's index */
private final Set<Integer> indexes = new HashSet<>();
/** The next membership index to assign to a new node */
private final Supplier<Integer> nextIndex;
/** The node repository this allocation reads from and writes to */
private final NodeRepository nodeRepository;
/** Host flavor required by feature flag for this instance/cluster, empty when unrestricted */
private final Optional<String> requiredHostFlavor;
/** Passed through to exclusivity checks; presumably allows converting a shared parent to exclusive — confirm with callers */
private final boolean makeExclusive;
/**
 * Creates an allocation for one cluster of an application.
 *
 * @param allNodes       all nodes currently in the node repository
 * @param application    the application to allocate nodes for
 * @param cluster        the cluster within the application
 * @param requested      the node spec this allocation should fulfill
 * @param nextIndex      supplier of the next membership index to assign to a new node
 * @param nodeRepository the node repository
 * @param makeExclusive  forwarded to exclusivity checks — see violatesExclusivity
 */
NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requested,
Supplier<Integer> nextIndex, NodeRepository nodeRepository, boolean makeExclusive) {
this.allNodes = allNodes;
this.application = application;
this.cluster = cluster;
this.requested = requested;
this.nextIndex = nextIndex;
this.nodeRepository = nodeRepository;
// A feature flag may pin the host flavor for this particular instance/cluster type/cluster id;
// a blank flag value means no flavor requirement
this.requiredHostFlavor = Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource())
.with(FetchVector.Dimension.INSTANCE_ID, application.serializedForm())
.with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name())
.with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value())
.value())
.filter(s -> !s.isBlank());
this.makeExclusive = makeExclusive;
}
/**
 * Offer some nodes to this. The nodes may have an allocation to a different application or cluster,
 * an allocation to this cluster, or no current allocation (in which case one is assigned).
 *
 * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
 * reject allocated nodes due to index duplicates.
 *
 * @param candidates the nodes which are potentially on offer. These may belong to a different application etc.
 */
void offer(List<NodeCandidate> candidates) {
for (NodeCandidate candidate : candidates) {
if (candidate.allocation().isPresent()) {
// Candidate is already allocated: usable only if it belongs to this application and cluster
Allocation allocation = candidate.allocation().get();
ClusterMembership membership = allocation.membership();
if ( ! allocation.owner().equals(application)) continue; // allocated to another application
if ( ! membership.cluster().satisfies(cluster)) continue; // allocated to another cluster
if (candidate.state() == Node.State.active && allocation.removable()) continue; // scheduled for removal
if (candidate.state() == Node.State.active && candidate.wantToFail()) continue; // will soon be failed
if (indexes.contains(membership.index())) continue; // duplicate membership index
// Enclave clouds: the candidate's parent host must be in the requested cloud account
if (nodeRepository.zone().cloud().allowEnclave() && candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requested.cloudAccount())) continue;
boolean resizeable = requested.considerRetiring() && candidate.isResizable;
// Accept if wanted and compatible, or if it must be kept anyway (see acceptIncompatible)
if (( ! saturated() && hasCompatibleResources(candidate) && requested.acceptable(candidate)) || acceptIncompatible(candidate)) {
candidate = candidate.withNode();
if (candidate.isValid())
acceptNode(candidate, shouldRetire(candidate, candidates), resizeable);
}
}
else if ( ! saturated() && hasCompatibleResources(candidate)) {
// Unallocated candidate: apply admission checks, then allocate it to this cluster
if ( ! nodeRepository.nodeResourceLimits().isWithinRealLimits(candidate, application, cluster)) {
++rejectedDueToInsufficientRealResources;
continue;
}
if ( violatesParentHostPolicy(candidate)) {
++rejectedDueToClashingParentHost;
continue;
}
switch (violatesExclusivity(candidate, makeExclusive)) {
case PARENT_HOST_NOT_EXCLUSIVE -> candidate = candidate.withExclusiveParent(true);
case NONE -> {}
case YES -> {
++rejectedDueToExclusivity;
continue;
}
}
if (candidate.wantToRetire()) {
continue;
}
candidate = candidate.allocate(application,
ClusterMembership.from(cluster, nextIndex.get()),
requested.resources().orElse(candidate.resources()),
nodeRepository.clock().instant());
if (candidate.isValid()) {
acceptNode(candidate, Retirement.none, false);
}
}
}
}
/** Returns the cause of retirement for the given candidate, or Retirement.none if it should not be retired. Checks are ordered by priority. */
private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) {
if ( ! requested.considerRetiring()) {
// Retiring is disabled for this request: only report a node that is already retired
boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false);
return alreadyRetired ? Retirement.alreadyRetired : Retirement.none;
}
if ( ! nodeRepository.nodeResourceLimits().isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits;
if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy;
if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources;
if (candidate.parent.map(node -> node.status().wantToUpgradeFlavor()).orElse(false)) return Retirement.violatesHostFlavorGeneration;
if (candidate.wantToRetire()) return Retirement.hardRequest;
// Soft retirement requests are honored only when a replacement candidate exists
if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest;
if (violatesExclusivity(candidate, makeExclusive) != NodeCandidate.ExclusivityViolation.NONE) return Retirement.violatesExclusivity;
if (requiredHostFlavor.isPresent() && ! candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor;
if (candidate.violatesSpares) return Retirement.violatesSpares;
return Retirement.none;
}
/** Whether accepting this candidate would put two of this cluster's nodes on the same parent host, where that is disallowed. */
private boolean violatesParentHostPolicy(NodeCandidate candidate) {
    if ( ! checkForClashingParentHost()) return false;
    return offeredNodeHasParentHostnameAlreadyAccepted(candidate);
}
/** Parent-host clashes are only disallowed for non-tester instances in production environments of the main system. */
private boolean checkForClashingParentHost() {
    if (nodeRepository.zone().system() != SystemName.main) return false;
    if ( ! nodeRepository.zone().environment().isProduction()) return false;
    return ! application.instance().isTester();
}
/** Delegates the exclusivity check to the candidate, supplying the relevant deployment context. */
private NodeCandidate.ExclusivityViolation violatesExclusivity(NodeCandidate candidate, boolean makeExclusive) {
    var exclusiveAllocation = nodeRepository.exclusiveAllocation(cluster);
    var exclusiveProvisioning = nodeRepository.exclusiveProvisioning(cluster);
    var hostSharingAllowed = nodeRepository.zone().cloud().allowHostSharing();
    return candidate.violatesExclusivity(cluster, application,
                                         exclusiveAllocation, exclusiveProvisioning,
                                         hostSharingAllowed, allNodes, makeExclusive);
}
/**
 * Returns whether this node should be accepted into the cluster even though it is not currently desired
 * (already enough nodes, or wrong resources, etc.).
 * Such nodes will be marked retired during finalization of the list of accepted nodes when allowed.
 * The conditions for this are:
 *
 * - We are forced to accept since we cannot remove gracefully (bootstrap).
 *
 * - This is a stateful node. These must always be retired before being removed to allow the cluster to
 *   migrate away data.
 *
 * - This is a container node and it is not desired due to having the wrong flavor. In this case this
 *   will (normally) obtain for all the current nodes in the cluster and so retiring before removing must
 *   be used to avoid removing all the current nodes at once, before the newly allocated replacements are
 *   initialized. (In the other case, where a container node is not desired because we have enough nodes we
 *   do want to remove it immediately to get immediate feedback on how the size reduction works out.)
 */
private boolean acceptIncompatible(NodeCandidate candidate) {
    if (candidate.state() != Node.State.active) return false;
    if (candidate.allocation().get().membership().retired()) return true; // already retired — keep it
    if ( ! requested.considerRetiring()) return true;                     // bootstrap — cannot remove gracefully
    if (cluster.isStateful()) return true;                                // stateful — must retire before removal
    return cluster.type() == ClusterSpec.Type.container && ! hasCompatibleResources(candidate);
}
/** Whether the candidate's resources either match the request directly or can be resized to match it. */
private boolean hasCompatibleResources(NodeCandidate candidate) {
    if (requested.isCompatible(candidate.resources())) return true;
    return candidate.isResizable;
}
// Accepts the candidate into this allocation: updates the accepted-node counters, applies
// retirement/unretirement and resizing as decided, and records the node. Returns the recorded node.
private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) {
Node node = candidate.toNode();
if (node.allocation().isPresent())
node = node.with(node.allocation().get().withRequestedResources(requested.resources().orElse(node.resources())));
if (retirement == Retirement.none) {
// Count towards the requested node count, unless the node still needs a resize it will not get here
if (node.allocation().isEmpty()
|| (canBeUsedInGroupWithDeficiency(node) &&
! (requested.needsResize(node) && (node.allocation().get().membership().retired() || ! requested.considerRetiring())))) {
acceptedAndCompatible++;
}
if (hasCompatibleResources(candidate))
acceptedAndCompatibleOrResizable++;
if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired()))
node = resize(node);
// A node accepted from a non-active state is (re)activated: clear retired and removable status
if (node.state() != Node.State.active)
node = node.unretire().removable(false);
} else if (retirement != Retirement.alreadyRetired) {
LOG.info("Retiring " + node + " because " + retirement.description());
++wasRetiredJustNow;
if (retirement == Retirement.violatesHostFlavorGeneration) {
++wasRetiredDueToFlavorUpgrade;
}
node = node.retire(nodeRepository.clock().instant());
}
// Make sure the node's membership refers to the requested cluster spec (keeping its group assignment)
if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
node = setCluster(cluster.with(node.allocation().get().membership().cluster().group()), node);
}
candidate = candidate.withNode(node, retirement != Retirement.none && retirement != Retirement.alreadyRetired );
indexes.add(node.allocation().get().membership().index());
nodes.put(node.hostname(), candidate);
return node;
}
/** Whether this node can still count towards its group, i.e. the group has not yet reached its requested size. */
private boolean canBeUsedInGroupWithDeficiency(Node node) {
    if (requested.count().isEmpty() || node.allocation().isEmpty()) return true;
    var group = node.allocation().get().membership().cluster().group();
    if (group.isEmpty()) return true;
    long membersAcceptedSoFar = 0;
    for (NodeCandidate accepted : nodes.values())
        if (groupOf(accepted).equals(group)) membersAcceptedSoFar++;
    return membersAcceptedSoFar < requested.groupSize();
}
/** The group of the candidate's current allocation, or empty if it has no allocation or no group. */
private Optional<ClusterSpec.Group> groupOf(NodeCandidate candidate) {
    if (candidate.allocation().isEmpty()) return Optional.empty();
    return candidate.allocation().get().membership().cluster().group();
}
/** Returns the node resized to the requested resources, keeping the disk speed, storage type and architecture of its host. */
private Node resize(Node node) {
    NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
    NodeResources resized = requested.resources().get()
                                     .with(hostResources.diskSpeed())
                                     .with(hostResources.storageType())
                                     .with(hostResources.architecture());
    return node.with(new Flavor(resized), Agent.application, nodeRepository.clock().instant());
}
/** Returns the node with its cluster membership moved to the given cluster spec. */
private Node setCluster(ClusterSpec cluster, Node node) {
    Allocation allocation = node.allocation().get();
    return node.with(allocation.with(allocation.membership().with(cluster)));
}
/** Returns true if no more nodes are needed in this list */
public boolean saturated() {
    // Uses the stricter counter: only nodes already accepted with compatible resources
    return requested.saturatedBy(acceptedAndCompatible);
}
/** Returns true if the content of this list is sufficient to meet the request */
boolean fulfilled() {
    // Unlike saturated(), this also counts accepted nodes that can be resized into compatibility
    return requested.fulfilledBy(acceptedAndCompatibleOrResizable());
}
/** Returns true if this allocation was already fulfilled and resulted in no new changes */
boolean fulfilledAndNoChanges() {
    // "No changes": nothing to reserve and nothing newly created
    return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty();
}
/** Returns true if this allocation retired nodes in this round (nodes that were already retired do not count) */
boolean hasRetiredJustNow() {
    return wasRetiredJustNow > 0;
}
/**
 * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}.
 *
 * @return empty if the requested spec is already fulfilled, or if hosts are being allocated
 *         (hosts are provisioned, not placed on other hosts). Otherwise, returns {@link HostDeficit}
 *         containing the flavor and host count required to cover the deficit.
 */
Optional<HostDeficit> hostDeficit() {
    if (nodeType().isHost()) {
        return Optional.empty(); // Hosts are provisioned as required by the child application
    }
    int deficit = requested.fulfilledDeficitCount(acceptedAndCompatibleOrResizable());
    // Attribute the deficit to a flavor upgrade when it exactly matches the number of nodes retired
    // for that reason. This also holds trivially when both are 0, but zero-count deficits are
    // discarded by the filter below, so that case never escapes.
    boolean dueToFlavorUpgrade = deficit == wasRetiredDueToFlavorUpgrade;
    return Optional.of(new HostDeficit(requested.resources().orElseGet(NodeResources::unspecified),
                                       deficit,
                                       dueToFlavorUpgrade))
                   .filter(hostDeficit -> hostDeficit.count() > 0);
}
/** Returns the indices to use when provisioning hosts for this */
List<Integer> provisionIndices(int count) {
    if (count < 1) throw new IllegalArgumentException("Count must be positive");
    NodeType hostType = requested.type().hostType();
    // Tenant hosts draw indices from a central counter; other host types reuse the lowest free indices
    if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count);
    // Indices currently taken by hosts of this type (deprovisioned hosts free up their index)
    Set<Integer> currentIndices = allNodes.nodeType(hostType)
                                          .not().state(Node.State.deprovisioned)
                                          .hostnames()
                                          .stream()
                                          .map(NodeAllocation::parseIndex)
                                          .collect(Collectors.toSet());
    // Pick the `count` lowest indices not currently in use
    List<Integer> indices = new ArrayList<>(count);
    for (int i = 1; indices.size() < count; i++) {
        if (!currentIndices.contains(i)) {
            indices.add(i);
        }
    }
    // Never hand out the index of the machine we run on. NOTE(review): remove(Object) removes by
    // value here (Integer, not int), and may return fewer than `count` indices when our own index
    // was among the picked ones — presumably acceptable to callers; verify if an exact count is required.
    Integer myIndex = parseIndex(HostName.getLocalhost());
    indices.remove(myIndex);
    return indices;
}
/** The node type this is allocating */
NodeType nodeType() {
    return requested.type();
}
/** Returns the parent hosts of accepted candidates that require their parent to become exclusive. */
List<Node> parentsRequiredToBeExclusive() {
    List<Node> exclusiveParents = new ArrayList<>();
    for (NodeCandidate candidate : nodes.values()) {
        if ( ! candidate.exclusiveParent) continue;
        exclusiveParents.add(candidate.parent.orElseThrow());
    }
    return List.copyOf(exclusiveParents); // unmodifiable, like Stream.toList()
}
/**
 * Assigns groups to the accepted candidates and returns the resulting nodes.
 * Side effect: replaces the content of the internal candidate map with the group-assigned candidates.
 */
List<Node> finalNodes() {
    GroupAssigner groupAssigner = new GroupAssigner(requested, allNodes, nodeRepository.clock());
    Collection<NodeCandidate> finalNodes = groupAssigner.assignTo(nodes.values());
    nodes.clear();
    finalNodes.forEach(candidate -> nodes.put(candidate.toNode().hostname(), candidate));
    return finalNodes.stream().map(NodeCandidate::toNode).toList();
}
/** Returns the accepted, pre-existing nodes which are in a state where they can be reserved. */
List<Node> reservableNodes() {
    // 'reserved' is included here — presumably so an existing reservation can be (re)written; verify
    EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
    return matching(n -> ! n.isNew && reservableStates.contains(n.state())).toList();
}
/** Returns the accepted candidates which do not yet exist as nodes in the repository. */
List<Node> newNodes() {
    return matching(node -> node.isNew).toList();
}
/** Returns the accepted candidates matching the given predicate, converted to nodes. */
private Stream<Node> matching(Predicate<NodeCandidate> predicate) {
    return nodes.values().stream().filter(predicate).map(NodeCandidate::toNode);
}
/**
 * Returns the number of accepted nodes which are compatible or resizable. For non-tenant node
 * types this instead returns the total number of such nodes in the repository — presumably because
 * allocation of those types is counted zone-wide; verify against NodeSpec usage.
 */
private int acceptedAndCompatibleOrResizable() {
    if (nodeType() == NodeType.tenant) return acceptedAndCompatibleOrResizable;
    return allNodes.nodeType(nodeType()).size();
}
/** Returns a human-readable summary of why this allocation failed, or "" if no reason was recorded. */
String allocationFailureDetails() {
    List<String> reasons = new ArrayList<>();
    if (rejectedDueToExclusivity > 0) reasons.add("host exclusivity constraints");
    if (rejectedDueToClashingParentHost > 0) reasons.add("insufficient nodes available on separate physical hosts");
    if (wasRetiredJustNow > 0) reasons.add("retirement of allocated nodes");
    if (rejectedDueToInsufficientRealResources > 0) reasons.add("insufficient real resources on hosts");
    return reasons.isEmpty() ? "" : ": Not enough suitable nodes available due to " + String.join(", ", reasons);
}
/**
 * Extracts the numeric index from a hostname of the form "&lt;non-digits&gt;&lt;digits&gt;.&lt;domain&gt;",
 * e.g. "cfg3.example.com" -&gt; 3.
 *
 * @throws IllegalArgumentException if the hostname does not match that pattern: replaceFirst then
 *         leaves the string unchanged and Integer.parseInt throws, which is rethrown wrapped here
 */
private static Integer parseIndex(String hostname) {
    try {
        return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1"));
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e);
    }
}
/** Possible causes of node retirement */
private enum Retirement {
    alreadyRetired("node is already retired"),
    outsideRealLimits("node real resources is outside limits"),
    violatesParentHostPolicy("node violates parent host policy"),
    incompatibleResources("node resources are incompatible"),
    hardRequest("node is requested and required to retire"),
    softRequest("node is requested to retire"),
    violatesExclusivity("node violates host exclusivity"),
    violatesHostFlavor("node violates host flavor"),
    violatesHostFlavorGeneration("node violates host flavor generation"),
    violatesSpares("node is assigned to a host we want to use as a spare"),
    none(""); // no retirement; the empty description is never logged since acceptNode only logs for actual retirements
    private final String description;
    Retirement(String description) {
        this.description = description;
    }
    /** Human-readable description of this cause */
    public String description() {
        return description;
    }
}
/**
 * A host deficit, the number of missing hosts, for a deployment.
 *
 * @param resources          the node resources each missing host must be able to supply
 * @param count              the number of hosts missing
 * @param dueToFlavorUpgrade whether the deficit was caused by retiring nodes on outdated host flavors
 */
record HostDeficit(NodeResources resources, int count, boolean dueToFlavorUpgrade) {
    @Override
    public String toString() {
        return "deficit of " + count + " nodes with " + resources + (dueToFlavorUpgrade ? ", due to flavor upgrade" : "");
    }
}
} | class NodeAllocation {
private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName());
/** List of all nodes in node-repository */
private final NodeList allNodes;
/** The application this list is for */
private final ApplicationId application;
/** The cluster this list is for */
private final ClusterSpec cluster;
/** The requested nodes of this list */
private final NodeSpec requested;
/** The node candidates this has accepted so far, keyed on hostname */
private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>();
/** The number of already allocated nodes of compatible size */
private int acceptedAndCompatible = 0;
/** The number of already allocated nodes which can be made compatible */
private int acceptedAndCompatibleOrResizable = 0;
/** The number of nodes rejected because of clashing parentHostname */
private int rejectedDueToClashingParentHost = 0;
/** The number of nodes rejected due to exclusivity constraints */
private int rejectedDueToExclusivity = 0;
private int rejectedDueToInsufficientRealResources = 0;
/** The number of nodes that just now was changed to retired */
private int wasRetiredJustNow = 0;
/** The number of nodes that just now was changed to retired to upgrade its host flavor */
private int wasRetiredDueToFlavorUpgrade = 0;
/** The node indexes to verify uniqueness of each member's index */
private final Set<Integer> indexes = new HashSet<>();
/** The next membership index to assign to a new node */
private final Supplier<Integer> nextIndex;
private final NodeRepository nodeRepository;
private final Optional<String> requiredHostFlavor;
private final boolean makeExclusive;
NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requested,
Supplier<Integer> nextIndex, NodeRepository nodeRepository, boolean makeExclusive) {
this.allNodes = allNodes;
this.application = application;
this.cluster = cluster;
this.requested = requested;
this.nextIndex = nextIndex;
this.nodeRepository = nodeRepository;
this.requiredHostFlavor = Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource())
.with(FetchVector.Dimension.INSTANCE_ID, application.serializedForm())
.with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name())
.with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value())
.value())
.filter(s -> !s.isBlank());
this.makeExclusive = makeExclusive;
}
/**
* Offer some nodes to this. The nodes may have an allocation to a different application or cluster,
* an allocation to this cluster, or no current allocation (in which case one is assigned).
*
* Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
* reject allocated nodes due to index duplicates.
*
* @param candidates the nodes which are potentially on offer. These may belong to a different application etc.
*/
void offer(List<NodeCandidate> candidates) {
for (NodeCandidate candidate : candidates) {
if (candidate.allocation().isPresent()) {
Allocation allocation = candidate.allocation().get();
ClusterMembership membership = allocation.membership();
if ( ! allocation.owner().equals(application)) continue;
if ( ! membership.cluster().satisfies(cluster)) continue;
if (candidate.state() == Node.State.active && allocation.removable()) continue;
if (candidate.state() == Node.State.active && candidate.wantToFail()) continue;
if (indexes.contains(membership.index())) continue;
if (nodeRepository.zone().cloud().allowEnclave() && candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requested.cloudAccount())) continue;
boolean resizeable = requested.considerRetiring() && candidate.isResizable;
if (( ! saturated() && hasCompatibleResources(candidate) && requested.acceptable(candidate)) || acceptIncompatible(candidate)) {
candidate = candidate.withNode();
if (candidate.isValid())
acceptNode(candidate, shouldRetire(candidate, candidates), resizeable);
}
}
else if ( ! saturated() && hasCompatibleResources(candidate)) {
if ( ! nodeRepository.nodeResourceLimits().isWithinRealLimits(candidate, application, cluster)) {
++rejectedDueToInsufficientRealResources;
continue;
}
if ( violatesParentHostPolicy(candidate)) {
++rejectedDueToClashingParentHost;
continue;
}
switch (violatesExclusivity(candidate, makeExclusive)) {
case PARENT_HOST_NOT_EXCLUSIVE -> candidate = candidate.withExclusiveParent(true);
case NONE -> {}
case YES -> {
++rejectedDueToExclusivity;
continue;
}
}
if (candidate.wantToRetire()) {
continue;
}
candidate = candidate.allocate(application,
ClusterMembership.from(cluster, nextIndex.get()),
requested.resources().orElse(candidate.resources()),
nodeRepository.clock().instant());
if (candidate.isValid()) {
acceptNode(candidate, Retirement.none, false);
}
}
}
}
/** Returns the cause of retirement for given candidate */
private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) {
if ( ! requested.considerRetiring()) {
boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false);
return alreadyRetired ? Retirement.alreadyRetired : Retirement.none;
}
if ( ! nodeRepository.nodeResourceLimits().isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits;
if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy;
if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources;
if (candidate.parent.map(node -> node.status().wantToUpgradeFlavor()).orElse(false)) return Retirement.violatesHostFlavorGeneration;
if (candidate.wantToRetire()) return Retirement.hardRequest;
if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest;
if (violatesExclusivity(candidate, makeExclusive) != NodeCandidate.ExclusivityViolation.NONE) return Retirement.violatesExclusivity;
if (requiredHostFlavor.isPresent() && ! candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor;
if (candidate.violatesSpares) return Retirement.violatesSpares;
return Retirement.none;
}
private boolean violatesParentHostPolicy(NodeCandidate candidate) {
return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate);
}
private boolean checkForClashingParentHost() {
return nodeRepository.zone().system() == SystemName.main &&
nodeRepository.zone().environment().isProduction() &&
! application.instance().isTester();
}
private NodeCandidate.ExclusivityViolation violatesExclusivity(NodeCandidate candidate, boolean makeExclusive) {
return candidate.violatesExclusivity(cluster, application,
nodeRepository.exclusiveAllocation(cluster),
nodeRepository.exclusiveProvisioning(cluster),
nodeRepository.zone().cloud().allowHostSharing(), allNodes, makeExclusive);
}
/**
* Returns whether this node should be accepted into the cluster even if it is not currently desired
* (already enough nodes, or wrong resources, etc.).
* Such nodes will be marked retired during finalization of the list of accepted nodes when allowed.
* The conditions for this are:
*
* - We are forced to accept since we cannot remove gracefully (bootstrap).
*
* - This is a stateful node. These must always be retired before being removed to allow the cluster to
* migrate away data.
*
* - This is a container node and it is not desired due to having the wrong flavor. In this case this
* will (normally) obtain for all the current nodes in the cluster and so retiring before removing must
* be used to avoid removing all the current nodes at once, before the newly allocated replacements are
* initialized. (In the other case, where a container node is not desired because we have enough nodes we
* do want to remove it immediately to get immediate feedback on how the size reduction works out.)
*/
private boolean acceptIncompatible(NodeCandidate candidate) {
if (candidate.state() != Node.State.active) return false;
if (candidate.allocation().get().membership().retired()) return true;
if ( ! requested.considerRetiring())
return true;
return cluster.isStateful() ||
(cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate));
}
private boolean hasCompatibleResources(NodeCandidate candidate) {
return requested.isCompatible(candidate.resources()) || candidate.isResizable;
}
private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) {
Node node = candidate.toNode();
if (node.allocation().isPresent())
node = node.with(node.allocation().get().withRequestedResources(requested.resources().orElse(node.resources())));
if (retirement == Retirement.none) {
if (node.allocation().isEmpty()
|| (canBeUsedInGroupWithDeficiency(node) &&
! (requested.needsResize(node) && (node.allocation().get().membership().retired() || ! requested.considerRetiring())))) {
acceptedAndCompatible++;
}
if (hasCompatibleResources(candidate))
acceptedAndCompatibleOrResizable++;
if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired()))
node = resize(node);
if (node.state() != Node.State.active)
node = node.unretire().removable(false);
} else if (retirement != Retirement.alreadyRetired) {
LOG.info("Retiring " + node + " because " + retirement.description());
++wasRetiredJustNow;
if (retirement == Retirement.violatesHostFlavorGeneration) {
++wasRetiredDueToFlavorUpgrade;
}
node = node.retire(nodeRepository.clock().instant());
}
if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
node = setCluster(cluster.with(node.allocation().get().membership().cluster().group()), node);
}
candidate = candidate.withNode(node, retirement != Retirement.none && retirement != Retirement.alreadyRetired );
indexes.add(node.allocation().get().membership().index());
nodes.put(node.hostname(), candidate);
return node;
}
private boolean canBeUsedInGroupWithDeficiency(Node node) {
if (requested.count().isEmpty()) return true;
if (node.allocation().isEmpty()) return true;
var group = node.allocation().get().membership().cluster().group();
if (group.isEmpty()) return true;
long nodesInGroup = nodes.values().stream().filter(n -> groupOf(n).equals(group)).count();
return nodesInGroup < requested.groupSize();
}
private Optional<ClusterSpec.Group> groupOf(NodeCandidate candidate) {
return candidate.allocation().flatMap(a -> a.membership().cluster().group());
}
private Node resize(Node node) {
NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
return node.with(new Flavor(requested.resources().get()
.with(hostResources.diskSpeed())
.with(hostResources.storageType())
.with(hostResources.architecture())),
Agent.application, nodeRepository.clock().instant());
}
private Node setCluster(ClusterSpec cluster, Node node) {
ClusterMembership membership = node.allocation().get().membership().with(cluster);
return node.with(node.allocation().get().with(membership));
}
/** Returns true if no more nodes are needed in this list */
public boolean saturated() {
return requested.saturatedBy(acceptedAndCompatible);
}
/** Returns true if the content of this list is sufficient to meet the request */
boolean fulfilled() {
return requested.fulfilledBy(acceptedAndCompatibleOrResizable());
}
/** Returns true if this allocation was already fulfilled and resulted in no new changes */
boolean fulfilledAndNoChanges() {
return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty();
}
/** Returns true if this allocation has retired nodes */
boolean hasRetiredJustNow() {
return wasRetiredJustNow > 0;
}
/**
* Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}.
*
* @return empty if the requested spec is already fulfilled. Otherwise, returns {@link HostDeficit} containing the
* flavor and host count required to cover the deficit.
*/
Optional<HostDeficit> hostDeficit() {
if (nodeType().isHost()) {
return Optional.empty();
}
int deficit = requested.fulfilledDeficitCount(acceptedAndCompatibleOrResizable());
boolean dueToFlavorUpgrade = deficit == wasRetiredDueToFlavorUpgrade;
return Optional.of(new HostDeficit(requested.resources().orElseGet(NodeResources::unspecified),
deficit,
dueToFlavorUpgrade))
.filter(hostDeficit -> hostDeficit.count() > 0);
}
/** Returns the indices to use when provisioning hosts for this */
List<Integer> provisionIndices(int count) {
if (count < 1) throw new IllegalArgumentException("Count must be positive");
NodeType hostType = requested.type().hostType();
if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count);
Set<Integer> currentIndices = allNodes.nodeType(hostType)
.not().state(Node.State.deprovisioned)
.hostnames()
.stream()
.map(NodeAllocation::parseIndex)
.collect(Collectors.toSet());
List<Integer> indices = new ArrayList<>(count);
for (int i = 1; indices.size() < count; i++) {
if (!currentIndices.contains(i)) {
indices.add(i);
}
}
Integer myIndex = parseIndex(HostName.getLocalhost());
indices.remove(myIndex);
return indices;
}
/** The node type this is allocating */
NodeType nodeType() {
return requested.type();
}
List<Node> parentsRequiredToBeExclusive() {
return nodes.values()
.stream()
.filter(candidate -> candidate.exclusiveParent)
.map(candidate -> candidate.parent.orElseThrow())
.toList();
}
List<Node> finalNodes() {
GroupAssigner groupAssigner = new GroupAssigner(requested, allNodes, nodeRepository.clock());
Collection<NodeCandidate> finalNodes = groupAssigner.assignTo(nodes.values());
nodes.clear();
finalNodes.forEach(candidate -> nodes.put(candidate.toNode().hostname(), candidate));
return finalNodes.stream().map(NodeCandidate::toNode).toList();
}
List<Node> reservableNodes() {
EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
return matching(n -> ! n.isNew && reservableStates.contains(n.state())).toList();
}
List<Node> newNodes() {
return matching(node -> node.isNew).toList();
}
private Stream<Node> matching(Predicate<NodeCandidate> predicate) {
return nodes.values().stream().filter(predicate).map(NodeCandidate::toNode);
}
/** Returns the number of nodes accepted this far */
private int acceptedAndCompatibleOrResizable() {
if (nodeType() == NodeType.tenant) return acceptedAndCompatibleOrResizable;
return allNodes.nodeType(nodeType()).size();
}
String allocationFailureDetails() {
List<String> reasons = new ArrayList<>();
if (rejectedDueToExclusivity > 0)
reasons.add("host exclusivity constraints");
if (rejectedDueToClashingParentHost > 0)
reasons.add("insufficient nodes available on separate physical hosts");
if (wasRetiredJustNow > 0)
reasons.add("retirement of allocated nodes");
if (rejectedDueToInsufficientRealResources > 0)
reasons.add("insufficient real resources on hosts");
if (reasons.isEmpty()) return "";
return ": Not enough suitable nodes available due to " + String.join(", ", reasons);
}
private static Integer parseIndex(String hostname) {
try {
return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1"));
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e);
}
}
/** Possible causes of node retirement */
private enum Retirement {
alreadyRetired("node is already retired"),
outsideRealLimits("node real resources is outside limits"),
violatesParentHostPolicy("node violates parent host policy"),
incompatibleResources("node resources are incompatible"),
hardRequest("node is requested and required to retire"),
softRequest("node is requested to retire"),
violatesExclusivity("node violates host exclusivity"),
violatesHostFlavor("node violates host flavor"),
violatesHostFlavorGeneration("node violates host flavor generation"),
violatesSpares("node is assigned to a host we want to use as a spare"),
none("");
private final String description;
Retirement(String description) {
this.description = description;
}
/** Human-readable description of this cause */
public String description() {
return description;
}
}
/** A host deficit, the number of missing hosts, for a deployment */
record HostDeficit(NodeResources resources, int count, boolean dueToFlavorUpgrade) {
@Override
public String toString() {
return "deficit of " + count + " nodes with " + resources + (dueToFlavorUpgrade ? ", due to flavor upgrade" : "");
}
}
} |
A single `*` renders as emphasis (italics) in markdown — was that intended? For bold, use double asterisks (`**`). | private void notifyMidCheckIn(Tenant tenant) {
var consoleMsg = "You're halfway through the *14 day* trial period. [Manage plan](%s)".formatted(billingUrl(tenant));
queueNotification(tenant, consoleMsg, "How is your Vespa Cloud trial going?",
"How is your Vespa Cloud trial going? " +
"Please reach out to us if you have any questions or feedback.");
} | var consoleMsg = "You're halfway through the *14 day* trial period. [Manage plan](%s)".formatted(billingUrl(tenant)); | private void notifyMidCheckIn(Tenant tenant) {
var consoleMsg = "You're halfway through the **14 day** trial period. [Manage plan](%s)".formatted(billingUrl(tenant));
queueNotification(tenant, consoleMsg, "How is your Vespa Cloud trial going?",
"How is your Vespa Cloud trial going? " +
"Please reach out to us if you have any questions or feedback.");
} | class CloudTrialExpirer extends ControllerMaintainer {
private static final Logger log = Logger.getLogger(CloudTrialExpirer.class.getName());
private static final Duration nonePlanAfter = Duration.ofDays(14);
private static final Duration tombstoneAfter = Duration.ofDays(91);
private final ListFlag<String> extendedTrialTenants;
private final BooleanFlag cloudTrialNotificationEnabled;
/** Creates the expirer; the SystemName filter restricts it to run only in public systems. */
public CloudTrialExpirer(Controller controller, Duration interval) {
    super(controller, interval, null, SystemName.allOf(SystemName::isPublic));
    this.extendedTrialTenants = PermanentFlags.EXTENDED_TRIAL_TENANTS.bindTo(controller().flagSource());
    this.cloudTrialNotificationEnabled = Flags.CLOUD_TRIAL_NOTIFICATIONS.bindTo(controller().flagSource());
}
@Override
protected double maintain() {
    var a = tombstoneNonePlanTenants();
    var b = moveInactiveTenantsToNonePlan();
    var c = notifyTenants();
    // Each failed sub-task subtracts 1/3 from the success metric (0.0 == full success).
    // Bug fix: a misplaced parenthesis previously nested the notifyTenants() term inside
    // moveInactiveTenantsToNonePlan()'s false branch, so a notifyTenants() failure was
    // silently ignored whenever b was true.
    return (a ? 0.0 : -(1D / 3)) + (b ? 0.0 : -(1D / 3)) + (c ? 0.0 : -(1D / 3));
}
/**
 * Downgrades to the 'none' plan those trial tenants that have no deployments and whose users have
 * not logged in for {@code nonePlanAfter}, unless exempt via the extended-trial flag.
 *
 * @return true if all plan changes succeeded
 */
private boolean moveInactiveTenantsToNonePlan() {
    var idleTrialTenants = controller().tenants().asList().stream()
            .filter(this::tenantIsCloudTenant)
            .filter(this::tenantIsNotExemptFromExpiry)
            .filter(this::tenantHasNoDeployments)
            .filter(this::tenantHasTrialPlan)
            .filter(tenantReadersNotLoggedIn(nonePlanAfter))
            .toList();
    if (! idleTrialTenants.isEmpty()) {
        var tenants = idleTrialTenants.stream().map(Tenant::name).map(TenantName::value).collect(Collectors.joining(", "));
        log.info("Setting tenants to 'none' plan: " + tenants);
    }
    return setPlanNone(idleTrialTenants);
}
/**
 * Tombstones (deletes) tenants already on the 'none' plan that have no deployments and whose users
 * have not logged in for {@code tombstoneAfter}, unless exempt via the extended-trial flag.
 *
 * @return true if all deletions succeeded
 */
private boolean tombstoneNonePlanTenants() {
    var idleOldPlanTenants = controller().tenants().asList().stream()
            .filter(this::tenantIsCloudTenant)
            .filter(this::tenantIsNotExemptFromExpiry)
            .filter(this::tenantHasNoDeployments)
            .filter(this::tenantHasNonePlan)
            .filter(tenantReadersNotLoggedIn(tombstoneAfter))
            .toList();
    if (! idleOldPlanTenants.isEmpty()) {
        var tenants = idleOldPlanTenants.stream().map(Tenant::name).map(TenantName::value).collect(Collectors.joining(", "));
        log.info("Setting tenants as tombstoned: " + tenants);
    }
    return tombstoneTenants(idleOldPlanTenants);
}
/**
 * Walks all tenants and sends at most one trial-lifecycle notification per state transition
 * (signed up, mid check-in, expires soon, expires immediately, expired), persisting the
 * per-tenant notification state via the curator.
 *
 * @return true on success; false if any exception was caught (logged, not rethrown)
 */
private boolean notifyTenants() {
    try {
        // Previously persisted per-tenant notification states
        var currentStatus = controller().curator().readTrialNotifications()
                .map(TrialNotifications::tenants).orElse(List.of());
        log.fine(() -> "Current: %s".formatted(currentStatus));
        var currentStatusByTenant = new HashMap<TenantName, TrialNotifications.Status>();
        currentStatus.forEach(status -> currentStatusByTenant.put(status.tenant(), status));
        var updatedStatus = new ArrayList<TrialNotifications.Status>();
        var now = controller().clock().instant();
        for (var tenant : controller().tenants().asList()) {
            var status = currentStatusByTenant.get(tenant.name());
            var state = status == null ? UNKNOWN : status.state();
            var plan = controller().serviceRegistry().billingController().getPlan(tenant.name()).value();
            var ageInDays = Duration.between(tenant.createdAt(), now).toDays();
            var enabled = cloudTrialNotificationEnabled.with(FetchVector.Dimension.TENANT_ID, tenant.name().value()).value();
            if (!enabled) {
                // Feature disabled for this tenant: carry any previous state forward untouched
                if (status != null) updatedStatus.add(status);
            } else if (!List.of("none", "trial").contains(plan)) {
                // Paying plan: no trial notifications, and any previous state is dropped
            } else if (status == null && "trial".equals(plan) && ageInDays <= 1) {
                // Newly created trial tenant with no prior notification state
                updatedStatus.add(updatedStatus(tenant, now, SIGNED_UP));
                notifySignup(tenant);
            } else if ("none".equals(plan) && !List.of(EXPIRED).contains(state)) {
                updatedStatus.add(updatedStatus(tenant, now, EXPIRED));
                notifyExpired(tenant);
            } else if ("trial".equals(plan) && ageInDays >= 13
                    && !List.of(EXPIRES_IMMEDIATELY, EXPIRED).contains(state)) {
                updatedStatus.add(updatedStatus(tenant, now, EXPIRES_IMMEDIATELY));
                notifyExpiresImmediately(tenant);
            } else if ("trial".equals(plan) && ageInDays >= 12
                    && !List.of(EXPIRES_SOON, EXPIRES_IMMEDIATELY, EXPIRED).contains(state)) {
                updatedStatus.add(updatedStatus(tenant, now, EXPIRES_SOON));
                notifyExpiresSoon(tenant);
            } else if ("trial".equals(plan) && ageInDays >= 7
                    && !List.of(MID_CHECK_IN, EXPIRES_SOON, EXPIRES_IMMEDIATELY, EXPIRED).contains(state)) {
                updatedStatus.add(updatedStatus(tenant, now, MID_CHECK_IN));
                notifyMidCheckIn(tenant);
            } else {
                // No transition; keep the previous state. NOTE(review): status may be null here
                // (e.g. trial tenant older than 1 day with no prior state and age < 7) — presumably
                // tolerated by TrialNotifications; verify
                updatedStatus.add(status);
            }
        }
        log.fine(() -> "Updated: %s".formatted(updatedStatus));
        controller().curator().writeTrialNotifications(new TrialNotifications(updatedStatus));
        return true;
    } catch (Exception e) {
        log.log(Level.WARNING, "Failed to process trial notifications", e);
        return false;
    }
}
/** Sends the welcome notification (console + mail) to a newly signed-up trial tenant. */
private void notifySignup(Tenant tenant) {
    var consoleMsg = "Welcome to Vespa Cloud trial! [Manage plan](%s)".formatted(billingUrl(tenant));
    queueNotification(tenant, consoleMsg, "Welcome to Vespa Cloud",
            "Welcome to Vespa Cloud! We hope you will enjoy your trial. " +
            "Please reach out to us if you have any questions or feedback.");
}
/** Notifies the tenant that its trial expires in two days. */
private void notifyExpiresSoon(Tenant tenant) {
    // Use ** (strong emphasis/bold); a single * renders as italics in markdown
    var consoleMsg = "Your Vespa Cloud trial expires in **2** days. [Manage plan](%s)".formatted(billingUrl(tenant));
    queueNotification(tenant, consoleMsg, "Your Vespa Cloud trial expires in 2 days",
            "Your Vespa Cloud trial expires in 2 days. " +
            "Please reach out to us if you have any questions or feedback.");
}
/** Notifies the tenant that its trial expires the next day. */
private void notifyExpiresImmediately(Tenant tenant) {
    // Use ** (strong emphasis/bold); a single * renders as italics in markdown
    var consoleMsg = "Your Vespa Cloud trial expires **tomorrow**. [Manage plan](%s)".formatted(billingUrl(tenant));
    queueNotification(tenant, consoleMsg, "Your Vespa Cloud trial expires tomorrow",
            "Your Vespa Cloud trial expires tomorrow. " +
            "Please reach out to us if you have any questions or feedback.");
}
/** Notifies the tenant that its trial has expired and suggests upgrading the plan. */
private void notifyExpired(Tenant tenant) {
    var consoleMsg = "Your Vespa Cloud trial has expired. [Upgrade plan](%s)".formatted(billingUrl(tenant));
    queueNotification(tenant, consoleMsg, "Your Vespa Cloud trial has expired",
            "Your Vespa Cloud trial has expired. " +
            "Please reach out to us if you have any questions or feedback.");
}
/**
 * Publishes a console notification with an attached mail for the given tenant.
 * Any previous 'account' notification for the tenant is removed first, so only the
 * latest trial-lifecycle message is shown.
 */
private void queueNotification(Tenant tenant, String consoleMsg, String emailSubject, String emailMsg) {
    var mail = Optional.of(Notification.MailContent.fromTemplate(MailTemplating.Template.DEFAULT_MAIL_CONTENT)
            .subject(emailSubject)
            .with("mailMessageTemplate", "cloud-trial-notification")
            .with("cloudTrialMessage", emailMsg)
            .with("mailTitle", emailSubject)
            .with("consoleLink", controller().serviceRegistry().consoleUrls().tenantOverview(tenant.name()))
            .build());
    var source = NotificationSource.from(tenant.name());
    controller().notificationsDb().removeNotification(source, Notification.Type.account);
    controller().notificationsDb().setNotification(
            source, Notification.Type.account, Notification.Level.info, consoleMsg, List.of(), mail);
}
/** The console billing page for the given tenant, linked from the notification messages. */
private String billingUrl(Tenant t) { return controller().serviceRegistry().consoleUrls().tenantBilling(t.name()); }
/** Creates a new trial-notification status entry for the given tenant, timestamp and state. */
private static TrialNotifications.Status updatedStatus(Tenant t, Instant i, TrialNotifications.State s) {
    return new TrialNotifications.Status(t.name(), s, i);
}
/** Returns whether the given tenant is a cloud tenant (the only type subject to trial expiry). */
private boolean tenantIsCloudTenant(Tenant tenant) {
    return tenant.type() == Tenant.Type.cloud;
}
/** Returns a predicate matching tenants whose users have not logged in within the given duration. */
private Predicate<Tenant> tenantReadersNotLoggedIn(Duration duration) {
    return (Tenant tenant) -> {
        var timeLimit = controller().clock().instant().minus(duration);
        // Tenants with no recorded user login are never considered idle (orElse(false))
        return tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
                .map(instant -> instant.isBefore(timeLimit))
                .orElse(false);
    };
}
/** Returns whether the tenant is currently on the 'trial' billing plan. */
private boolean tenantHasTrialPlan(Tenant tenant) {
    var planId = controller().serviceRegistry().billingController().getPlan(tenant.name());
    return "trial".equals(planId.value());
}
/** Returns whether the tenant is currently on the 'none' billing plan. */
private boolean tenantHasNonePlan(Tenant tenant) {
    var planId = controller().serviceRegistry().billingController().getPlan(tenant.name());
    return "none".equals(planId.value());
}
/** Returns whether the tenant is NOT on the extended-trial exemption list (flag-controlled). */
private boolean tenantIsNotExemptFromExpiry(Tenant tenant) {
    return !extendedTrialTenants.value().contains(tenant.name().value());
}
/** Returns whether the tenant has no deployments in any instance of any of its applications. */
private boolean tenantHasNoDeployments(Tenant tenant) {
    for (var application : controller().applications().asList(tenant.name()))
        for (var instance : application.instances().values())
            if ( ! instance.deployments().isEmpty()) return false;
    return true;
}
/**
 * Sets the 'none' plan on each of the given tenants, continuing past individual failures.
 *
 * @return true if every plan change succeeded
 */
private boolean setPlanNone(List<Tenant> tenants) {
    var success = true;
    for (var tenant : tenants) {
        try {
            controller().serviceRegistry().billingController().setPlan(tenant.name(), PlanId.from("none"), false, false);
        } catch (RuntimeException e) {
            log.info("Could not change plan for " + tenant.name() + ": " + e.getMessage());
            success = false;
        }
    }
    return success;
}
/**
 * Deletes each of the given tenants (and their applications first), continuing past individual
 * failures.
 *
 * @return true if every application deletion and tenant deletion succeeded
 */
private boolean tombstoneTenants(List<Tenant> tenants) {
    var success = true;
    for (var tenant : tenants) {
        // Applications must be removed before the tenant itself can be deleted
        success &= deleteApplicationsWithNoDeployments(tenant);
        log.fine("Tombstoning empty tenant: " + tenant.name());
        try {
            controller().tenants().delete(tenant.name(), Optional.empty(), false);
        } catch (RuntimeException e) {
            log.info("Could not tombstone tenant " + tenant.name() + ": " + e.getMessage());
            success = false;
        }
    }
    return success;
}
/**
 * Deletes all applications of the given tenant, continuing past individual failures.
 * NOTE(review): despite the name, this does not itself check for deployments — callers only invoke
 * it for tenants already filtered by tenantHasNoDeployments.
 *
 * @return true if every application deletion succeeded
 */
private boolean deleteApplicationsWithNoDeployments(Tenant tenant) {
    var success = true;
    for (var application : controller().applications().asList(tenant.name())) {
        try {
            log.fine("Removing empty application: " + application.id());
            controller().applications().deleteApplication(application.id(), Optional.empty());
        } catch (RuntimeException e) {
            // Fixed log message grammar ("Could not removing" -> "Could not remove")
            log.info("Could not remove application " + application.id() + ": " + e.getMessage());
            success = false;
        }
    }
    return success;
}
} | class CloudTrialExpirer extends ControllerMaintainer {
private static final Logger log = Logger.getLogger(CloudTrialExpirer.class.getName());
private static final Duration nonePlanAfter = Duration.ofDays(14);
private static final Duration tombstoneAfter = Duration.ofDays(91);
private final ListFlag<String> extendedTrialTenants;
private final BooleanFlag cloudTrialNotificationEnabled;
public CloudTrialExpirer(Controller controller, Duration interval) {
super(controller, interval, null, SystemName.allOf(SystemName::isPublic));
this.extendedTrialTenants = PermanentFlags.EXTENDED_TRIAL_TENANTS.bindTo(controller().flagSource());
this.cloudTrialNotificationEnabled = Flags.CLOUD_TRIAL_NOTIFICATIONS.bindTo(controller().flagSource());
}
@Override
protected double maintain() {
var a = tombstoneNonePlanTenants();
var b = moveInactiveTenantsToNonePlan();
var c = notifyTenants();
return (a ? 0.0 : -(1D/3)) + (b ? 0.0 : -(1D/3) + (c ? 0.0 : -(1D/3)));
}
private boolean moveInactiveTenantsToNonePlan() {
var idleTrialTenants = controller().tenants().asList().stream()
.filter(this::tenantIsCloudTenant)
.filter(this::tenantIsNotExemptFromExpiry)
.filter(this::tenantHasNoDeployments)
.filter(this::tenantHasTrialPlan)
.filter(tenantReadersNotLoggedIn(nonePlanAfter))
.toList();
if (! idleTrialTenants.isEmpty()) {
var tenants = idleTrialTenants.stream().map(Tenant::name).map(TenantName::value).collect(Collectors.joining(", "));
log.info("Setting tenants to 'none' plan: " + tenants);
}
return setPlanNone(idleTrialTenants);
}
private boolean tombstoneNonePlanTenants() {
var idleOldPlanTenants = controller().tenants().asList().stream()
.filter(this::tenantIsCloudTenant)
.filter(this::tenantIsNotExemptFromExpiry)
.filter(this::tenantHasNoDeployments)
.filter(this::tenantHasNonePlan)
.filter(tenantReadersNotLoggedIn(tombstoneAfter))
.toList();
if (! idleOldPlanTenants.isEmpty()) {
var tenants = idleOldPlanTenants.stream().map(Tenant::name).map(TenantName::value).collect(Collectors.joining(", "));
log.info("Setting tenants as tombstoned: " + tenants);
}
return tombstoneTenants(idleOldPlanTenants);
}
private boolean notifyTenants() {
try {
var currentStatus = controller().curator().readTrialNotifications()
.map(TrialNotifications::tenants).orElse(List.of());
log.fine(() -> "Current: %s".formatted(currentStatus));
var currentStatusByTenant = new HashMap<TenantName, TrialNotifications.Status>();
currentStatus.forEach(status -> currentStatusByTenant.put(status.tenant(), status));
var updatedStatus = new ArrayList<TrialNotifications.Status>();
var now = controller().clock().instant();
for (var tenant : controller().tenants().asList()) {
var status = currentStatusByTenant.get(tenant.name());
var state = status == null ? UNKNOWN : status.state();
var plan = controller().serviceRegistry().billingController().getPlan(tenant.name()).value();
var ageInDays = Duration.between(tenant.createdAt(), now).toDays();
var enabled = cloudTrialNotificationEnabled.with(FetchVector.Dimension.TENANT_ID, tenant.name().value()).value();
if (!enabled) {
if (status != null) updatedStatus.add(status);
} else if (!List.of("none", "trial").contains(plan)) {
} else if (status == null && "trial".equals(plan) && ageInDays <= 1) {
updatedStatus.add(updatedStatus(tenant, now, SIGNED_UP));
notifySignup(tenant);
} else if ("none".equals(plan) && !List.of(EXPIRED).contains(state)) {
updatedStatus.add(updatedStatus(tenant, now, EXPIRED));
notifyExpired(tenant);
} else if ("trial".equals(plan) && ageInDays >= 13
&& !List.of(EXPIRES_IMMEDIATELY, EXPIRED).contains(state)) {
updatedStatus.add(updatedStatus(tenant, now, EXPIRES_IMMEDIATELY));
notifyExpiresImmediately(tenant);
} else if ("trial".equals(plan) && ageInDays >= 12
&& !List.of(EXPIRES_SOON, EXPIRES_IMMEDIATELY, EXPIRED).contains(state)) {
updatedStatus.add(updatedStatus(tenant, now, EXPIRES_SOON));
notifyExpiresSoon(tenant);
} else if ("trial".equals(plan) && ageInDays >= 7
&& !List.of(MID_CHECK_IN, EXPIRES_SOON, EXPIRES_IMMEDIATELY, EXPIRED).contains(state)) {
updatedStatus.add(updatedStatus(tenant, now, MID_CHECK_IN));
notifyMidCheckIn(tenant);
} else {
updatedStatus.add(status);
}
}
log.fine(() -> "Updated: %s".formatted(updatedStatus));
controller().curator().writeTrialNotifications(new TrialNotifications(updatedStatus));
return true;
} catch (Exception e) {
log.log(Level.WARNING, "Failed to process trial notifications", e);
return false;
}
}
private void notifySignup(Tenant tenant) {
var consoleMsg = "Welcome to Vespa Cloud trial! [Manage plan](%s)".formatted(billingUrl(tenant));
queueNotification(tenant, consoleMsg, "Welcome to Vespa Cloud",
"Welcome to Vespa Cloud! We hope you will enjoy your trial. " +
"Please reach out to us if you have any questions or feedback.");
}
private void notifyExpiresSoon(Tenant tenant) {
var consoleMsg = "Your Vespa Cloud trial expires in **2** days. [Manage plan](%s)".formatted(billingUrl(tenant));
queueNotification(tenant, consoleMsg, "Your Vespa Cloud trial expires in 2 days",
"Your Vespa Cloud trial expires in 2 days. " +
"Please reach out to us if you have any questions or feedback.");
}
private void notifyExpiresImmediately(Tenant tenant) {
var consoleMsg = "Your Vespa Cloud trial expires **tomorrow**. [Manage plan](%s)".formatted(billingUrl(tenant));
queueNotification(tenant, consoleMsg, "Your Vespa Cloud trial expires tomorrow",
"Your Vespa Cloud trial expires tomorrow. " +
"Please reach out to us if you have any questions or feedback.");
}
private void notifyExpired(Tenant tenant) {
var consoleMsg = "Your Vespa Cloud trial has expired. [Upgrade plan](%s)".formatted(billingUrl(tenant));
queueNotification(tenant, consoleMsg, "Your Vespa Cloud trial has expired",
"Your Vespa Cloud trial has expired. " +
"Please reach out to us if you have any questions or feedback.");
}
private void queueNotification(Tenant tenant, String consoleMsg, String emailSubject, String emailMsg) {
var mail = Optional.of(Notification.MailContent.fromTemplate(MailTemplating.Template.DEFAULT_MAIL_CONTENT)
.subject(emailSubject)
.with("mailMessageTemplate", "cloud-trial-notification")
.with("cloudTrialMessage", emailMsg)
.with("mailTitle", emailSubject)
.with("consoleLink", controller().serviceRegistry().consoleUrls().tenantOverview(tenant.name()))
.build());
var source = NotificationSource.from(tenant.name());
controller().notificationsDb().removeNotification(source, Notification.Type.account);
controller().notificationsDb().setNotification(
source, Notification.Type.account, Notification.Level.info, consoleMsg, List.of(), mail);
}
private String billingUrl(Tenant t) { return controller().serviceRegistry().consoleUrls().tenantBilling(t.name()); }
private static TrialNotifications.Status updatedStatus(Tenant t, Instant i, TrialNotifications.State s) {
return new TrialNotifications.Status(t.name(), s, i);
}
private boolean tenantIsCloudTenant(Tenant tenant) {
return tenant.type() == Tenant.Type.cloud;
}
private Predicate<Tenant> tenantReadersNotLoggedIn(Duration duration) {
return (Tenant tenant) -> {
var timeLimit = controller().clock().instant().minus(duration);
return tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
.map(instant -> instant.isBefore(timeLimit))
.orElse(false);
};
}
private boolean tenantHasTrialPlan(Tenant tenant) {
var planId = controller().serviceRegistry().billingController().getPlan(tenant.name());
return "trial".equals(planId.value());
}
private boolean tenantHasNonePlan(Tenant tenant) {
var planId = controller().serviceRegistry().billingController().getPlan(tenant.name());
return "none".equals(planId.value());
}
private boolean tenantIsNotExemptFromExpiry(Tenant tenant) {
return !extendedTrialTenants.value().contains(tenant.name().value());
}
private boolean tenantHasNoDeployments(Tenant tenant) {
return controller().applications().asList(tenant.name()).stream()
.flatMap(app -> app.instances().values().stream())
.mapToLong(instance -> instance.deployments().values().size())
.sum() == 0;
}
private boolean setPlanNone(List<Tenant> tenants) {
var success = true;
for (var tenant : tenants) {
try {
controller().serviceRegistry().billingController().setPlan(tenant.name(), PlanId.from("none"), false, false);
} catch (RuntimeException e) {
log.info("Could not change plan for " + tenant.name() + ": " + e.getMessage());
success = false;
}
}
return success;
}
private boolean tombstoneTenants(List<Tenant> tenants) {
var success = true;
for (var tenant : tenants) {
success &= deleteApplicationsWithNoDeployments(tenant);
log.fine("Tombstoning empty tenant: " + tenant.name());
try {
controller().tenants().delete(tenant.name(), Optional.empty(), false);
} catch (RuntimeException e) {
log.info("Could not tombstone tenant " + tenant.name() + ": " + e.getMessage());
success = false;
}
}
return success;
}
private boolean deleteApplicationsWithNoDeployments(Tenant tenant) {
var success = true;
for (var application : controller().applications().asList(tenant.name())) {
try {
log.fine("Removing empty application: " + application.id());
controller().applications().deleteApplication(application.id(), Optional.empty());
} catch (RuntimeException e) {
log.info("Could not removing application " + application.id() + ": " + e.getMessage());
success = false;
}
}
return success;
}
} |
```suggestion ``` | public long activate() {
prepare();
validateSessionStatus(session);
PrepareParams params = this.params.get();
waitForResourcesOrTimeout(params, session, provisioner);
ApplicationId applicationId = session.getApplicationId();
try (ActionTimer timer = applicationRepository.timerFor(applicationId, ConfigServerMetrics.DEPLOYMENT_ACTIVATE_MILLIS.baseName())) {
TimeoutBudget timeoutBudget = params.getTimeoutBudget();
timeoutBudget.assertNotTimedOut(() -> "Timeout exceeded when trying to activate '" + applicationId + "'");
try {
Activation activation = applicationRepository.activate(session, applicationId, tenant, params.force());
waitForActivation(applicationId, timeoutBudget, activation);
} catch (Exception e) {
log.log(Level.FINE, "Activating session " + session.getSessionId() + " failed, deleting it");
throw e;
}
restartServicesIfNeeded(applicationId);
storeReindexing(applicationId, session.getMetaData().getGeneration());
return session.getMetaData().getGeneration();
}
} | log.log(Level.FINE, "Activating session " + session.getSessionId() + " failed, deleting it"); | public long activate() {
prepare();
validateSessionStatus(session);
PrepareParams params = this.params.get();
waitForResourcesOrTimeout(params, session, provisioner);
ApplicationId applicationId = session.getApplicationId();
try (ActionTimer timer = applicationRepository.timerFor(applicationId, ConfigServerMetrics.DEPLOYMENT_ACTIVATE_MILLIS.baseName())) {
TimeoutBudget timeoutBudget = params.getTimeoutBudget();
timeoutBudget.assertNotTimedOut(() -> "Timeout exceeded when trying to activate '" + applicationId + "'");
Activation activation = applicationRepository.activate(session, applicationId, tenant, params.force());
waitForActivation(applicationId, timeoutBudget, activation);
restartServicesIfNeeded(applicationId);
storeReindexing(applicationId, session.getMetaData().getGeneration());
return session.getMetaData().getGeneration();
}
} | class Deployment implements com.yahoo.config.provision.Deployment {
private static final Logger log = Logger.getLogger(Deployment.class.getName());
private static final Duration durationBetweenResourceReadyChecks = Duration.ofSeconds(60);
/** The session containing the application instance to activate */
private final Session session;
private final ApplicationRepository applicationRepository;
private final Supplier<PrepareParams> params;
private final Optional<Provisioner> provisioner;
private final Tenant tenant;
private final DeployLogger deployLogger;
private final Clock clock;
private final boolean internalRedeploy;
private boolean prepared;
private ConfigChangeActions configChangeActions;
private Deployment(Session session, ApplicationRepository applicationRepository, Supplier<PrepareParams> params,
Optional<Provisioner> provisioner, Tenant tenant, DeployLogger deployLogger, Clock clock,
boolean internalRedeploy, boolean prepared) {
this.session = session;
this.applicationRepository = applicationRepository;
this.params = params;
this.provisioner = provisioner;
this.tenant = tenant;
this.deployLogger = deployLogger;
this.clock = clock;
this.internalRedeploy = internalRedeploy;
this.prepared = prepared;
}
public static Deployment unprepared(Session session, ApplicationRepository applicationRepository,
Optional<Provisioner> provisioner, Tenant tenant, PrepareParams params, DeployLogger logger, Clock clock) {
return new Deployment(session, applicationRepository, () -> params, provisioner, tenant, logger, clock, false, false);
}
public static Deployment unprepared(Session session, ApplicationRepository applicationRepository,
Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger,
Duration timeout, Clock clock, boolean validate, boolean isBootstrap) {
Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, !validate, false, true);
return new Deployment(session, applicationRepository, params, provisioner, tenant, logger, clock, true, false);
}
public static Deployment prepared(Session session, ApplicationRepository applicationRepository,
Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger,
Duration timeout, Clock clock, boolean isBootstrap, boolean force) {
Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, false, force, false);
return new Deployment(session, applicationRepository, params, provisioner, tenant, logger, clock, false, true);
}
/** Prepares this. This does nothing if this is already prepared */
@Override
public void prepare() {
if (prepared) return;
PrepareParams params = this.params.get();
try (ActionTimer timer = applicationRepository.timerFor(params.getApplicationId(), ConfigServerMetrics.DEPLOYMENT_PREPARE_MILLIS.baseName())) {
this.configChangeActions = sessionRepository().prepareLocalSession(session, deployLogger, params, clock.instant());
this.prepared = true;
} catch (Exception e) {
log.log(Level.FINE, "Preparing session " + session.getSessionId() + " failed, deleting it");
deleteSession();
throw e;
}
}
/** Activates this. If it is not already prepared, this will call prepare first. */
@Override
private void waitForActivation(ApplicationId applicationId, TimeoutBudget timeoutBudget, Activation activation) {
activation.awaitCompletion(timeoutBudget.timeLeft());
Set<FileReference> fileReferences = applicationRepository.getFileReferences(applicationId);
String fileReferencesText = fileReferences.size() > 10
? " " + fileReferences.size() + " file references"
: "File references: " + fileReferences;
log.log(Level.INFO, session.logPre() + "Session " + session.getSessionId() + " activated successfully using " +
provisioner.map(provisioner -> provisioner.getClass().getSimpleName()).orElse("no host provisioner") +
". Config generation " + session.getMetaData().getGeneration() +
activation.sourceSessionId().stream().mapToObj(id -> ". Based on session " + id).findFirst().orElse("") +
". " + fileReferencesText);
}
private void deleteSession() {
sessionRepository().deleteLocalSession(session.getSessionId());
try (var transaction = sessionRepository().createSetStatusTransaction(session, DELETE)) {
transaction.commit();
}
}
private SessionRepository sessionRepository() {
return tenant.getSessionRepository();
}
private void restartServicesIfNeeded(ApplicationId applicationId) {
if (provisioner.isEmpty() || configChangeActions == null) return;
RestartActions restartActions = configChangeActions.getRestartActions().useForInternalRestart(internalRedeploy);
if (restartActions.isEmpty()) return;
Set<String> hostnames = restartActions.hostnames();
waitForConfigToConverge(applicationId, hostnames);
provisioner.get().restart(applicationId, HostFilter.from(hostnames));
deployLogger.log(Level.INFO, String.format("Scheduled service restart of %d nodes: %s",
hostnames.size(), hostnames.stream().sorted().collect(Collectors.joining(", "))));
log.info(String.format("%sScheduled service restart of %d nodes: %s",
session.logPre(), hostnames.size(), restartActions.format()));
this.configChangeActions = configChangeActions.withRestartActions(new RestartActions());
}
private void waitForConfigToConverge(ApplicationId applicationId, Set<String> hostnames) {
deployLogger.log(Level.INFO, "Wait for all services to use new config generation before restarting");
var convergenceChecker = applicationRepository.configConvergenceChecker();
var app = applicationRepository.getActiveApplication(applicationId);
ServiceListResponse response = null;
while (timeLeft(applicationId, response)) {
response = convergenceChecker.checkConvergenceUnlessDeferringChangesUntilRestart(app, hostnames);
if (response.converged) {
deployLogger.log(Level.INFO, "Services converged on new config generation " + response.currentGeneration);
return;
} else {
deployLogger.log(Level.INFO, "Services that did not converge on new config generation " +
response.wantedGeneration + ": " +
servicesNotConvergedFormatted(response) + ". Will retry");
try { Thread.sleep(5_000); } catch (InterruptedException e) { /* ignore */ }
}
}
}
private boolean timeLeft(ApplicationId applicationId, ServiceListResponse response) {
try {
params.get().getTimeoutBudget().assertNotTimedOut(
() -> "Timeout exceeded while waiting for config convergence for " + applicationId +
", wanted generation " + response.wantedGeneration + ", these services had another generation: " +
servicesNotConvergedFormatted(response));
} catch (UncheckedTimeoutException e) {
throw new ConfigNotConvergedException(e);
}
return true;
}
private String servicesNotConvergedFormatted(ServiceListResponse response) {
return response.services().stream()
.filter(service -> service.currentGeneration != response.wantedGeneration)
.map(service -> service.serviceInfo.getHostName() + ":" + service.serviceInfo.getServiceName() +
" on generation " + service.currentGeneration)
.collect(Collectors.joining(", "));
}
private void storeReindexing(ApplicationId applicationId, long requiredSession) {
applicationRepository.modifyReindexing(applicationId, reindexing -> {
if (configChangeActions != null)
for (ReindexActions.Entry entry : configChangeActions.getReindexActions().getEntries())
reindexing = reindexing.withPending(entry.getClusterName(), entry.getDocumentType(), requiredSession);
return reindexing;
});
}
/**
* Request a restart of services of this application on hosts matching the filter.
* This is sometimes needed after activation, but can also be requested without
* doing prepare and activate in the same session.
*/
@Override
public void restart(HostFilter filter) {
provisioner.get().restart(session.getApplicationId(), filter);
}
/** Exposes the session of this for testing only */
public Session session() { return session; }
/**
* @return config change actions that need to be performed as result of prepare
* @throws IllegalArgumentException if called without being prepared by this
*/
public ConfigChangeActions configChangeActions() {
if (configChangeActions != null) return configChangeActions;
throw new IllegalArgumentException("No config change actions: " + (prepared ? "was already prepared" : "not yet prepared"));
}
private void validateSessionStatus(Session session) {
long sessionId = session.getSessionId();
if (Session.Status.NEW.equals(session.getStatus())) {
throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is not prepared");
} else if (Session.Status.ACTIVATE.equals(session.getStatus())) {
throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is already active");
}
}
/**
* @param clock system clock
* @param timeout total timeout duration of prepare + activate
* @param session the local session for this deployment
* @param isBootstrap true if this deployment is done to bootstrap the config server
* @param ignoreValidationErrors whether this model should be validated
* @param force whether activation of this model should be forced
*/
private static Supplier<PrepareParams> createPrepareParams(
Clock clock, Duration timeout, Session session,
boolean isBootstrap, boolean ignoreValidationErrors, boolean force, boolean waitForResourcesInPrepare) {
return new Memoized<>(() -> {
TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout);
PrepareParams.Builder params = new PrepareParams.Builder()
.applicationId(session.getApplicationId())
.vespaVersion(session.getVespaVersion().toString())
.timeoutBudget(timeoutBudget)
.ignoreValidationErrors(ignoreValidationErrors)
.isBootstrap(isBootstrap)
.force(force)
.waitForResourcesInPrepare(waitForResourcesInPrepare)
.tenantSecretStores(session.getTenantSecretStores())
.dataplaneTokens(session.getDataplaneTokens());
session.getDockerImageRepository().ifPresent(params::dockerImageRepository);
session.getAthenzDomain().ifPresent(params::athenzDomain);
session.getCloudAccount().ifPresent(params::cloudAccount);
return params.build();
});
}
private static void waitForResourcesOrTimeout(PrepareParams params, Session session, Optional<Provisioner> provisioner) {
if (!params.waitForResourcesInPrepare() || provisioner.isEmpty()) return;
Set<HostSpec> preparedHosts = session.getAllocatedHosts().getHosts();
ActivationContext context = new ActivationContext(session.getSessionId());
AtomicReference<Exception> lastException = new AtomicReference<>();
while (true) {
params.getTimeoutBudget().assertNotTimedOut(
() -> "Timeout exceeded while waiting for application resources of '" + session.getApplicationId() + "'" +
Optional.ofNullable(lastException.get()).map(e -> ". Last exception: " + e.getMessage()).orElse(""));
try (ApplicationMutex lock = provisioner.get().lock(session.getApplicationId())) {
ApplicationTransaction transaction = new ApplicationTransaction(lock, new NestedTransaction());
provisioner.get().activate(preparedHosts, context, transaction);
return;
} catch (ApplicationLockException | TransientException e) {
lastException.set(e);
try {
Thread.sleep(durationBetweenResourceReadyChecks.toMillis());
} catch (InterruptedException e1) {
throw new RuntimeException(e1);
}
}
}
}
} | class Deployment implements com.yahoo.config.provision.Deployment {
private static final Logger log = Logger.getLogger(Deployment.class.getName());
private static final Duration durationBetweenResourceReadyChecks = Duration.ofSeconds(60);
/** The session containing the application instance to activate */
private final Session session;
private final ApplicationRepository applicationRepository;
private final Supplier<PrepareParams> params;
private final Optional<Provisioner> provisioner;
private final Tenant tenant;
private final DeployLogger deployLogger;
private final Clock clock;
private final boolean internalRedeploy;
private boolean prepared;
private ConfigChangeActions configChangeActions;
private Deployment(Session session, ApplicationRepository applicationRepository, Supplier<PrepareParams> params,
Optional<Provisioner> provisioner, Tenant tenant, DeployLogger deployLogger, Clock clock,
boolean internalRedeploy, boolean prepared) {
this.session = session;
this.applicationRepository = applicationRepository;
this.params = params;
this.provisioner = provisioner;
this.tenant = tenant;
this.deployLogger = deployLogger;
this.clock = clock;
this.internalRedeploy = internalRedeploy;
this.prepared = prepared;
}
public static Deployment unprepared(Session session, ApplicationRepository applicationRepository,
Optional<Provisioner> provisioner, Tenant tenant, PrepareParams params, DeployLogger logger, Clock clock) {
return new Deployment(session, applicationRepository, () -> params, provisioner, tenant, logger, clock, false, false);
}
public static Deployment unprepared(Session session, ApplicationRepository applicationRepository,
Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger,
Duration timeout, Clock clock, boolean validate, boolean isBootstrap) {
Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, !validate, false, true);
return new Deployment(session, applicationRepository, params, provisioner, tenant, logger, clock, true, false);
}
public static Deployment prepared(Session session, ApplicationRepository applicationRepository,
Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger,
Duration timeout, Clock clock, boolean isBootstrap, boolean force) {
Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, false, force, false);
return new Deployment(session, applicationRepository, params, provisioner, tenant, logger, clock, false, true);
}
/** Prepares this. This does nothing if this is already prepared */
@Override
public void prepare() {
if (prepared) return;
PrepareParams params = this.params.get();
try (ActionTimer timer = applicationRepository.timerFor(params.getApplicationId(), ConfigServerMetrics.DEPLOYMENT_PREPARE_MILLIS.baseName())) {
this.configChangeActions = sessionRepository().prepareLocalSession(session, deployLogger, params, clock.instant());
this.prepared = true;
} catch (Exception e) {
log.log(Level.FINE, "Preparing session " + session.getSessionId() + " failed, deleting it");
deleteSession();
throw e;
}
}
/** Activates this. If it is not already prepared, this will call prepare first. */
@Override
private void waitForActivation(ApplicationId applicationId, TimeoutBudget timeoutBudget, Activation activation) {
activation.awaitCompletion(timeoutBudget.timeLeft());
Set<FileReference> fileReferences = applicationRepository.getFileReferences(applicationId);
String fileReferencesText = fileReferences.size() > 10
? " " + fileReferences.size() + " file references"
: "File references: " + fileReferences;
log.log(Level.INFO, session.logPre() + "Session " + session.getSessionId() + " activated successfully using " +
provisioner.map(provisioner -> provisioner.getClass().getSimpleName()).orElse("no host provisioner") +
". Config generation " + session.getMetaData().getGeneration() +
activation.sourceSessionId().stream().mapToObj(id -> ". Based on session " + id).findFirst().orElse("") +
". " + fileReferencesText);
}
private void deleteSession() {
sessionRepository().deleteLocalSession(session.getSessionId());
try (var transaction = sessionRepository().createSetStatusTransaction(session, DELETE)) {
transaction.commit();
}
}
private SessionRepository sessionRepository() {
return tenant.getSessionRepository();
}
private void restartServicesIfNeeded(ApplicationId applicationId) {
if (provisioner.isEmpty() || configChangeActions == null) return;
RestartActions restartActions = configChangeActions.getRestartActions().useForInternalRestart(internalRedeploy);
if (restartActions.isEmpty()) return;
Set<String> hostnames = restartActions.hostnames();
waitForConfigToConverge(applicationId, hostnames);
provisioner.get().restart(applicationId, HostFilter.from(hostnames));
deployLogger.log(Level.INFO, String.format("Scheduled service restart of %d nodes: %s",
hostnames.size(), hostnames.stream().sorted().collect(Collectors.joining(", "))));
log.info(String.format("%sScheduled service restart of %d nodes: %s",
session.logPre(), hostnames.size(), restartActions.format()));
this.configChangeActions = configChangeActions.withRestartActions(new RestartActions());
}
private void waitForConfigToConverge(ApplicationId applicationId, Set<String> hostnames) {
deployLogger.log(Level.INFO, "Wait for all services to use new config generation before restarting");
var convergenceChecker = applicationRepository.configConvergenceChecker();
var app = applicationRepository.getActiveApplication(applicationId);
ServiceListResponse response = null;
while (timeLeft(applicationId, response)) {
response = convergenceChecker.checkConvergenceUnlessDeferringChangesUntilRestart(app, hostnames);
if (response.converged) {
deployLogger.log(Level.INFO, "Services converged on new config generation " + response.currentGeneration);
return;
} else {
deployLogger.log(Level.INFO, "Services that did not converge on new config generation " +
response.wantedGeneration + ": " +
servicesNotConvergedFormatted(response) + ". Will retry");
try { Thread.sleep(5_000); } catch (InterruptedException e) { /* ignore */ }
}
}
}
private boolean timeLeft(ApplicationId applicationId, ServiceListResponse response) {
try {
params.get().getTimeoutBudget().assertNotTimedOut(
() -> "Timeout exceeded while waiting for config convergence for " + applicationId +
", wanted generation " + response.wantedGeneration + ", these services had another generation: " +
servicesNotConvergedFormatted(response));
} catch (UncheckedTimeoutException e) {
throw new ConfigNotConvergedException(e);
}
return true;
}
private String servicesNotConvergedFormatted(ServiceListResponse response) {
return response.services().stream()
.filter(service -> service.currentGeneration != response.wantedGeneration)
.map(service -> service.serviceInfo.getHostName() + ":" + service.serviceInfo.getServiceName() +
" on generation " + service.currentGeneration)
.collect(Collectors.joining(", "));
}
private void storeReindexing(ApplicationId applicationId, long requiredSession) {
applicationRepository.modifyReindexing(applicationId, reindexing -> {
if (configChangeActions != null)
for (ReindexActions.Entry entry : configChangeActions.getReindexActions().getEntries())
reindexing = reindexing.withPending(entry.getClusterName(), entry.getDocumentType(), requiredSession);
return reindexing;
});
}
/**
* Request a restart of services of this application on hosts matching the filter.
* This is sometimes needed after activation, but can also be requested without
* doing prepare and activate in the same session.
*/
@Override
public void restart(HostFilter filter) {
provisioner.get().restart(session.getApplicationId(), filter);
}
/** Exposes the session of this for testing only */
public Session session() { return session; }
/**
* @return config change actions that need to be performed as result of prepare
* @throws IllegalArgumentException if called without being prepared by this
*/
public ConfigChangeActions configChangeActions() {
if (configChangeActions != null) return configChangeActions;
throw new IllegalArgumentException("No config change actions: " + (prepared ? "was already prepared" : "not yet prepared"));
}
private void validateSessionStatus(Session session) {
long sessionId = session.getSessionId();
if (Session.Status.NEW.equals(session.getStatus())) {
throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is not prepared");
} else if (Session.Status.ACTIVATE.equals(session.getStatus())) {
throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is already active");
}
}
/**
* @param clock system clock
* @param timeout total timeout duration of prepare + activate
* @param session the local session for this deployment
* @param isBootstrap true if this deployment is done to bootstrap the config server
* @param ignoreValidationErrors whether this model should be validated
* @param force whether activation of this model should be forced
*/
private static Supplier<PrepareParams> createPrepareParams(
Clock clock, Duration timeout, Session session,
boolean isBootstrap, boolean ignoreValidationErrors, boolean force, boolean waitForResourcesInPrepare) {
return new Memoized<>(() -> {
TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout);
PrepareParams.Builder params = new PrepareParams.Builder()
.applicationId(session.getApplicationId())
.vespaVersion(session.getVespaVersion().toString())
.timeoutBudget(timeoutBudget)
.ignoreValidationErrors(ignoreValidationErrors)
.isBootstrap(isBootstrap)
.force(force)
.waitForResourcesInPrepare(waitForResourcesInPrepare)
.tenantSecretStores(session.getTenantSecretStores())
.dataplaneTokens(session.getDataplaneTokens());
session.getDockerImageRepository().ifPresent(params::dockerImageRepository);
session.getAthenzDomain().ifPresent(params::athenzDomain);
session.getCloudAccount().ifPresent(params::cloudAccount);
return params.build();
});
}
/**
 * Blocks until the provisioner can activate the hosts prepared for this session, or the
 * prepare timeout budget is exhausted. No-op unless the params ask to wait for resources
 * and a provisioner is present.
 *
 * <p>Transient failures ({@link ApplicationLockException}, {@link TransientException}) are
 * retried after a fixed pause; any other exception propagates immediately.
 *
 * @param params prepare parameters carrying the timeout budget and the wait flag
 * @param session the session whose allocated hosts are awaited
 * @param provisioner the provisioner to activate through, if any
 * @throws RuntimeException wrapping InterruptedException if the waiting thread is interrupted
 */
private static void waitForResourcesOrTimeout(PrepareParams params, Session session, Optional<Provisioner> provisioner) {
    if (!params.waitForResourcesInPrepare() || provisioner.isEmpty()) return;
    Set<HostSpec> preparedHosts = session.getAllocatedHosts().getHosts();
    ActivationContext context = new ActivationContext(session.getSessionId());
    AtomicReference<Exception> lastException = new AtomicReference<>();
    while (true) {
        // Include the most recent failure in the timeout message to aid debugging.
        params.getTimeoutBudget().assertNotTimedOut(
                () -> "Timeout exceeded while waiting for application resources of '" + session.getApplicationId() + "'" +
                      Optional.ofNullable(lastException.get()).map(e -> ". Last exception: " + e.getMessage()).orElse(""));
        try (ApplicationMutex lock = provisioner.get().lock(session.getApplicationId())) {
            ApplicationTransaction transaction = new ApplicationTransaction(lock, new NestedTransaction());
            provisioner.get().activate(preparedHosts, context, transaction);
            return;
        } catch (ApplicationLockException | TransientException e) {
            lastException.set(e);
            try {
                Thread.sleep(durationBetweenResourceReadyChecks.toMillis());
            } catch (InterruptedException e1) {
                // Restore the interrupt status so callers up the stack can observe it.
                Thread.currentThread().interrupt();
                throw new RuntimeException(e1);
            }
        }
    }
}
} |
`null` instead of `Optional.empty`? Missing unwrapping of `id`. | void summarize_bill() {
var req = request("/billing/v2/accountant/bill/id-1/summary?keys=plan,architecture")
.roles(Role.hostedAccountant());
tester.assertResponse(req, """
{"id":"BillId{value='id-1'}","summary":[{"key":{"plan":"paid","architecture":"Optional.empty"},"summary":{"cpu":{"cost":"0","hours":"0"},"memory":{"cost":"0","hours":"0"},"disk":{"cost":"0","hours":"0"},"gpu":{"cost":"0","hours":"0"}}}]}""");
} | {"id":"BillId{value='id-1'}","summary":[{"key":{"plan":"paid","architecture":"Optional.empty"},"summary":{"cpu":{"cost":"0","hours":"0"},"memory":{"cost":"0","hours":"0"},"disk":{"cost":"0","hours":"0"},"gpu":{"cost":"0","hours":"0"}}}]}"""); | void summarize_bill() {
var req = request("/billing/v2/accountant/bill/id-1/summary?keys=plan,architecture")
.roles(Role.hostedAccountant());
tester.assertResponse(req, """
{"id":"BillId{value='id-1'}","summary":[{"key":{"plan":"paid","architecture":null},"summary":{"cpu":{"cost":"0","hours":"0"},"memory":{"cost":"0","hours":"0"},"disk":{"cost":"0","hours":"0"},"gpu":{"cost":"0","hours":"0"}}}]}""");
} | class BillingApiHandlerV2Test extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/responses/";
private static final TenantName tenant = TenantName.from("tenant1");
private static final TenantName tenant2 = TenantName.from("tenant2");
private static final Set<Role> tenantReader = Set.of(Role.reader(tenant));
private static final Set<Role> tenantAdmin = Set.of(Role.administrator(tenant));
private static final Set<Role> financeAdmin = Set.of(Role.hostedAccountant());
private MockBillingController billingController;
private ContainerTester tester;
@BeforeEach
public void before() {
tester = new ContainerTester(container, responseFiles);
tester.controller().tenants().create(new CloudTenantSpec(tenant, ""), new Auth0Credentials(() -> "foo", Set.of(Role.hostedOperator())));
var clock = (ManualClock) tester.controller().serviceRegistry().clock();
clock.setInstant(Instant.parse("2021-04-13T00:00:00Z"));
billingController = (MockBillingController) tester.serviceRegistry().billingController();
billingController.addBill(tenant, createBill(), true);
}
@Override
protected String variablePartXml() {
return " <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControlRequests'/>\n" +
" <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControl'/>\n" +
" <handler id='com.yahoo.vespa.hosted.controller.restapi.billing.BillingApiHandlerV2'>\n" +
" <binding>http:
" </handler>\n" +
" <http>\n" +
" <server id='default' port='8080' />\n" +
" <filtering>\n" +
" <request-chain id='default'>\n" +
" <filter id='com.yahoo.vespa.hosted.controller.restapi.filter.ControllerAuthorizationFilter'/>\n" +
" <binding>http:
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n";
}
@Test
void require_tenant_info() {
var request = request("/billing/v2/tenant/" + tenant.value()).roles(tenantReader);
tester.assertResponse(request, "{\"tenant\":\"tenant1\",\"plan\":{\"id\":\"trial\",\"name\":\"Free Trial - for testing purposes\"},\"collection\":\"AUTO\"}");
}
@Test
void require_accountant_for_update_collection() {
var request = request("/billing/v2/tenant/" + tenant.value(), Request.Method.PATCH)
.data("{\"collection\": \"INVOICE\"}");
var forbidden = request.roles(tenantAdmin);
tester.assertResponse(forbidden, """
{
"code" : 403,
"message" : "Access denied"
}""", 403);
var success = request.roles(financeAdmin);
tester.assertResponse(success, """
{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"collection":"INVOICE"}""");
}
@Test
void require_tenant_usage() {
var request = request("/billing/v2/tenant/" + tenant + "/usage").roles(tenantReader);
tester.assertResponse(request, "{\"from\":\"2021-04-13\",\"to\":\"2021-04-13\",\"total\":\"0.00\",\"items\":[]}");
}
@Test
void require_tenant_invoice() {
var listRequest = request("/billing/v2/tenant/" + tenant + "/bill").roles(tenantReader);
tester.assertResponse(listRequest, "{\"invoices\":[{\"id\":\"id-1\",\"from\":\"2020-05-23\",\"to\":\"2020-05-28\",\"total\":\"123.00\",\"status\":\"OPEN\"}]}");
var singleRequest = request("/billing/v2/tenant/" + tenant + "/bill/id-1").roles(tenantReader);
tester.assertResponse(singleRequest, """
{"id":"id-1","from":"2020-05-23","to":"2020-05-28","total":"123.00","status":"OPEN","statusHistory":[{"at":"2020-05-23T00:00:00Z","status":"OPEN"}],"items":[{"id":"some-id","description":"description","amount":"123.00","plan":{"id":"paid","name":"Paid Plan - for testing purposes"},"majorVersion":0,"cpu":{},"memory":{},"disk":{},"gpu":{}}]}""");
}
@Test
void require_accountant_summary() {
var tenantRequest = request("/billing/v2/accountant").roles(tenantReader);
tester.assertResponse(tenantRequest, "{\n" +
" \"code\" : 403,\n" +
" \"message\" : \"Access denied\"\n" +
"}", 403);
var accountantRequest = request("/billing/v2/accountant").roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"tenants":[{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"quota":{"budget":-1.0},"collection":"AUTO","lastBill":"1970-01-01","unbilled":"0.00"}]}""");
}
@Test
void require_accountant_preview() {
var accountantRequest = request("/billing/v2/accountant/preview").roles(Role.hostedAccountant());
billingController.uncommittedBills.put(tenant, createBill());
tester.assertResponse(accountantRequest, """
{"tenants":[{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"quota":{"budget":-1.0},"collection":"AUTO","lastBill":"2020-05-23","unbilled":"123.00"}]}""");
}
@Test
void require_accountant_tenant_preview() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/preview").roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, "{\"id\":\"empty\",\"from\":\"2021-04-13\",\"to\":\"2021-04-12\",\"total\":\"0.00\",\"status\":\"OPEN\",\"statusHistory\":[{\"at\":\"2021-04-13T00:00:00Z\",\"status\":\"OPEN\"}],\"items\":[]}");
}
@Test
void require_accountant_tenant_bill() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/preview", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("{\"from\": \"2020-05-01\",\"to\": \"2020-06-01\"}");
tester.assertResponse(accountantRequest, "{\"message\":\"Created bill id-123\"}");
}
@Test
void require_list_of_all_plans() {
var accountantRequest = request("/billing/v2/accountant/plans")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, "{\"plans\":[{\"id\":\"trial\",\"name\":\"Free Trial - for testing purposes\"},{\"id\":\"paid\",\"name\":\"Paid Plan - for testing purposes\"},{\"id\":\"none\",\"name\":\"None Plan - for testing purposes\"}]}");
}
@Test
void require_additional_items_empty() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"items":[]}""");
}
@Test
void require_additional_items_with_content() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{
"description": "Additional support costs",
"amount": "123.45"
}""");
tester.assertResponse(accountantRequest, """
{"message":"Added line item for tenant tenant1"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"items":[{"id":"line-item-id","description":"Additional support costs","amount":"123.45","plan":{"id":"paid","name":"Paid Plan - for testing purposes"},"majorVersion":0,"cpu":{},"memory":{},"disk":{},"gpu":{}}]}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/item/line-item-id", Request.Method.DELETE)
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"message":"Successfully deleted line item line-item-id"}""");
}
}
@Test
void require_current_plan() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"id":"trial","name":"Free Trial - for testing purposes"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{"id": "paid"}""");
tester.assertResponse(accountantRequest, """
{"message":"Plan: paid"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"id":"paid","name":"Paid Plan - for testing purposes"}""");
}
}
@Test
void require_current_collection() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"collection":"AUTO"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{"collection": "INVOICE"}""");
tester.assertResponse(accountantRequest, """
{"message":"Collection: INVOICE"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"collection":"INVOICE"}""");
}
}
@Test
void require_accountant_tenant() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes","billed":false,"supported":false},"billing":{},"collection":"AUTO"}""");
}
@Test
void lists_accepted_countries() {
var req = request("/billing/v2/countries").roles(tenantReader);
tester.assertJsonResponse(req, new File("accepted-countries.json"));
}
@Test
private static Bill createBill() {
var start = LocalDate.of(2020, 5, 23).atStartOfDay(ZoneOffset.UTC);
var end = start.toLocalDate().plusDays(6).atStartOfDay(ZoneOffset.UTC);
var statusHistory = new StatusHistory(new TreeMap<>(Map.of(start, BillStatus.OPEN)));
return new Bill(
Bill.Id.of("id-1"),
TenantName.defaultName(),
statusHistory,
List.of(createLineItem(start)),
start,
end
);
}
static Bill.LineItem createLineItem(ZonedDateTime addedAt) {
return new Bill.LineItem(
"some-id",
"description",
new BigDecimal("123.00"),
"paid",
"Smith",
addedAt
);
}
} | class BillingApiHandlerV2Test extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/responses/";
private static final TenantName tenant = TenantName.from("tenant1");
private static final TenantName tenant2 = TenantName.from("tenant2");
private static final Set<Role> tenantReader = Set.of(Role.reader(tenant));
private static final Set<Role> tenantAdmin = Set.of(Role.administrator(tenant));
private static final Set<Role> financeAdmin = Set.of(Role.hostedAccountant());
private MockBillingController billingController;
private ContainerTester tester;
@BeforeEach
public void before() {
tester = new ContainerTester(container, responseFiles);
tester.controller().tenants().create(new CloudTenantSpec(tenant, ""), new Auth0Credentials(() -> "foo", Set.of(Role.hostedOperator())));
var clock = (ManualClock) tester.controller().serviceRegistry().clock();
clock.setInstant(Instant.parse("2021-04-13T00:00:00Z"));
billingController = (MockBillingController) tester.serviceRegistry().billingController();
billingController.addBill(tenant, createBill(), true);
}
@Override
protected String variablePartXml() {
return " <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControlRequests'/>\n" +
" <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControl'/>\n" +
" <handler id='com.yahoo.vespa.hosted.controller.restapi.billing.BillingApiHandlerV2'>\n" +
" <binding>http:
" </handler>\n" +
" <http>\n" +
" <server id='default' port='8080' />\n" +
" <filtering>\n" +
" <request-chain id='default'>\n" +
" <filter id='com.yahoo.vespa.hosted.controller.restapi.filter.ControllerAuthorizationFilter'/>\n" +
" <binding>http:
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n";
}
@Test
void require_tenant_info() {
var request = request("/billing/v2/tenant/" + tenant.value()).roles(tenantReader);
tester.assertResponse(request, "{\"tenant\":\"tenant1\",\"plan\":{\"id\":\"trial\",\"name\":\"Free Trial - for testing purposes\"},\"collection\":\"AUTO\"}");
}
@Test
void require_accountant_for_update_collection() {
var request = request("/billing/v2/tenant/" + tenant.value(), Request.Method.PATCH)
.data("{\"collection\": \"INVOICE\"}");
var forbidden = request.roles(tenantAdmin);
tester.assertResponse(forbidden, """
{
"code" : 403,
"message" : "Access denied"
}""", 403);
var success = request.roles(financeAdmin);
tester.assertResponse(success, """
{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"collection":"INVOICE"}""");
}
@Test
void require_tenant_usage() {
var request = request("/billing/v2/tenant/" + tenant + "/usage").roles(tenantReader);
tester.assertResponse(request, "{\"from\":\"2021-04-13\",\"to\":\"2021-04-13\",\"total\":\"0.00\",\"items\":[]}");
}
@Test
void require_tenant_invoice() {
var listRequest = request("/billing/v2/tenant/" + tenant + "/bill").roles(tenantReader);
tester.assertResponse(listRequest, "{\"invoices\":[{\"id\":\"id-1\",\"from\":\"2020-05-23\",\"to\":\"2020-05-28\",\"total\":\"123.00\",\"status\":\"OPEN\"}]}");
var singleRequest = request("/billing/v2/tenant/" + tenant + "/bill/id-1").roles(tenantReader);
tester.assertResponse(singleRequest, """
{"id":"id-1","from":"2020-05-23","to":"2020-05-28","total":"123.00","status":"OPEN","statusHistory":[{"at":"2020-05-23T00:00:00Z","status":"OPEN"}],"items":[{"id":"some-id","description":"description","amount":"123.00","plan":{"id":"paid","name":"Paid Plan - for testing purposes"},"majorVersion":0,"cpu":{},"memory":{},"disk":{},"gpu":{}}]}""");
}
@Test
void require_accountant_summary() {
var tenantRequest = request("/billing/v2/accountant").roles(tenantReader);
tester.assertResponse(tenantRequest, "{\n" +
" \"code\" : 403,\n" +
" \"message\" : \"Access denied\"\n" +
"}", 403);
var accountantRequest = request("/billing/v2/accountant").roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"tenants":[{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"quota":{"budget":-1.0},"collection":"AUTO","lastBill":"1970-01-01","unbilled":"0.00"}]}""");
}
@Test
void require_accountant_preview() {
var accountantRequest = request("/billing/v2/accountant/preview").roles(Role.hostedAccountant());
billingController.uncommittedBills.put(tenant, createBill());
tester.assertResponse(accountantRequest, """
{"tenants":[{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"quota":{"budget":-1.0},"collection":"AUTO","lastBill":"2020-05-23","unbilled":"123.00"}]}""");
}
@Test
void require_accountant_tenant_preview() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/preview").roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, "{\"id\":\"empty\",\"from\":\"2021-04-13\",\"to\":\"2021-04-12\",\"total\":\"0.00\",\"status\":\"OPEN\",\"statusHistory\":[{\"at\":\"2021-04-13T00:00:00Z\",\"status\":\"OPEN\"}],\"items\":[]}");
}
@Test
void require_accountant_tenant_bill() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/preview", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("{\"from\": \"2020-05-01\",\"to\": \"2020-06-01\"}");
tester.assertResponse(accountantRequest, "{\"message\":\"Created bill id-123\"}");
}
@Test
void require_list_of_all_plans() {
var accountantRequest = request("/billing/v2/accountant/plans")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, "{\"plans\":[{\"id\":\"trial\",\"name\":\"Free Trial - for testing purposes\"},{\"id\":\"paid\",\"name\":\"Paid Plan - for testing purposes\"},{\"id\":\"none\",\"name\":\"None Plan - for testing purposes\"}]}");
}
@Test
void require_additional_items_empty() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"items":[]}""");
}
@Test
void require_additional_items_with_content() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{
"description": "Additional support costs",
"amount": "123.45"
}""");
tester.assertResponse(accountantRequest, """
{"message":"Added line item for tenant tenant1"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"items":[{"id":"line-item-id","description":"Additional support costs","amount":"123.45","plan":{"id":"paid","name":"Paid Plan - for testing purposes"},"majorVersion":0,"cpu":{},"memory":{},"disk":{},"gpu":{}}]}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/item/line-item-id", Request.Method.DELETE)
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"message":"Successfully deleted line item line-item-id"}""");
}
}
@Test
void require_current_plan() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"id":"trial","name":"Free Trial - for testing purposes"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{"id": "paid"}""");
tester.assertResponse(accountantRequest, """
{"message":"Plan: paid"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"id":"paid","name":"Paid Plan - for testing purposes"}""");
}
}
@Test
void require_current_collection() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"collection":"AUTO"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{"collection": "INVOICE"}""");
tester.assertResponse(accountantRequest, """
{"message":"Collection: INVOICE"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"collection":"INVOICE"}""");
}
}
@Test
void require_accountant_tenant() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes","billed":false,"supported":false},"billing":{},"collection":"AUTO"}""");
}
@Test
void lists_accepted_countries() {
var req = request("/billing/v2/countries").roles(tenantReader);
tester.assertJsonResponse(req, new File("accepted-countries.json"));
}
@Test
private static Bill createBill() {
var start = LocalDate.of(2020, 5, 23).atStartOfDay(ZoneOffset.UTC);
var end = start.toLocalDate().plusDays(6).atStartOfDay(ZoneOffset.UTC);
var statusHistory = new StatusHistory(new TreeMap<>(Map.of(start, BillStatus.OPEN)));
return new Bill(
Bill.Id.of("id-1"),
TenantName.defaultName(),
statusHistory,
List.of(createLineItem(start)),
start,
end
);
}
static Bill.LineItem createLineItem(ZonedDateTime addedAt) {
return new Bill.LineItem(
"some-id",
"description",
new BigDecimal("123.00"),
"paid",
"Smith",
addedAt
);
}
} |
Good point. Will fix. | void summarize_bill() {
var req = request("/billing/v2/accountant/bill/id-1/summary?keys=plan,architecture")
.roles(Role.hostedAccountant());
tester.assertResponse(req, """
{"id":"BillId{value='id-1'}","summary":[{"key":{"plan":"paid","architecture":"Optional.empty"},"summary":{"cpu":{"cost":"0","hours":"0"},"memory":{"cost":"0","hours":"0"},"disk":{"cost":"0","hours":"0"},"gpu":{"cost":"0","hours":"0"}}}]}""");
} | {"id":"BillId{value='id-1'}","summary":[{"key":{"plan":"paid","architecture":"Optional.empty"},"summary":{"cpu":{"cost":"0","hours":"0"},"memory":{"cost":"0","hours":"0"},"disk":{"cost":"0","hours":"0"},"gpu":{"cost":"0","hours":"0"}}}]}"""); | void summarize_bill() {
var req = request("/billing/v2/accountant/bill/id-1/summary?keys=plan,architecture")
.roles(Role.hostedAccountant());
tester.assertResponse(req, """
{"id":"BillId{value='id-1'}","summary":[{"key":{"plan":"paid","architecture":null},"summary":{"cpu":{"cost":"0","hours":"0"},"memory":{"cost":"0","hours":"0"},"disk":{"cost":"0","hours":"0"},"gpu":{"cost":"0","hours":"0"}}}]}""");
} | class BillingApiHandlerV2Test extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/responses/";
private static final TenantName tenant = TenantName.from("tenant1");
private static final TenantName tenant2 = TenantName.from("tenant2");
private static final Set<Role> tenantReader = Set.of(Role.reader(tenant));
private static final Set<Role> tenantAdmin = Set.of(Role.administrator(tenant));
private static final Set<Role> financeAdmin = Set.of(Role.hostedAccountant());
private MockBillingController billingController;
private ContainerTester tester;
@BeforeEach
public void before() {
tester = new ContainerTester(container, responseFiles);
tester.controller().tenants().create(new CloudTenantSpec(tenant, ""), new Auth0Credentials(() -> "foo", Set.of(Role.hostedOperator())));
var clock = (ManualClock) tester.controller().serviceRegistry().clock();
clock.setInstant(Instant.parse("2021-04-13T00:00:00Z"));
billingController = (MockBillingController) tester.serviceRegistry().billingController();
billingController.addBill(tenant, createBill(), true);
}
@Override
protected String variablePartXml() {
return " <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControlRequests'/>\n" +
" <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControl'/>\n" +
" <handler id='com.yahoo.vespa.hosted.controller.restapi.billing.BillingApiHandlerV2'>\n" +
" <binding>http:
" </handler>\n" +
" <http>\n" +
" <server id='default' port='8080' />\n" +
" <filtering>\n" +
" <request-chain id='default'>\n" +
" <filter id='com.yahoo.vespa.hosted.controller.restapi.filter.ControllerAuthorizationFilter'/>\n" +
" <binding>http:
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n";
}
@Test
void require_tenant_info() {
var request = request("/billing/v2/tenant/" + tenant.value()).roles(tenantReader);
tester.assertResponse(request, "{\"tenant\":\"tenant1\",\"plan\":{\"id\":\"trial\",\"name\":\"Free Trial - for testing purposes\"},\"collection\":\"AUTO\"}");
}
@Test
void require_accountant_for_update_collection() {
var request = request("/billing/v2/tenant/" + tenant.value(), Request.Method.PATCH)
.data("{\"collection\": \"INVOICE\"}");
var forbidden = request.roles(tenantAdmin);
tester.assertResponse(forbidden, """
{
"code" : 403,
"message" : "Access denied"
}""", 403);
var success = request.roles(financeAdmin);
tester.assertResponse(success, """
{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"collection":"INVOICE"}""");
}
@Test
void require_tenant_usage() {
var request = request("/billing/v2/tenant/" + tenant + "/usage").roles(tenantReader);
tester.assertResponse(request, "{\"from\":\"2021-04-13\",\"to\":\"2021-04-13\",\"total\":\"0.00\",\"items\":[]}");
}
@Test
void require_tenant_invoice() {
var listRequest = request("/billing/v2/tenant/" + tenant + "/bill").roles(tenantReader);
tester.assertResponse(listRequest, "{\"invoices\":[{\"id\":\"id-1\",\"from\":\"2020-05-23\",\"to\":\"2020-05-28\",\"total\":\"123.00\",\"status\":\"OPEN\"}]}");
var singleRequest = request("/billing/v2/tenant/" + tenant + "/bill/id-1").roles(tenantReader);
tester.assertResponse(singleRequest, """
{"id":"id-1","from":"2020-05-23","to":"2020-05-28","total":"123.00","status":"OPEN","statusHistory":[{"at":"2020-05-23T00:00:00Z","status":"OPEN"}],"items":[{"id":"some-id","description":"description","amount":"123.00","plan":{"id":"paid","name":"Paid Plan - for testing purposes"},"majorVersion":0,"cpu":{},"memory":{},"disk":{},"gpu":{}}]}""");
}
@Test
void require_accountant_summary() {
var tenantRequest = request("/billing/v2/accountant").roles(tenantReader);
tester.assertResponse(tenantRequest, "{\n" +
" \"code\" : 403,\n" +
" \"message\" : \"Access denied\"\n" +
"}", 403);
var accountantRequest = request("/billing/v2/accountant").roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"tenants":[{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"quota":{"budget":-1.0},"collection":"AUTO","lastBill":"1970-01-01","unbilled":"0.00"}]}""");
}
@Test
void require_accountant_preview() {
var accountantRequest = request("/billing/v2/accountant/preview").roles(Role.hostedAccountant());
billingController.uncommittedBills.put(tenant, createBill());
tester.assertResponse(accountantRequest, """
{"tenants":[{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"quota":{"budget":-1.0},"collection":"AUTO","lastBill":"2020-05-23","unbilled":"123.00"}]}""");
}
@Test
void require_accountant_tenant_preview() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/preview").roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, "{\"id\":\"empty\",\"from\":\"2021-04-13\",\"to\":\"2021-04-12\",\"total\":\"0.00\",\"status\":\"OPEN\",\"statusHistory\":[{\"at\":\"2021-04-13T00:00:00Z\",\"status\":\"OPEN\"}],\"items\":[]}");
}
@Test
void require_accountant_tenant_bill() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/preview", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("{\"from\": \"2020-05-01\",\"to\": \"2020-06-01\"}");
tester.assertResponse(accountantRequest, "{\"message\":\"Created bill id-123\"}");
}
@Test
void require_list_of_all_plans() {
var accountantRequest = request("/billing/v2/accountant/plans")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, "{\"plans\":[{\"id\":\"trial\",\"name\":\"Free Trial - for testing purposes\"},{\"id\":\"paid\",\"name\":\"Paid Plan - for testing purposes\"},{\"id\":\"none\",\"name\":\"None Plan - for testing purposes\"}]}");
}
@Test
void require_additional_items_empty() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"items":[]}""");
}
@Test
void require_additional_items_with_content() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{
"description": "Additional support costs",
"amount": "123.45"
}""");
tester.assertResponse(accountantRequest, """
{"message":"Added line item for tenant tenant1"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"items":[{"id":"line-item-id","description":"Additional support costs","amount":"123.45","plan":{"id":"paid","name":"Paid Plan - for testing purposes"},"majorVersion":0,"cpu":{},"memory":{},"disk":{},"gpu":{}}]}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/item/line-item-id", Request.Method.DELETE)
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"message":"Successfully deleted line item line-item-id"}""");
}
}
@Test
void require_current_plan() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"id":"trial","name":"Free Trial - for testing purposes"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{"id": "paid"}""");
tester.assertResponse(accountantRequest, """
{"message":"Plan: paid"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"id":"paid","name":"Paid Plan - for testing purposes"}""");
}
}
@Test
void require_current_collection() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"collection":"AUTO"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{"collection": "INVOICE"}""");
tester.assertResponse(accountantRequest, """
{"message":"Collection: INVOICE"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"collection":"INVOICE"}""");
}
}
@Test
void require_accountant_tenant() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes","billed":false,"supported":false},"billing":{},"collection":"AUTO"}""");
}
@Test
void lists_accepted_countries() {
var req = request("/billing/v2/countries").roles(tenantReader);
tester.assertJsonResponse(req, new File("accepted-countries.json"));
}
@Test
private static Bill createBill() {
var start = LocalDate.of(2020, 5, 23).atStartOfDay(ZoneOffset.UTC);
var end = start.toLocalDate().plusDays(6).atStartOfDay(ZoneOffset.UTC);
var statusHistory = new StatusHistory(new TreeMap<>(Map.of(start, BillStatus.OPEN)));
return new Bill(
Bill.Id.of("id-1"),
TenantName.defaultName(),
statusHistory,
List.of(createLineItem(start)),
start,
end
);
}
static Bill.LineItem createLineItem(ZonedDateTime addedAt) {
return new Bill.LineItem(
"some-id",
"description",
new BigDecimal("123.00"),
"paid",
"Smith",
addedAt
);
}
} | class BillingApiHandlerV2Test extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/responses/";
private static final TenantName tenant = TenantName.from("tenant1");
private static final TenantName tenant2 = TenantName.from("tenant2");
private static final Set<Role> tenantReader = Set.of(Role.reader(tenant));
private static final Set<Role> tenantAdmin = Set.of(Role.administrator(tenant));
private static final Set<Role> financeAdmin = Set.of(Role.hostedAccountant());
private MockBillingController billingController;
private ContainerTester tester;
@BeforeEach
// Fresh container per test: registers tenant1, pins the clock to 2021-04-13
// (several tests assert on this date), and seeds one bill for the tenant.
public void before() {
tester = new ContainerTester(container, responseFiles);
tester.controller().tenants().create(new CloudTenantSpec(tenant, ""), new Auth0Credentials(() -> "foo", Set.of(Role.hostedOperator())));
// The test service registry is wired with a ManualClock, so this cast is safe here.
var clock = (ManualClock) tester.controller().serviceRegistry().clock();
clock.setInstant(Instant.parse("2021-04-13T00:00:00Z"));
billingController = (MockBillingController) tester.serviceRegistry().billingController();
// NOTE(review): the boolean flag presumably marks the bill as committed (as opposed
// to an uncommitted preview) — confirm against MockBillingController.addBill.
billingController.addBill(tenant, createBill(), true);
}
@Override
// Supplies the services XML fragment for the test container: the billing v2
// handler plus the authorization filter chained in front of it.
// NOTE(review): the "<binding>http:" literals below are truncated (no closing
// quote or </binding> element) — this looks like extraction/copy damage, not
// intentional code; restore the full binding patterns from version control.
protected String variablePartXml() {
return " <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControlRequests'/>\n" +
" <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControl'/>\n" +
" <handler id='com.yahoo.vespa.hosted.controller.restapi.billing.BillingApiHandlerV2'>\n" +
" <binding>http:
" </handler>\n" +
" <http>\n" +
" <server id='default' port='8080' />\n" +
" <filtering>\n" +
" <request-chain id='default'>\n" +
" <filter id='com.yahoo.vespa.hosted.controller.restapi.filter.ControllerAuthorizationFilter'/>\n" +
" <binding>http:
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n";
}
@Test
// A member of the tenant's reader role can fetch the tenant's billing summary.
void require_tenant_info() {
    var infoRequest = request("/billing/v2/tenant/" + tenant.value());
    tester.assertResponse(infoRequest.roles(tenantReader),
            "{\"tenant\":\"tenant1\",\"plan\":{\"id\":\"trial\",\"name\":\"Free Trial - for testing purposes\"},\"collection\":\"AUTO\"}");
}
@Test
void require_accountant_for_update_collection() {
var request = request("/billing/v2/tenant/" + tenant.value(), Request.Method.PATCH)
.data("{\"collection\": \"INVOICE\"}");
var forbidden = request.roles(tenantAdmin);
tester.assertResponse(forbidden, """
{
"code" : 403,
"message" : "Access denied"
}""", 403);
var success = request.roles(financeAdmin);
tester.assertResponse(success, """
{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"collection":"INVOICE"}""");
}
@Test
// Usage for the current period is empty; the clock is pinned to 2021-04-13 in before(),
// so both ends of the reported window are that date.
void require_tenant_usage() {
    var usageRequest = request("/billing/v2/tenant/" + tenant + "/usage");
    tester.assertResponse(usageRequest.roles(tenantReader),
            "{\"from\":\"2021-04-13\",\"to\":\"2021-04-13\",\"total\":\"0.00\",\"items\":[]}");
}
@Test
void require_tenant_invoice() {
var listRequest = request("/billing/v2/tenant/" + tenant + "/bill").roles(tenantReader);
tester.assertResponse(listRequest, "{\"invoices\":[{\"id\":\"id-1\",\"from\":\"2020-05-23\",\"to\":\"2020-05-28\",\"total\":\"123.00\",\"status\":\"OPEN\"}]}");
var singleRequest = request("/billing/v2/tenant/" + tenant + "/bill/id-1").roles(tenantReader);
tester.assertResponse(singleRequest, """
{"id":"id-1","from":"2020-05-23","to":"2020-05-28","total":"123.00","status":"OPEN","statusHistory":[{"at":"2020-05-23T00:00:00Z","status":"OPEN"}],"items":[{"id":"some-id","description":"description","amount":"123.00","plan":{"id":"paid","name":"Paid Plan - for testing purposes"},"majorVersion":0,"cpu":{},"memory":{},"disk":{},"gpu":{}}]}""");
}
@Test
void require_accountant_summary() {
var tenantRequest = request("/billing/v2/accountant").roles(tenantReader);
tester.assertResponse(tenantRequest, "{\n" +
" \"code\" : 403,\n" +
" \"message\" : \"Access denied\"\n" +
"}", 403);
var accountantRequest = request("/billing/v2/accountant").roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"tenants":[{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"quota":{"budget":-1.0},"collection":"AUTO","lastBill":"1970-01-01","unbilled":"0.00"}]}""");
}
@Test
void require_accountant_preview() {
var accountantRequest = request("/billing/v2/accountant/preview").roles(Role.hostedAccountant());
billingController.uncommittedBills.put(tenant, createBill());
tester.assertResponse(accountantRequest, """
{"tenants":[{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"quota":{"budget":-1.0},"collection":"AUTO","lastBill":"2020-05-23","unbilled":"123.00"}]}""");
}
@Test
void require_accountant_tenant_preview() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/preview").roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, "{\"id\":\"empty\",\"from\":\"2021-04-13\",\"to\":\"2021-04-12\",\"total\":\"0.00\",\"status\":\"OPEN\",\"statusHistory\":[{\"at\":\"2021-04-13T00:00:00Z\",\"status\":\"OPEN\"}],\"items\":[]}");
}
@Test
void require_accountant_tenant_bill() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/preview", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("{\"from\": \"2020-05-01\",\"to\": \"2020-06-01\"}");
tester.assertResponse(accountantRequest, "{\"message\":\"Created bill id-123\"}");
}
@Test
void require_list_of_all_plans() {
var accountantRequest = request("/billing/v2/accountant/plans")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, "{\"plans\":[{\"id\":\"trial\",\"name\":\"Free Trial - for testing purposes\"},{\"id\":\"paid\",\"name\":\"Paid Plan - for testing purposes\"},{\"id\":\"none\",\"name\":\"None Plan - for testing purposes\"}]}");
}
@Test
void require_additional_items_empty() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"items":[]}""");
}
@Test
void require_additional_items_with_content() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{
"description": "Additional support costs",
"amount": "123.45"
}""");
tester.assertResponse(accountantRequest, """
{"message":"Added line item for tenant tenant1"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"items":[{"id":"line-item-id","description":"Additional support costs","amount":"123.45","plan":{"id":"paid","name":"Paid Plan - for testing purposes"},"majorVersion":0,"cpu":{},"memory":{},"disk":{},"gpu":{}}]}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/item/line-item-id", Request.Method.DELETE)
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"message":"Successfully deleted line item line-item-id"}""");
}
}
@Test
void require_current_plan() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"id":"trial","name":"Free Trial - for testing purposes"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{"id": "paid"}""");
tester.assertResponse(accountantRequest, """
{"message":"Plan: paid"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"id":"paid","name":"Paid Plan - for testing purposes"}""");
}
}
@Test
void require_current_collection() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"collection":"AUTO"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{"collection": "INVOICE"}""");
tester.assertResponse(accountantRequest, """
{"message":"Collection: INVOICE"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"collection":"INVOICE"}""");
}
}
@Test
void require_accountant_tenant() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes","billed":false,"supported":false},"billing":{},"collection":"AUTO"}""");
}
@Test
void lists_accepted_countries() {
var req = request("/billing/v2/countries").roles(tenantReader);
tester.assertJsonResponse(req, new File("accepted-countries.json"));
}
// NOTE: removed a stray @Test annotation — this is a fixture factory, not a test,
// and JUnit 5 rejects private static @Test methods anyway.
/**
 * Builds the fixed test bill: id "id-1", a single line item, status OPEN since
 * 2020-05-23, covering the week 2020-05-23 to 2020-05-29 (exclusive end).
 */
private static Bill createBill() {
    var start = LocalDate.of(2020, 5, 23).atStartOfDay(ZoneOffset.UTC);
    var end = start.toLocalDate().plusDays(6).atStartOfDay(ZoneOffset.UTC);
    var statusHistory = new StatusHistory(new TreeMap<>(Map.of(start, BillStatus.OPEN)));
    return new Bill(
            Bill.Id.of("id-1"),
            TenantName.defaultName(),
            statusHistory,
            List.of(createLineItem(start)),
            start,
            end
    );
}
/**
 * Builds the single fixed line item used by the test bill: amount 123.00 on the
 * "paid" plan, added at the given time.
 */
static Bill.LineItem createLineItem(ZonedDateTime addedAt) {
    var id = "some-id";
    var description = "description";
    var amount = new BigDecimal("123.00");
    var planId = "paid";
    var addedBy = "Smith"; // presumably the user who added the item — confirm against Bill.LineItem
    return new Bill.LineItem(id, description, amount, planId, addedBy, addedAt);
}
} |
The `id` field looks wrong | void summarize_bill() {
var req = request("/billing/v2/accountant/bill/id-1/summary?keys=plan,architecture")
.roles(Role.hostedAccountant());
tester.assertResponse(req, """
{"id":"BillId{value='id-1'}","summary":[{"key":{"plan":"paid","architecture":null},"summary":{"cpu":{"cost":"0","hours":"0"},"memory":{"cost":"0","hours":"0"},"disk":{"cost":"0","hours":"0"},"gpu":{"cost":"0","hours":"0"}}}]}""");
} | {"id":"BillId{value='id-1'}","summary":[{"key":{"plan":"paid","architecture":null},"summary":{"cpu":{"cost":"0","hours":"0"},"memory":{"cost":"0","hours":"0"},"disk":{"cost":"0","hours":"0"},"gpu":{"cost":"0","hours":"0"}}}]}"""); | void summarize_bill() {
var req = request("/billing/v2/accountant/bill/id-1/summary?keys=plan,architecture")
.roles(Role.hostedAccountant());
tester.assertResponse(req, """
{"id":"BillId{value='id-1'}","summary":[{"key":{"plan":"paid","architecture":null},"summary":{"cpu":{"cost":"0","hours":"0"},"memory":{"cost":"0","hours":"0"},"disk":{"cost":"0","hours":"0"},"gpu":{"cost":"0","hours":"0"}}}]}""");
} | class BillingApiHandlerV2Test extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/responses/";
private static final TenantName tenant = TenantName.from("tenant1");
private static final TenantName tenant2 = TenantName.from("tenant2");
private static final Set<Role> tenantReader = Set.of(Role.reader(tenant));
private static final Set<Role> tenantAdmin = Set.of(Role.administrator(tenant));
private static final Set<Role> financeAdmin = Set.of(Role.hostedAccountant());
private MockBillingController billingController;
private ContainerTester tester;
@BeforeEach
public void before() {
tester = new ContainerTester(container, responseFiles);
tester.controller().tenants().create(new CloudTenantSpec(tenant, ""), new Auth0Credentials(() -> "foo", Set.of(Role.hostedOperator())));
var clock = (ManualClock) tester.controller().serviceRegistry().clock();
clock.setInstant(Instant.parse("2021-04-13T00:00:00Z"));
billingController = (MockBillingController) tester.serviceRegistry().billingController();
billingController.addBill(tenant, createBill(), true);
}
@Override
protected String variablePartXml() {
return " <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControlRequests'/>\n" +
" <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControl'/>\n" +
" <handler id='com.yahoo.vespa.hosted.controller.restapi.billing.BillingApiHandlerV2'>\n" +
" <binding>http:
" </handler>\n" +
" <http>\n" +
" <server id='default' port='8080' />\n" +
" <filtering>\n" +
" <request-chain id='default'>\n" +
" <filter id='com.yahoo.vespa.hosted.controller.restapi.filter.ControllerAuthorizationFilter'/>\n" +
" <binding>http:
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n";
}
@Test
void require_tenant_info() {
var request = request("/billing/v2/tenant/" + tenant.value()).roles(tenantReader);
tester.assertResponse(request, "{\"tenant\":\"tenant1\",\"plan\":{\"id\":\"trial\",\"name\":\"Free Trial - for testing purposes\"},\"collection\":\"AUTO\"}");
}
@Test
void require_accountant_for_update_collection() {
var request = request("/billing/v2/tenant/" + tenant.value(), Request.Method.PATCH)
.data("{\"collection\": \"INVOICE\"}");
var forbidden = request.roles(tenantAdmin);
tester.assertResponse(forbidden, """
{
"code" : 403,
"message" : "Access denied"
}""", 403);
var success = request.roles(financeAdmin);
tester.assertResponse(success, """
{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"collection":"INVOICE"}""");
}
@Test
void require_tenant_usage() {
var request = request("/billing/v2/tenant/" + tenant + "/usage").roles(tenantReader);
tester.assertResponse(request, "{\"from\":\"2021-04-13\",\"to\":\"2021-04-13\",\"total\":\"0.00\",\"items\":[]}");
}
@Test
void require_tenant_invoice() {
var listRequest = request("/billing/v2/tenant/" + tenant + "/bill").roles(tenantReader);
tester.assertResponse(listRequest, "{\"invoices\":[{\"id\":\"id-1\",\"from\":\"2020-05-23\",\"to\":\"2020-05-28\",\"total\":\"123.00\",\"status\":\"OPEN\"}]}");
var singleRequest = request("/billing/v2/tenant/" + tenant + "/bill/id-1").roles(tenantReader);
tester.assertResponse(singleRequest, """
{"id":"id-1","from":"2020-05-23","to":"2020-05-28","total":"123.00","status":"OPEN","statusHistory":[{"at":"2020-05-23T00:00:00Z","status":"OPEN"}],"items":[{"id":"some-id","description":"description","amount":"123.00","plan":{"id":"paid","name":"Paid Plan - for testing purposes"},"majorVersion":0,"cpu":{},"memory":{},"disk":{},"gpu":{}}]}""");
}
@Test
void require_accountant_summary() {
var tenantRequest = request("/billing/v2/accountant").roles(tenantReader);
tester.assertResponse(tenantRequest, "{\n" +
" \"code\" : 403,\n" +
" \"message\" : \"Access denied\"\n" +
"}", 403);
var accountantRequest = request("/billing/v2/accountant").roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"tenants":[{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"quota":{"budget":-1.0},"collection":"AUTO","lastBill":"1970-01-01","unbilled":"0.00"}]}""");
}
@Test
void require_accountant_preview() {
var accountantRequest = request("/billing/v2/accountant/preview").roles(Role.hostedAccountant());
billingController.uncommittedBills.put(tenant, createBill());
tester.assertResponse(accountantRequest, """
{"tenants":[{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"quota":{"budget":-1.0},"collection":"AUTO","lastBill":"2020-05-23","unbilled":"123.00"}]}""");
}
@Test
void require_accountant_tenant_preview() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/preview").roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, "{\"id\":\"empty\",\"from\":\"2021-04-13\",\"to\":\"2021-04-12\",\"total\":\"0.00\",\"status\":\"OPEN\",\"statusHistory\":[{\"at\":\"2021-04-13T00:00:00Z\",\"status\":\"OPEN\"}],\"items\":[]}");
}
@Test
void require_accountant_tenant_bill() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/preview", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("{\"from\": \"2020-05-01\",\"to\": \"2020-06-01\"}");
tester.assertResponse(accountantRequest, "{\"message\":\"Created bill id-123\"}");
}
@Test
void require_list_of_all_plans() {
var accountantRequest = request("/billing/v2/accountant/plans")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, "{\"plans\":[{\"id\":\"trial\",\"name\":\"Free Trial - for testing purposes\"},{\"id\":\"paid\",\"name\":\"Paid Plan - for testing purposes\"},{\"id\":\"none\",\"name\":\"None Plan - for testing purposes\"}]}");
}
@Test
void require_additional_items_empty() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"items":[]}""");
}
@Test
void require_additional_items_with_content() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{
"description": "Additional support costs",
"amount": "123.45"
}""");
tester.assertResponse(accountantRequest, """
{"message":"Added line item for tenant tenant1"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"items":[{"id":"line-item-id","description":"Additional support costs","amount":"123.45","plan":{"id":"paid","name":"Paid Plan - for testing purposes"},"majorVersion":0,"cpu":{},"memory":{},"disk":{},"gpu":{}}]}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/item/line-item-id", Request.Method.DELETE)
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"message":"Successfully deleted line item line-item-id"}""");
}
}
@Test
void require_current_plan() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"id":"trial","name":"Free Trial - for testing purposes"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{"id": "paid"}""");
tester.assertResponse(accountantRequest, """
{"message":"Plan: paid"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"id":"paid","name":"Paid Plan - for testing purposes"}""");
}
}
@Test
void require_current_collection() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"collection":"AUTO"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{"collection": "INVOICE"}""");
tester.assertResponse(accountantRequest, """
{"message":"Collection: INVOICE"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"collection":"INVOICE"}""");
}
}
@Test
void require_accountant_tenant() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes","billed":false,"supported":false},"billing":{},"collection":"AUTO"}""");
}
@Test
void lists_accepted_countries() {
var req = request("/billing/v2/countries").roles(tenantReader);
tester.assertJsonResponse(req, new File("accepted-countries.json"));
}
// NOTE: removed a stray @Test annotation — this is a fixture factory, not a test,
// and JUnit 5 rejects private static @Test methods anyway.
/**
 * Builds the fixed test bill: id "id-1", a single line item, status OPEN since
 * 2020-05-23, covering the week 2020-05-23 to 2020-05-29 (exclusive end).
 */
private static Bill createBill() {
    var start = LocalDate.of(2020, 5, 23).atStartOfDay(ZoneOffset.UTC);
    var end = start.toLocalDate().plusDays(6).atStartOfDay(ZoneOffset.UTC);
    var statusHistory = new StatusHistory(new TreeMap<>(Map.of(start, BillStatus.OPEN)));
    return new Bill(
            Bill.Id.of("id-1"),
            TenantName.defaultName(),
            statusHistory,
            List.of(createLineItem(start)),
            start,
            end
    );
}
static Bill.LineItem createLineItem(ZonedDateTime addedAt) {
return new Bill.LineItem(
"some-id",
"description",
new BigDecimal("123.00"),
"paid",
"Smith",
addedAt
);
}
} | class BillingApiHandlerV2Test extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/responses/";
private static final TenantName tenant = TenantName.from("tenant1");
private static final TenantName tenant2 = TenantName.from("tenant2");
private static final Set<Role> tenantReader = Set.of(Role.reader(tenant));
private static final Set<Role> tenantAdmin = Set.of(Role.administrator(tenant));
private static final Set<Role> financeAdmin = Set.of(Role.hostedAccountant());
private MockBillingController billingController;
private ContainerTester tester;
@BeforeEach
public void before() {
tester = new ContainerTester(container, responseFiles);
tester.controller().tenants().create(new CloudTenantSpec(tenant, ""), new Auth0Credentials(() -> "foo", Set.of(Role.hostedOperator())));
var clock = (ManualClock) tester.controller().serviceRegistry().clock();
clock.setInstant(Instant.parse("2021-04-13T00:00:00Z"));
billingController = (MockBillingController) tester.serviceRegistry().billingController();
billingController.addBill(tenant, createBill(), true);
}
@Override
protected String variablePartXml() {
return " <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControlRequests'/>\n" +
" <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControl'/>\n" +
" <handler id='com.yahoo.vespa.hosted.controller.restapi.billing.BillingApiHandlerV2'>\n" +
" <binding>http:
" </handler>\n" +
" <http>\n" +
" <server id='default' port='8080' />\n" +
" <filtering>\n" +
" <request-chain id='default'>\n" +
" <filter id='com.yahoo.vespa.hosted.controller.restapi.filter.ControllerAuthorizationFilter'/>\n" +
" <binding>http:
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n";
}
@Test
void require_tenant_info() {
var request = request("/billing/v2/tenant/" + tenant.value()).roles(tenantReader);
tester.assertResponse(request, "{\"tenant\":\"tenant1\",\"plan\":{\"id\":\"trial\",\"name\":\"Free Trial - for testing purposes\"},\"collection\":\"AUTO\"}");
}
@Test
void require_accountant_for_update_collection() {
var request = request("/billing/v2/tenant/" + tenant.value(), Request.Method.PATCH)
.data("{\"collection\": \"INVOICE\"}");
var forbidden = request.roles(tenantAdmin);
tester.assertResponse(forbidden, """
{
"code" : 403,
"message" : "Access denied"
}""", 403);
var success = request.roles(financeAdmin);
tester.assertResponse(success, """
{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"collection":"INVOICE"}""");
}
@Test
void require_tenant_usage() {
var request = request("/billing/v2/tenant/" + tenant + "/usage").roles(tenantReader);
tester.assertResponse(request, "{\"from\":\"2021-04-13\",\"to\":\"2021-04-13\",\"total\":\"0.00\",\"items\":[]}");
}
@Test
void require_tenant_invoice() {
var listRequest = request("/billing/v2/tenant/" + tenant + "/bill").roles(tenantReader);
tester.assertResponse(listRequest, "{\"invoices\":[{\"id\":\"id-1\",\"from\":\"2020-05-23\",\"to\":\"2020-05-28\",\"total\":\"123.00\",\"status\":\"OPEN\"}]}");
var singleRequest = request("/billing/v2/tenant/" + tenant + "/bill/id-1").roles(tenantReader);
tester.assertResponse(singleRequest, """
{"id":"id-1","from":"2020-05-23","to":"2020-05-28","total":"123.00","status":"OPEN","statusHistory":[{"at":"2020-05-23T00:00:00Z","status":"OPEN"}],"items":[{"id":"some-id","description":"description","amount":"123.00","plan":{"id":"paid","name":"Paid Plan - for testing purposes"},"majorVersion":0,"cpu":{},"memory":{},"disk":{},"gpu":{}}]}""");
}
@Test
void require_accountant_summary() {
var tenantRequest = request("/billing/v2/accountant").roles(tenantReader);
tester.assertResponse(tenantRequest, "{\n" +
" \"code\" : 403,\n" +
" \"message\" : \"Access denied\"\n" +
"}", 403);
var accountantRequest = request("/billing/v2/accountant").roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"tenants":[{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"quota":{"budget":-1.0},"collection":"AUTO","lastBill":"1970-01-01","unbilled":"0.00"}]}""");
}
@Test
void require_accountant_preview() {
var accountantRequest = request("/billing/v2/accountant/preview").roles(Role.hostedAccountant());
billingController.uncommittedBills.put(tenant, createBill());
tester.assertResponse(accountantRequest, """
{"tenants":[{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes"},"quota":{"budget":-1.0},"collection":"AUTO","lastBill":"2020-05-23","unbilled":"123.00"}]}""");
}
@Test
void require_accountant_tenant_preview() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/preview").roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, "{\"id\":\"empty\",\"from\":\"2021-04-13\",\"to\":\"2021-04-12\",\"total\":\"0.00\",\"status\":\"OPEN\",\"statusHistory\":[{\"at\":\"2021-04-13T00:00:00Z\",\"status\":\"OPEN\"}],\"items\":[]}");
}
@Test
void require_accountant_tenant_bill() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/preview", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("{\"from\": \"2020-05-01\",\"to\": \"2020-06-01\"}");
tester.assertResponse(accountantRequest, "{\"message\":\"Created bill id-123\"}");
}
@Test
void require_list_of_all_plans() {
var accountantRequest = request("/billing/v2/accountant/plans")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, "{\"plans\":[{\"id\":\"trial\",\"name\":\"Free Trial - for testing purposes\"},{\"id\":\"paid\",\"name\":\"Paid Plan - for testing purposes\"},{\"id\":\"none\",\"name\":\"None Plan - for testing purposes\"}]}");
}
@Test
void require_additional_items_empty() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"items":[]}""");
}
@Test
void require_additional_items_with_content() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{
"description": "Additional support costs",
"amount": "123.45"
}""");
tester.assertResponse(accountantRequest, """
{"message":"Added line item for tenant tenant1"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/items")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"items":[{"id":"line-item-id","description":"Additional support costs","amount":"123.45","plan":{"id":"paid","name":"Paid Plan - for testing purposes"},"majorVersion":0,"cpu":{},"memory":{},"disk":{},"gpu":{}}]}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/item/line-item-id", Request.Method.DELETE)
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"message":"Successfully deleted line item line-item-id"}""");
}
}
@Test
void require_current_plan() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"id":"trial","name":"Free Trial - for testing purposes"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{"id": "paid"}""");
tester.assertResponse(accountantRequest, """
{"message":"Plan: paid"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/plan")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"id":"paid","name":"Paid Plan - for testing purposes"}""");
}
}
@Test
void require_current_collection() {
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"collection":"AUTO"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection", Request.Method.POST)
.roles(Role.hostedAccountant())
.data("""
{"collection": "INVOICE"}""");
tester.assertResponse(accountantRequest, """
{"message":"Collection: INVOICE"}""");
}
{
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1/collection")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"collection":"INVOICE"}""");
}
}
@Test
void require_accountant_tenant() {
var accountantRequest = request("/billing/v2/accountant/tenant/tenant1")
.roles(Role.hostedAccountant());
tester.assertResponse(accountantRequest, """
{"tenant":"tenant1","plan":{"id":"trial","name":"Free Trial - for testing purposes","billed":false,"supported":false},"billing":{},"collection":"AUTO"}""");
}
@Test
void lists_accepted_countries() {
var req = request("/billing/v2/countries").roles(tenantReader);
tester.assertJsonResponse(req, new File("accepted-countries.json"));
}
// NOTE: removed a stray @Test annotation — this is a fixture factory, not a test,
// and JUnit 5 rejects private static @Test methods anyway.
/**
 * Builds the fixed test bill: id "id-1", a single line item, status OPEN since
 * 2020-05-23, covering the week 2020-05-23 to 2020-05-29 (exclusive end).
 */
private static Bill createBill() {
    var start = LocalDate.of(2020, 5, 23).atStartOfDay(ZoneOffset.UTC);
    var end = start.toLocalDate().plusDays(6).atStartOfDay(ZoneOffset.UTC);
    var statusHistory = new StatusHistory(new TreeMap<>(Map.of(start, BillStatus.OPEN)));
    return new Bill(
            Bill.Id.of("id-1"),
            TenantName.defaultName(),
            statusHistory,
            List.of(createLineItem(start)),
            start,
            end
    );
}
static Bill.LineItem createLineItem(ZonedDateTime addedAt) {
return new Bill.LineItem(
"some-id",
"description",
new BigDecimal("123.00"),
"paid",
"Smith",
addedAt
);
}
} |
This shows the mapping from invoice update (system-agnostic) to our bill status. | List<InvoiceUpdate> maintainInvoices() {
// Reconciles every bill that has been exported to the external invoice system and
// is not yet in a final state, mapping each external update back onto our bill status.
// Returns the updates reported by the external system, one per maintained bill.
var updates = new ArrayList<InvoiceUpdate>();
var tenants = cloudTenants();
var billsNeedingMaintenance = databaseClient.readBills().stream()
        .filter(bill -> bill.getExportedId().isPresent())
        .filter(exported -> ! exported.status().isFinal())
        .toList();
for (var bill : billsNeedingMaintenance) {
    var exportedId = bill.getExportedId().orElseThrow();
    var update = reporter.maintainInvoice(tenants.get(bill.tenant()), bill);
    switch (update.type()) {
        case UNMODIFIED -> log.finer(() -> invoiceMessage(bill.id(), exportedId) + " was not modified");
        // Supplier form (consistent with the other cases) defers building the message —
        // including the itemsUpdate().get() call — unless FINE logging is enabled.
        case MODIFIED -> log.fine(() -> invoiceMessage(bill.id(), exportedId) + " was updated with " + update.itemsUpdate().get());
        case UNMODIFIABLE -> {
            if (bill.status() != BillStatus.FROZEN) {
                log.fine(() -> invoiceMessage(bill.id(), exportedId) + " is now unmodifiable");
                databaseClient.setStatus(bill.id(), "system", BillStatus.FROZEN);
            }
        }
        case REMOVED -> {
            // Invoice vanished externally: clear the exported id so the bill can be exported again.
            log.fine(() -> invoiceMessage(bill.id(), exportedId) + " has been deleted in the external system");
            databaseClient.setExportedInvoiceId(bill.id(), null);
        }
        case PAID -> {
            log.fine(() -> invoiceMessage(bill.id(), exportedId) + " has been paid in the external system");
            databaseClient.setStatus(bill.id(), "system", BillStatus.SUCCESSFUL);
        }
        case VOIDED -> {
            log.fine(() -> invoiceMessage(bill.id(), exportedId) + " has been voided in the external system");
            databaseClient.setStatus(bill.id(), "system", BillStatus.VOID);
        }
    }
    updates.add(update);
}
return updates;
} | } | List<InvoiceUpdate> maintainInvoices() {
// Reconciles every bill that has been exported to the external invoice system and
// is not yet in a final state, mapping each external update back onto our bill status.
// Returns the updates reported by the external system, one per maintained bill.
var updates = new ArrayList<InvoiceUpdate>();
var tenants = cloudTenants();
var billsNeedingMaintenance = databaseClient.readBills().stream()
        .filter(bill -> bill.getExportedId().isPresent())
        .filter(exported -> ! exported.status().isFinal())
        .toList();
for (var bill : billsNeedingMaintenance) {
    var exportedId = bill.getExportedId().orElseThrow();
    var update = reporter.maintainInvoice(tenants.get(bill.tenant()), bill);
    switch (update.type()) {
        case UNMODIFIED -> log.finer(() -> invoiceMessage(bill.id(), exportedId) + " was not modified");
        // Supplier form (consistent with the other cases) defers building the message —
        // including the itemsUpdate().get() call — unless FINE logging is enabled.
        case MODIFIED -> log.fine(() -> invoiceMessage(bill.id(), exportedId) + " was updated with " + update.itemsUpdate().get());
        case UNMODIFIABLE -> {
            if (bill.status() != BillStatus.FROZEN) {
                log.fine(() -> invoiceMessage(bill.id(), exportedId) + " is now unmodifiable");
                databaseClient.setStatus(bill.id(), "system", BillStatus.FROZEN);
            }
        }
        case REMOVED -> {
            // Invoice vanished externally: clear the exported id so the bill can be exported again.
            log.fine(() -> invoiceMessage(bill.id(), exportedId) + " has been deleted in the external system");
            databaseClient.setExportedInvoiceId(bill.id(), null);
        }
        case PAID -> {
            log.fine(() -> invoiceMessage(bill.id(), exportedId) + " has been paid in the external system");
            databaseClient.setStatus(bill.id(), "system", BillStatus.SUCCESSFUL);
        }
        case VOIDED -> {
            log.fine(() -> invoiceMessage(bill.id(), exportedId) + " has been voided in the external system");
            databaseClient.setStatus(bill.id(), "system", BillStatus.VOID);
        }
    }
    updates.add(update);
}
return updates;
}
private final BillingReporter reporter;
private final BillingController billing;
private final BillingDatabaseClient databaseClient;
private final PlanRegistry plans;
public BillingReportMaintainer(Controller controller, Duration interval) {
super(controller, interval, null, Set.of(SystemName.Public, SystemName.PublicCd));
reporter = controller.serviceRegistry().billingReporter();
billing = controller.serviceRegistry().billingController();
databaseClient = controller.serviceRegistry().billingDatabase();
plans = controller.serviceRegistry().planRegistry();
}
@Override
protected double maintain() {
maintainTenants();
var updates = maintainInvoices();
log.fine("Updated invoices: " + updates);
return 0.0;
}
private void maintainTenants() {
var tenants = cloudTenants();
var tenantNames = List.copyOf(tenants.keySet());
var billableTenants = billableTenants(tenantNames);
billableTenants.forEach(tenant -> {
controller().tenants().lockIfPresent(tenant, LockedTenant.Cloud.class, locked -> {
var ref = reporter.maintainTenant(locked.get());
if (locked.get().billingReference().isEmpty() || ! locked.get().billingReference().get().equals(ref)) {
controller().tenants().store(locked.with(ref));
}
});
});
}
private String invoiceMessage(Bill.Id billId, String invoiceId) {
return "Invoice '" + invoiceId + "' for bill '" + billId.value() + "'";
}
private Map<TenantName, CloudTenant> cloudTenants() {
return controller().tenants().asList()
.stream()
.filter(CloudTenant.class::isInstance)
.map(CloudTenant.class::cast)
.collect(Collectors.toMap(
Tenant::name,
Function.identity()));
}
private List<Plan> billablePlans() {
return plans.all().stream()
.filter(Plan::isBilled)
.toList();
}
private List<TenantName> billableTenants(List<TenantName> tenants) {
return billablePlans().stream()
.flatMap(p -> billing.tenantsWithPlan(tenants, p.id()).stream())
.toList();
}
} | class BillingReportMaintainer extends ControllerMaintainer {
private final BillingReporter reporter;
private final BillingController billing;
private final BillingDatabaseClient databaseClient;
private final PlanRegistry plans;
public BillingReportMaintainer(Controller controller, Duration interval) {
super(controller, interval, null, Set.of(SystemName.Public, SystemName.PublicCd));
reporter = controller.serviceRegistry().billingReporter();
billing = controller.serviceRegistry().billingController();
databaseClient = controller.serviceRegistry().billingDatabase();
plans = controller.serviceRegistry().planRegistry();
}
@Override
protected double maintain() {
maintainTenants();
var updates = maintainInvoices();
log.fine("Updated invoices: " + updates);
return 0.0;
}
private void maintainTenants() {
var tenants = cloudTenants();
var tenantNames = List.copyOf(tenants.keySet());
var billableTenants = billableTenants(tenantNames);
billableTenants.forEach(tenant -> {
controller().tenants().lockIfPresent(tenant, LockedTenant.Cloud.class, locked -> {
var ref = reporter.maintainTenant(locked.get());
if (locked.get().billingReference().isEmpty() || ! locked.get().billingReference().get().equals(ref)) {
controller().tenants().store(locked.with(ref));
}
});
});
}
private String invoiceMessage(Bill.Id billId, String invoiceId) {
return "Invoice '" + invoiceId + "' for bill '" + billId.value() + "'";
}
private Map<TenantName, CloudTenant> cloudTenants() {
return controller().tenants().asList()
.stream()
.filter(CloudTenant.class::isInstance)
.map(CloudTenant.class::cast)
.collect(Collectors.toMap(
Tenant::name,
Function.identity()));
}
private List<Plan> billablePlans() {
return plans.all().stream()
.filter(Plan::isBilled)
.toList();
}
private List<TenantName> billableTenants(List<TenantName> tenants) {
return billablePlans().stream()
.flatMap(p -> billing.tenantsWithPlan(tenants, p.id()).stream())
.toList();
}
} |
Perhaps the message should say something about what this implies, so that the user can do some changes. | private void warnOnce() {
if (shouldWarn) {
deployLogger.log(Level.WARNING, "Using attribute(" + attr +") " + collType + " as ranking expression input");
shouldWarn = false;
}
} | deployLogger.log(Level.WARNING, "Using attribute(" + attr +") " + collType + " as ranking expression input"); | private void warnOnce() {
if (shouldWarn) {
deployLogger.log(Level.WARNING, "Using attribute(" + attr +") " + collType + " in ranking expression will always evaluate to 0.0");
shouldWarn = false;
}
} | class AttributeErrorType extends TensorType {
private final DeployLogger deployLogger;
private final String attr;
private final Attribute.CollectionType collType;
private boolean shouldWarn = true;
AttributeErrorType(DeployLogger deployLogger, String attr, Attribute.CollectionType collType) {
super(TensorType.Value.DOUBLE, List.of());
this.deployLogger = deployLogger;
this.attr = attr;
this.collType = collType;
}
@Override public TensorType.Value valueType() { warnOnce(); return super.valueType(); }
@Override public int rank() { warnOnce(); return super.rank(); }
@Override public List<TensorType.Dimension> dimensions() { warnOnce(); return super.dimensions(); }
@Override public boolean equals(Object o) {
if (o instanceof TensorType other) {
return (other.rank() == 0);
}
return false;
}
} | class AttributeErrorType extends TensorType {
private final DeployLogger deployLogger;
private final String attr;
private final Attribute.CollectionType collType;
private boolean shouldWarn = true;
AttributeErrorType(DeployLogger deployLogger, String attr, Attribute.CollectionType collType) {
super(TensorType.Value.DOUBLE, List.of());
this.deployLogger = deployLogger;
this.attr = attr;
this.collType = collType;
}
@Override public TensorType.Value valueType() { warnOnce(); return super.valueType(); }
@Override public int rank() { warnOnce(); return super.rank(); }
@Override public List<TensorType.Dimension> dimensions() { warnOnce(); return super.dimensions(); }
@Override public boolean equals(Object o) {
if (o instanceof TensorType other) {
return (other.rank() == 0);
}
return false;
}
} |
What will happen if you use `attribute(foo).count`? This is a supported feature for both array and weighted set. From https://docs.vespa.ai/en/reference/rank-features.html#document-features: _attribute(name).count - The number of elements in the attribute with the given name_ | void requireThatUsingWsetWarns() throws Exception {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field foo type weightedset<int> {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: attribute(foo)",
" }",
" }",
"}"
));
builder.build(true);
String message = logger.findMessage("collection");
assertNotNull(message);
assertEquals("WARNING: Using attribute(foo) collectiontype: WEIGHTEDSET as ranking expression input", message);
} | " expression: attribute(foo)", | void requireThatUsingWsetWarns() throws Exception {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field foo type weightedset<int> {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: attribute(foo)",
" }",
" }",
"}"
));
builder.build(true);
String message = logger.findMessage("collection");
assertNotNull(message);
assertEquals("WARNING: Using attribute(foo) collectiontype: WEIGHTEDSET in ranking expression will always evaluate to 0.0", message);
} | class RankingExpressionTypeResolverTestCase {
@Test
void tensorFirstPhaseMustProduceDouble() throws Exception {
try {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[3]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: attribute(a)",
" }",
" }",
"}"
));
builder.build(true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': The first-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x[10],y[3])",
Exceptions.toMessageString(expected));
}
}
@Test
void tensorFirstPhaseFromConstantMustProduceDouble() throws Exception {
try {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"schema test {",
" document test { ",
" field a type tensor(d0[3]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" function my_func() {",
" expression: x_tensor*2.0",
" }",
" function inline other_func() {",
" expression: z_tensor+3.0",
" }",
" first-phase {",
" expression: reduce(attribute(a),sum,d0)+y_tensor+my_func+other_func",
" }",
" constants {",
" x_tensor {",
" type: tensor(x{})",
" value: { {x:bar}:17 }",
" }",
" y_tensor tensor(y{}):{{y:foo}:42 }",
" z_tensor tensor(z{}):{qux:666}",
" }",
" }",
"}"
));
builder.build(true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': The first-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x{},y{},z{})",
Exceptions.toMessageString(expected));
}
}
@Test
void tensorSecondPhaseMustProduceDouble() throws Exception {
try {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[3]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: sum(attribute(a))",
" }",
" second-phase {",
" expression: attribute(a)",
" }",
" }",
"}"
));
builder.build(true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': The second-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x[10],y[3])",
Exceptions.toMessageString(expected));
}
}
@Test
void tensorConditionsMustHaveTypeCompatibleBranches() throws Exception {
try {
ApplicationBuilder schemaBuilder = new ApplicationBuilder();
schemaBuilder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[5]) {",
" indexing: attribute",
" }",
" field b type tensor(z[10]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: sum(if(1>0, attribute(a), attribute(b)))",
" }",
" }",
"}"
));
schemaBuilder.build(true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': The first-phase expression is invalid: An if expression must produce compatible types in both alternatives, but the 'true' type is tensor(x[10],y[5]) while the 'false' type is tensor(z[10])" +
"\n'true' branch: attribute(a)" +
"\n'false' branch: attribute(b)",
Exceptions.toMessageString(expected));
}
}
@Test
void testFunctionInvocationTypes() throws Exception {
RankProfileRegistry rankProfileRegistry = new RankProfileRegistry();
ApplicationBuilder builder = new ApplicationBuilder(rankProfileRegistry);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[3]) {",
" indexing: attribute",
" }",
" field b type tensor(z[10]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" function macro1(attribute_to_use) {",
" expression: attribute(attribute_to_use)",
" }",
" summary-features {",
" macro1(a)",
" macro1(b)",
" }",
" }",
"}"
));
builder.build(true);
RankProfile profile =
builder.getRankProfileRegistry().get(builder.getSchema(), "my_rank_profile");
assertEquals(TensorType.fromSpec("tensor(x[10],y[3])"),
summaryFeatures(profile).get("macro1(a)").type(profile.typeContext(builder.getQueryProfileRegistry())));
assertEquals(TensorType.fromSpec("tensor(z[10])"),
summaryFeatures(profile).get("macro1(b)").type(profile.typeContext(builder.getQueryProfileRegistry())));
}
@Test
void testTensorFunctionInvocationTypes_Nested() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[1]) {",
" indexing: attribute",
" }",
" field b type tensor(z[10]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" function return_a() {",
" expression: return_first(attribute(a), attribute(b))",
" }",
" function return_b() {",
" expression: return_second(attribute(a), attribute(b))",
" }",
" function return_first(e1, e2) {",
" expression: e1",
" }",
" function return_second(e1, e2) {",
" expression: return_first(e2, e1)",
" }",
" summary-features {",
" return_a",
" return_b",
" }",
" }",
"}"
));
builder.build(true);
RankProfile profile =
builder.getRankProfileRegistry().get(builder.getSchema(), "my_rank_profile");
assertEquals(TensorType.fromSpec("tensor(x[10],y[1])"),
summaryFeatures(profile).get("return_a").type(profile.typeContext(builder.getQueryProfileRegistry())));
assertEquals(TensorType.fromSpec("tensor(z[10])"),
summaryFeatures(profile).get("return_b").type(profile.typeContext(builder.getQueryProfileRegistry())));
}
@Test
void testAttributeInvocationViaBoundIdentifier() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search newsarticle {",
" document newsarticle {",
" field title type string {",
" indexing {",
" input title | index",
" }",
" weight: 30",
" }",
" field usstaticrank type int {",
" indexing: summary | attribute",
" }",
" field eustaticrank type int {",
" indexing: summary | attribute",
" }",
" }",
" rank-profile default {",
" macro newsboost() { ",
" expression: 200 * matches(title)",
" }",
" macro commonboost(mystaticrank) { ",
" expression: attribute(mystaticrank) + newsboost",
" }",
" macro commonfirstphase(mystaticrank) { ",
" expression: nativeFieldMatch(title) + commonboost(mystaticrank) ",
" }",
" first-phase { expression: commonfirstphase(usstaticrank) }",
" }",
" rank-profile eurank inherits default {",
" first-phase { expression: commonfirstphase(eustaticrank) }",
" }",
"}"));
builder.build(true);
RankProfile profile = builder.getRankProfileRegistry().get(builder.getSchema(), "eurank");
}
@Test
void testTensorFunctionInvocationTypes_NestedSameName() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[1]) {",
" indexing: attribute",
" }",
" field b type tensor(z[10]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" function return_a() {",
" expression: return_first(attribute(a), attribute(b))",
" }",
" function return_b() {",
" expression: return_second(attribute(a), attribute(b))",
" }",
" function return_first(e1, e2) {",
" expression: just_return(e1)",
" }",
" function just_return(e1) {",
" expression: e1",
" }",
" function return_second(e1, e2) {",
" expression: return_first(e2+0, e1)",
" }",
" summary-features {",
" return_a",
" return_b",
" }",
" }",
"}"
));
builder.build(true);
RankProfile profile =
builder.getRankProfileRegistry().get(builder.getSchema(), "my_rank_profile");
assertEquals(TensorType.fromSpec("tensor(x[10],y[1])"),
summaryFeatures(profile).get("return_a").type(profile.typeContext(builder.getQueryProfileRegistry())));
assertEquals(TensorType.fromSpec("tensor(z[10])"),
summaryFeatures(profile).get("return_b").type(profile.typeContext(builder.getQueryProfileRegistry())));
}
@Test
void testTensorFunctionInvocationTypes_viaFuncWithExpr() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test {",
" field t1 type tensor<float>(y{}) { indexing: attribute | summary }",
" field t2 type tensor<float>(x{}) { indexing: attribute | summary }",
" }",
" rank-profile test {",
" function my_func(t) { expression: sum(t, x) + 1 }",
" function test_func_via_func_with_expr() { expression: call_func_with_expr( attribute(t1), attribute(t2) ) }",
" function call_func_with_expr(a, b) { expression: my_func( a * b ) }",
" summary-features { test_func_via_func_with_expr }",
" }",
"}"));
builder.build(true);
RankProfile profile = builder.getRankProfileRegistry().get(builder.getSchema(), "test");
assertEquals(TensorType.fromSpec("tensor<float>(y{})"),
summaryFeatures(profile).get("test_func_via_func_with_expr").type(profile.typeContext(builder.getQueryProfileRegistry())));
}
@Test
void importedFieldsAreAvailable() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search parent {",
" document parent {",
" field a type tensor(x[5],y[1000]) {",
" indexing: attribute",
" }",
" }",
"}"
));
builder.addSchema(joinLines(
"search child {",
" document child { ",
" field ref type reference<parent> {",
"indexing: attribute | summary",
" }",
" }",
" import field ref.a as imported_a {}",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: sum(attribute(imported_a))",
" }",
" }",
"}"
));
builder.build(true);
}
@Test
void undeclaredQueryFeaturesAreAccepted() throws Exception {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field anyfield type double {" +
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: query(foo) + f() + sum(attribute(anyfield))",
" }",
" function f() {",
" expression: query(bar) + query(baz)",
" }",
" }",
"}"
));
builder.build(true);
String message = logger.findMessage("The following query features");
assertNull(message);
}
@Test
void undeclaredQueryFeaturesAreNotAcceptedWhenStrict() throws Exception {
try {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field anyfield type double {" +
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" strict: true" +
" first-phase {",
" expression: query(foo) + f() + sum(attribute(anyfield))",
" }",
" function f() {",
" expression: query(bar) + query(baz)",
" }",
" }",
"}"
));
builder.build(true);
}
catch (IllegalArgumentException e) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': rank profile 'my_rank_profile' " +
"is strict but is missing a declaration of inputs [query(bar), query(baz), query(foo)]",
Exceptions.toMessageString(e));
}
}
@Test
void undeclaredQueryFeaturesAreAcceptedWithWarningWhenUsingTensors() throws Exception {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field anyfield type tensor(d[2]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: query(foo) + f() + sum(attribute(anyfield))",
" }",
" function f() {",
" expression: query(bar) + query(baz)",
" }",
" }",
"}"
));
builder.build(true);
String message = logger.findMessage("The following inputs");
assertNotNull(message);
assertEquals("WARNING: The following inputs used in rank profile 'my_rank_profile' are not declared and " +
"will be interpreted as scalars, not tensors: [query(bar), query(baz), query(foo)]",
message);
}
@Test
void requireThatUsingArrayWarns() throws Exception {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field foo type array<float> {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: map(attribute(foo), f(x)(42*x))",
" }",
" }",
"}"
));
builder.build(true);
String message = logger.findMessage("collection");
assertNotNull(message);
assertEquals("WARNING: Using attribute(foo) collectiontype: ARRAY as ranking expression input", message);
}
@Test
@Test
void noWarningWhenUsingTensorsWhenQueryFeaturesAreDeclared() throws Exception {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
QueryProfileType myType = new QueryProfileType("mytype");
myType.addField(new FieldDescription("rank.feature.query(foo)",
new TensorFieldType(TensorType.fromSpec("tensor(d[2])"))),
builder.getQueryProfileRegistry().getTypeRegistry());
myType.addField(new FieldDescription("rank.feature.query(bar)",
new TensorFieldType(TensorType.fromSpec("tensor(d[2])"))),
builder.getQueryProfileRegistry().getTypeRegistry());
myType.addField(new FieldDescription("rank.feature.query(baz)",
new TensorFieldType(TensorType.fromSpec("tensor(d[2])"))),
builder.getQueryProfileRegistry().getTypeRegistry());
builder.getQueryProfileRegistry().getTypeRegistry().register(myType);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field anyfield type tensor(d[2]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: sum(query(foo) + f() + sum(attribute(anyfield)))",
" }",
" function f() {",
" expression: query(bar) + query(baz)",
" }",
" }",
"}"
));
builder.build(true);
String message = logger.findMessage("The following query features");
assertNull(message);
}
private Map<String, ReferenceNode> summaryFeatures(RankProfile profile) {
return profile.getSummaryFeatures().stream().collect(Collectors.toMap(f -> f.toString(), f -> f));
}
private static class InspectableDeployLogger implements DeployLogger {
private List<String> messages = new ArrayList<>();
@Override
public void log(Level level, String message) {
messages.add(level + ": " + message);
}
/** Returns the first message containing the given string, or null if none */
public String findMessage(String substring) {
return messages.stream().filter(message -> message.contains(substring)).findFirst().orElse(null);
}
}
} | class RankingExpressionTypeResolverTestCase {
@Test
void tensorFirstPhaseMustProduceDouble() throws Exception {
try {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[3]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: attribute(a)",
" }",
" }",
"}"
));
builder.build(true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': The first-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x[10],y[3])",
Exceptions.toMessageString(expected));
}
}
@Test
void tensorFirstPhaseFromConstantMustProduceDouble() throws Exception {
try {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"schema test {",
" document test { ",
" field a type tensor(d0[3]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" function my_func() {",
" expression: x_tensor*2.0",
" }",
" function inline other_func() {",
" expression: z_tensor+3.0",
" }",
" first-phase {",
" expression: reduce(attribute(a),sum,d0)+y_tensor+my_func+other_func",
" }",
" constants {",
" x_tensor {",
" type: tensor(x{})",
" value: { {x:bar}:17 }",
" }",
" y_tensor tensor(y{}):{{y:foo}:42 }",
" z_tensor tensor(z{}):{qux:666}",
" }",
" }",
"}"
));
builder.build(true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': The first-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x{},y{},z{})",
Exceptions.toMessageString(expected));
}
}
@Test
void tensorSecondPhaseMustProduceDouble() throws Exception {
try {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[3]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: sum(attribute(a))",
" }",
" second-phase {",
" expression: attribute(a)",
" }",
" }",
"}"
));
builder.build(true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': The second-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x[10],y[3])",
Exceptions.toMessageString(expected));
}
}
@Test
void tensorConditionsMustHaveTypeCompatibleBranches() throws Exception {
try {
ApplicationBuilder schemaBuilder = new ApplicationBuilder();
schemaBuilder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[5]) {",
" indexing: attribute",
" }",
" field b type tensor(z[10]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: sum(if(1>0, attribute(a), attribute(b)))",
" }",
" }",
"}"
));
schemaBuilder.build(true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': The first-phase expression is invalid: An if expression must produce compatible types in both alternatives, but the 'true' type is tensor(x[10],y[5]) while the 'false' type is tensor(z[10])" +
"\n'true' branch: attribute(a)" +
"\n'false' branch: attribute(b)",
Exceptions.toMessageString(expected));
}
}
@Test
void testFunctionInvocationTypes() throws Exception {
RankProfileRegistry rankProfileRegistry = new RankProfileRegistry();
ApplicationBuilder builder = new ApplicationBuilder(rankProfileRegistry);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[3]) {",
" indexing: attribute",
" }",
" field b type tensor(z[10]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" function macro1(attribute_to_use) {",
" expression: attribute(attribute_to_use)",
" }",
" summary-features {",
" macro1(a)",
" macro1(b)",
" }",
" }",
"}"
));
builder.build(true);
RankProfile profile =
builder.getRankProfileRegistry().get(builder.getSchema(), "my_rank_profile");
assertEquals(TensorType.fromSpec("tensor(x[10],y[3])"),
summaryFeatures(profile).get("macro1(a)").type(profile.typeContext(builder.getQueryProfileRegistry())));
assertEquals(TensorType.fromSpec("tensor(z[10])"),
summaryFeatures(profile).get("macro1(b)").type(profile.typeContext(builder.getQueryProfileRegistry())));
}
@Test
void testTensorFunctionInvocationTypes_Nested() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[1]) {",
" indexing: attribute",
" }",
" field b type tensor(z[10]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" function return_a() {",
" expression: return_first(attribute(a), attribute(b))",
" }",
" function return_b() {",
" expression: return_second(attribute(a), attribute(b))",
" }",
" function return_first(e1, e2) {",
" expression: e1",
" }",
" function return_second(e1, e2) {",
" expression: return_first(e2, e1)",
" }",
" summary-features {",
" return_a",
" return_b",
" }",
" }",
"}"
));
builder.build(true);
RankProfile profile =
builder.getRankProfileRegistry().get(builder.getSchema(), "my_rank_profile");
assertEquals(TensorType.fromSpec("tensor(x[10],y[1])"),
summaryFeatures(profile).get("return_a").type(profile.typeContext(builder.getQueryProfileRegistry())));
assertEquals(TensorType.fromSpec("tensor(z[10])"),
summaryFeatures(profile).get("return_b").type(profile.typeContext(builder.getQueryProfileRegistry())));
}
@Test
void testAttributeInvocationViaBoundIdentifier() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search newsarticle {",
" document newsarticle {",
" field title type string {",
" indexing {",
" input title | index",
" }",
" weight: 30",
" }",
" field usstaticrank type int {",
" indexing: summary | attribute",
" }",
" field eustaticrank type int {",
" indexing: summary | attribute",
" }",
" }",
" rank-profile default {",
" macro newsboost() { ",
" expression: 200 * matches(title)",
" }",
" macro commonboost(mystaticrank) { ",
" expression: attribute(mystaticrank) + newsboost",
" }",
" macro commonfirstphase(mystaticrank) { ",
" expression: nativeFieldMatch(title) + commonboost(mystaticrank) ",
" }",
" first-phase { expression: commonfirstphase(usstaticrank) }",
" }",
" rank-profile eurank inherits default {",
" first-phase { expression: commonfirstphase(eustaticrank) }",
" }",
"}"));
builder.build(true);
RankProfile profile = builder.getRankProfileRegistry().get(builder.getSchema(), "eurank");
}
@Test
void testTensorFunctionInvocationTypes_NestedSameName() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[1]) {",
" indexing: attribute",
" }",
" field b type tensor(z[10]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" function return_a() {",
" expression: return_first(attribute(a), attribute(b))",
" }",
" function return_b() {",
" expression: return_second(attribute(a), attribute(b))",
" }",
" function return_first(e1, e2) {",
" expression: just_return(e1)",
" }",
" function just_return(e1) {",
" expression: e1",
" }",
" function return_second(e1, e2) {",
" expression: return_first(e2+0, e1)",
" }",
" summary-features {",
" return_a",
" return_b",
" }",
" }",
"}"
));
builder.build(true);
RankProfile profile =
builder.getRankProfileRegistry().get(builder.getSchema(), "my_rank_profile");
assertEquals(TensorType.fromSpec("tensor(x[10],y[1])"),
summaryFeatures(profile).get("return_a").type(profile.typeContext(builder.getQueryProfileRegistry())));
assertEquals(TensorType.fromSpec("tensor(z[10])"),
summaryFeatures(profile).get("return_b").type(profile.typeContext(builder.getQueryProfileRegistry())));
}
@Test
void testTensorFunctionInvocationTypes_viaFuncWithExpr() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test {",
" field t1 type tensor<float>(y{}) { indexing: attribute | summary }",
" field t2 type tensor<float>(x{}) { indexing: attribute | summary }",
" }",
" rank-profile test {",
" function my_func(t) { expression: sum(t, x) + 1 }",
" function test_func_via_func_with_expr() { expression: call_func_with_expr( attribute(t1), attribute(t2) ) }",
" function call_func_with_expr(a, b) { expression: my_func( a * b ) }",
" summary-features { test_func_via_func_with_expr }",
" }",
"}"));
builder.build(true);
RankProfile profile = builder.getRankProfileRegistry().get(builder.getSchema(), "test");
assertEquals(TensorType.fromSpec("tensor<float>(y{})"),
summaryFeatures(profile).get("test_func_via_func_with_expr").type(profile.typeContext(builder.getQueryProfileRegistry())));
}
@Test
void importedFieldsAreAvailable() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search parent {",
" document parent {",
" field a type tensor(x[5],y[1000]) {",
" indexing: attribute",
" }",
" }",
"}"
));
builder.addSchema(joinLines(
"search child {",
" document child { ",
" field ref type reference<parent> {",
"indexing: attribute | summary",
" }",
" }",
" import field ref.a as imported_a {}",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: sum(attribute(imported_a))",
" }",
" }",
"}"
));
builder.build(true);
}
@Test
void undeclaredQueryFeaturesAreAccepted() throws Exception {
    // Scalar query features used without a declaration are accepted silently:
    // no "The following query features ..." message should be logged.
    InspectableDeployLogger deployLogger = new InspectableDeployLogger();
    ApplicationBuilder schemaBuilder = new ApplicationBuilder(deployLogger);
    schemaBuilder.addSchema(joinLines(
            "search test {",
            "  document test { ",
            "    field anyfield type double {" +
            "      indexing: attribute",
            "    }",
            "  }",
            "  rank-profile my_rank_profile {",
            "    first-phase {",
            "      expression: query(foo) + f() + sum(attribute(anyfield))",
            "    }",
            "    function f() {",
            "      expression: query(bar) + query(baz)",
            "    }",
            "  }",
            "}"
    ));
    schemaBuilder.build(true);
    String warning = deployLogger.findMessage("The following query features");
    assertNull(warning);
}
@Test
void undeclaredQueryFeaturesAreNotAcceptedWhenStrict() throws Exception {
    // A strict rank profile must reject undeclared query features with an
    // IllegalArgumentException listing the missing inputs.
    try {
        InspectableDeployLogger logger = new InspectableDeployLogger();
        ApplicationBuilder builder = new ApplicationBuilder(logger);
        builder.addSchema(joinLines(
                "search test {",
                "  document test { ",
                "    field anyfield type double {" +
                "      indexing: attribute",
                "    }",
                "  }",
                "  rank-profile my_rank_profile {",
                "    strict: true" +
                "    first-phase {",
                "      expression: query(foo) + f() + sum(attribute(anyfield))",
                "    }",
                "    function f() {",
                "      expression: query(bar) + query(baz)",
                "    }",
                "  }",
                "}"
        ));
        builder.build(true);
        // BUG FIX: without this fail() the test passed silently when no
        // exception was thrown, asserting nothing at all.
        fail("Expected exception");
    }
    catch (IllegalArgumentException e) {
        assertEquals("In schema 'test', rank profile 'my_rank_profile': rank profile 'my_rank_profile' " +
                     "is strict but is missing a declaration of inputs [query(bar), query(baz), query(foo)]",
                     Exceptions.toMessageString(e));
    }
}
@Test
void undeclaredQueryFeaturesAreAcceptedWithWarningWhenUsingTensors() throws Exception {
    // When tensors are present, undeclared query features are still accepted,
    // but a warning is logged because they will be interpreted as scalars.
    InspectableDeployLogger deployLogger = new InspectableDeployLogger();
    ApplicationBuilder schemaBuilder = new ApplicationBuilder(deployLogger);
    schemaBuilder.addSchema(joinLines(
            "search test {",
            "  document test { ",
            "    field anyfield type tensor(d[2]) {",
            "      indexing: attribute",
            "    }",
            "  }",
            "  rank-profile my_rank_profile {",
            "    first-phase {",
            "      expression: query(foo) + f() + sum(attribute(anyfield))",
            "    }",
            "    function f() {",
            "      expression: query(bar) + query(baz)",
            "    }",
            "  }",
            "}"
    ));
    schemaBuilder.build(true);
    String warning = deployLogger.findMessage("The following inputs");
    assertNotNull(warning);
    assertEquals("WARNING: The following inputs used in rank profile 'my_rank_profile' are not declared and " +
                 "will be interpreted as scalars, not tensors: [query(bar), query(baz), query(foo)]",
                 warning);
}
@Test
void requireThatUsingArrayWarns() throws Exception {
    // Using an array attribute directly in a ranking expression is meaningless
    // (it evaluates to 0.0), so the resolver must log a warning about it.
    InspectableDeployLogger deployLogger = new InspectableDeployLogger();
    ApplicationBuilder schemaBuilder = new ApplicationBuilder(deployLogger);
    schemaBuilder.addSchema(joinLines(
            "search test {",
            "  document test { ",
            "    field foo type array<float> {",
            "      indexing: attribute",
            "    }",
            "  }",
            "  rank-profile my_rank_profile {",
            "    first-phase {",
            "      expression: map(attribute(foo), f(x)(42*x))",
            "    }",
            "  }",
            "}"
    ));
    schemaBuilder.build(true);
    String warning = deployLogger.findMessage("collection");
    assertNotNull(warning);
    assertEquals("WARNING: Using attribute(foo) collectiontype: ARRAY in ranking expression will always evaluate to 0.0", warning);
}
@Test
// BUG FIX: the @Test annotation was duplicated here. JUnit 5's @Test is not
// @Repeatable, so repeating it on one method is a compile error.
void noWarningWhenUsingTensorsWhenQueryFeaturesAreDeclared() throws Exception {
    // When the tensor query features are declared in a query profile type,
    // no "undeclared query features" warning should be logged.
    InspectableDeployLogger logger = new InspectableDeployLogger();
    ApplicationBuilder builder = new ApplicationBuilder(logger);
    QueryProfileType myType = new QueryProfileType("mytype");
    myType.addField(new FieldDescription("rank.feature.query(foo)",
                    new TensorFieldType(TensorType.fromSpec("tensor(d[2])"))),
            builder.getQueryProfileRegistry().getTypeRegistry());
    myType.addField(new FieldDescription("rank.feature.query(bar)",
                    new TensorFieldType(TensorType.fromSpec("tensor(d[2])"))),
            builder.getQueryProfileRegistry().getTypeRegistry());
    myType.addField(new FieldDescription("rank.feature.query(baz)",
                    new TensorFieldType(TensorType.fromSpec("tensor(d[2])"))),
            builder.getQueryProfileRegistry().getTypeRegistry());
    builder.getQueryProfileRegistry().getTypeRegistry().register(myType);
    builder.addSchema(joinLines(
            "search test {",
            "  document test { ",
            "    field anyfield type tensor(d[2]) {",
            "      indexing: attribute",
            "    }",
            "  }",
            "  rank-profile my_rank_profile {",
            "    first-phase {",
            "      expression: sum(query(foo) + f() + sum(attribute(anyfield)))",
            "    }",
            "    function f() {",
            "      expression: query(bar) + query(baz)",
            "    }",
            "  }",
            "}"
    ));
    builder.build(true);
    String message = logger.findMessage("The following query features");
    assertNull(message);
}
/** Returns the summary features of the given profile, indexed by their string form. */
private Map<String, ReferenceNode> summaryFeatures(RankProfile profile) {
    return profile.getSummaryFeatures().stream()
                  .collect(Collectors.toMap(ReferenceNode::toString, feature -> feature));
}
/** A DeployLogger recording every logged message so tests can inspect what was emitted. */
private static class InspectableDeployLogger implements DeployLogger {

    // Messages in log order, each prefixed by its level, e.g. "WARNING: ...".
    // IMPROVEMENT: made final — the reference is never reassigned.
    private final List<String> messages = new ArrayList<>();

    @Override
    public void log(Level level, String message) {
        messages.add(level + ": " + message);
    }

    /** Returns the first message containing the given string, or null if none */
    public String findMessage(String substring) {
        return messages.stream().filter(message -> message.contains(substring)).findFirst().orElse(null);
    }

}
} |
Discussed with Arne, and this is handled. | void requireThatUsingWsetWarns() throws Exception {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field foo type weightedset<int> {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: attribute(foo)",
" }",
" }",
"}"
));
builder.build(true);
String message = logger.findMessage("collection");
assertNotNull(message);
assertEquals("WARNING: Using attribute(foo) collectiontype: WEIGHTEDSET as ranking expression input", message);
} | " expression: attribute(foo)", | void requireThatUsingWsetWarns() throws Exception {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field foo type weightedset<int> {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: attribute(foo)",
" }",
" }",
"}"
));
builder.build(true);
String message = logger.findMessage("collection");
assertNotNull(message);
assertEquals("WARNING: Using attribute(foo) collectiontype: WEIGHTEDSET in ranking expression will always evaluate to 0.0", message);
} | class RankingExpressionTypeResolverTestCase {
@Test
void tensorFirstPhaseMustProduceDouble() throws Exception {
try {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[3]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: attribute(a)",
" }",
" }",
"}"
));
builder.build(true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': The first-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x[10],y[3])",
Exceptions.toMessageString(expected));
}
}
@Test
void tensorFirstPhaseFromConstantMustProduceDouble() throws Exception {
try {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"schema test {",
" document test { ",
" field a type tensor(d0[3]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" function my_func() {",
" expression: x_tensor*2.0",
" }",
" function inline other_func() {",
" expression: z_tensor+3.0",
" }",
" first-phase {",
" expression: reduce(attribute(a),sum,d0)+y_tensor+my_func+other_func",
" }",
" constants {",
" x_tensor {",
" type: tensor(x{})",
" value: { {x:bar}:17 }",
" }",
" y_tensor tensor(y{}):{{y:foo}:42 }",
" z_tensor tensor(z{}):{qux:666}",
" }",
" }",
"}"
));
builder.build(true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': The first-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x{},y{},z{})",
Exceptions.toMessageString(expected));
}
}
@Test
void tensorSecondPhaseMustProduceDouble() throws Exception {
try {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[3]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: sum(attribute(a))",
" }",
" second-phase {",
" expression: attribute(a)",
" }",
" }",
"}"
));
builder.build(true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': The second-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x[10],y[3])",
Exceptions.toMessageString(expected));
}
}
@Test
void tensorConditionsMustHaveTypeCompatibleBranches() throws Exception {
try {
ApplicationBuilder schemaBuilder = new ApplicationBuilder();
schemaBuilder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[5]) {",
" indexing: attribute",
" }",
" field b type tensor(z[10]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: sum(if(1>0, attribute(a), attribute(b)))",
" }",
" }",
"}"
));
schemaBuilder.build(true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': The first-phase expression is invalid: An if expression must produce compatible types in both alternatives, but the 'true' type is tensor(x[10],y[5]) while the 'false' type is tensor(z[10])" +
"\n'true' branch: attribute(a)" +
"\n'false' branch: attribute(b)",
Exceptions.toMessageString(expected));
}
}
@Test
void testFunctionInvocationTypes() throws Exception {
RankProfileRegistry rankProfileRegistry = new RankProfileRegistry();
ApplicationBuilder builder = new ApplicationBuilder(rankProfileRegistry);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[3]) {",
" indexing: attribute",
" }",
" field b type tensor(z[10]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" function macro1(attribute_to_use) {",
" expression: attribute(attribute_to_use)",
" }",
" summary-features {",
" macro1(a)",
" macro1(b)",
" }",
" }",
"}"
));
builder.build(true);
RankProfile profile =
builder.getRankProfileRegistry().get(builder.getSchema(), "my_rank_profile");
assertEquals(TensorType.fromSpec("tensor(x[10],y[3])"),
summaryFeatures(profile).get("macro1(a)").type(profile.typeContext(builder.getQueryProfileRegistry())));
assertEquals(TensorType.fromSpec("tensor(z[10])"),
summaryFeatures(profile).get("macro1(b)").type(profile.typeContext(builder.getQueryProfileRegistry())));
}
@Test
void testTensorFunctionInvocationTypes_Nested() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[1]) {",
" indexing: attribute",
" }",
" field b type tensor(z[10]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" function return_a() {",
" expression: return_first(attribute(a), attribute(b))",
" }",
" function return_b() {",
" expression: return_second(attribute(a), attribute(b))",
" }",
" function return_first(e1, e2) {",
" expression: e1",
" }",
" function return_second(e1, e2) {",
" expression: return_first(e2, e1)",
" }",
" summary-features {",
" return_a",
" return_b",
" }",
" }",
"}"
));
builder.build(true);
RankProfile profile =
builder.getRankProfileRegistry().get(builder.getSchema(), "my_rank_profile");
assertEquals(TensorType.fromSpec("tensor(x[10],y[1])"),
summaryFeatures(profile).get("return_a").type(profile.typeContext(builder.getQueryProfileRegistry())));
assertEquals(TensorType.fromSpec("tensor(z[10])"),
summaryFeatures(profile).get("return_b").type(profile.typeContext(builder.getQueryProfileRegistry())));
}
@Test
void testAttributeInvocationViaBoundIdentifier() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search newsarticle {",
" document newsarticle {",
" field title type string {",
" indexing {",
" input title | index",
" }",
" weight: 30",
" }",
" field usstaticrank type int {",
" indexing: summary | attribute",
" }",
" field eustaticrank type int {",
" indexing: summary | attribute",
" }",
" }",
" rank-profile default {",
" macro newsboost() { ",
" expression: 200 * matches(title)",
" }",
" macro commonboost(mystaticrank) { ",
" expression: attribute(mystaticrank) + newsboost",
" }",
" macro commonfirstphase(mystaticrank) { ",
" expression: nativeFieldMatch(title) + commonboost(mystaticrank) ",
" }",
" first-phase { expression: commonfirstphase(usstaticrank) }",
" }",
" rank-profile eurank inherits default {",
" first-phase { expression: commonfirstphase(eustaticrank) }",
" }",
"}"));
builder.build(true);
RankProfile profile = builder.getRankProfileRegistry().get(builder.getSchema(), "eurank");
}
@Test
void testTensorFunctionInvocationTypes_NestedSameName() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[1]) {",
" indexing: attribute",
" }",
" field b type tensor(z[10]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" function return_a() {",
" expression: return_first(attribute(a), attribute(b))",
" }",
" function return_b() {",
" expression: return_second(attribute(a), attribute(b))",
" }",
" function return_first(e1, e2) {",
" expression: just_return(e1)",
" }",
" function just_return(e1) {",
" expression: e1",
" }",
" function return_second(e1, e2) {",
" expression: return_first(e2+0, e1)",
" }",
" summary-features {",
" return_a",
" return_b",
" }",
" }",
"}"
));
builder.build(true);
RankProfile profile =
builder.getRankProfileRegistry().get(builder.getSchema(), "my_rank_profile");
assertEquals(TensorType.fromSpec("tensor(x[10],y[1])"),
summaryFeatures(profile).get("return_a").type(profile.typeContext(builder.getQueryProfileRegistry())));
assertEquals(TensorType.fromSpec("tensor(z[10])"),
summaryFeatures(profile).get("return_b").type(profile.typeContext(builder.getQueryProfileRegistry())));
}
@Test
void testTensorFunctionInvocationTypes_viaFuncWithExpr() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test {",
" field t1 type tensor<float>(y{}) { indexing: attribute | summary }",
" field t2 type tensor<float>(x{}) { indexing: attribute | summary }",
" }",
" rank-profile test {",
" function my_func(t) { expression: sum(t, x) + 1 }",
" function test_func_via_func_with_expr() { expression: call_func_with_expr( attribute(t1), attribute(t2) ) }",
" function call_func_with_expr(a, b) { expression: my_func( a * b ) }",
" summary-features { test_func_via_func_with_expr }",
" }",
"}"));
builder.build(true);
RankProfile profile = builder.getRankProfileRegistry().get(builder.getSchema(), "test");
assertEquals(TensorType.fromSpec("tensor<float>(y{})"),
summaryFeatures(profile).get("test_func_via_func_with_expr").type(profile.typeContext(builder.getQueryProfileRegistry())));
}
@Test
void importedFieldsAreAvailable() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search parent {",
" document parent {",
" field a type tensor(x[5],y[1000]) {",
" indexing: attribute",
" }",
" }",
"}"
));
builder.addSchema(joinLines(
"search child {",
" document child { ",
" field ref type reference<parent> {",
"indexing: attribute | summary",
" }",
" }",
" import field ref.a as imported_a {}",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: sum(attribute(imported_a))",
" }",
" }",
"}"
));
builder.build(true);
}
@Test
void undeclaredQueryFeaturesAreAccepted() throws Exception {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field anyfield type double {" +
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: query(foo) + f() + sum(attribute(anyfield))",
" }",
" function f() {",
" expression: query(bar) + query(baz)",
" }",
" }",
"}"
));
builder.build(true);
String message = logger.findMessage("The following query features");
assertNull(message);
}
@Test
void undeclaredQueryFeaturesAreNotAcceptedWhenStrict() throws Exception {
try {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field anyfield type double {" +
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" strict: true" +
" first-phase {",
" expression: query(foo) + f() + sum(attribute(anyfield))",
" }",
" function f() {",
" expression: query(bar) + query(baz)",
" }",
" }",
"}"
));
builder.build(true);
}
catch (IllegalArgumentException e) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': rank profile 'my_rank_profile' " +
"is strict but is missing a declaration of inputs [query(bar), query(baz), query(foo)]",
Exceptions.toMessageString(e));
}
}
@Test
void undeclaredQueryFeaturesAreAcceptedWithWarningWhenUsingTensors() throws Exception {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field anyfield type tensor(d[2]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: query(foo) + f() + sum(attribute(anyfield))",
" }",
" function f() {",
" expression: query(bar) + query(baz)",
" }",
" }",
"}"
));
builder.build(true);
String message = logger.findMessage("The following inputs");
assertNotNull(message);
assertEquals("WARNING: The following inputs used in rank profile 'my_rank_profile' are not declared and " +
"will be interpreted as scalars, not tensors: [query(bar), query(baz), query(foo)]",
message);
}
@Test
void requireThatUsingArrayWarns() throws Exception {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field foo type array<float> {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: map(attribute(foo), f(x)(42*x))",
" }",
" }",
"}"
));
builder.build(true);
String message = logger.findMessage("collection");
assertNotNull(message);
assertEquals("WARNING: Using attribute(foo) collectiontype: ARRAY as ranking expression input", message);
}
@Test
@Test
void noWarningWhenUsingTensorsWhenQueryFeaturesAreDeclared() throws Exception {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
QueryProfileType myType = new QueryProfileType("mytype");
myType.addField(new FieldDescription("rank.feature.query(foo)",
new TensorFieldType(TensorType.fromSpec("tensor(d[2])"))),
builder.getQueryProfileRegistry().getTypeRegistry());
myType.addField(new FieldDescription("rank.feature.query(bar)",
new TensorFieldType(TensorType.fromSpec("tensor(d[2])"))),
builder.getQueryProfileRegistry().getTypeRegistry());
myType.addField(new FieldDescription("rank.feature.query(baz)",
new TensorFieldType(TensorType.fromSpec("tensor(d[2])"))),
builder.getQueryProfileRegistry().getTypeRegistry());
builder.getQueryProfileRegistry().getTypeRegistry().register(myType);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field anyfield type tensor(d[2]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: sum(query(foo) + f() + sum(attribute(anyfield)))",
" }",
" function f() {",
" expression: query(bar) + query(baz)",
" }",
" }",
"}"
));
builder.build(true);
String message = logger.findMessage("The following query features");
assertNull(message);
}
private Map<String, ReferenceNode> summaryFeatures(RankProfile profile) {
return profile.getSummaryFeatures().stream().collect(Collectors.toMap(f -> f.toString(), f -> f));
}
private static class InspectableDeployLogger implements DeployLogger {
private List<String> messages = new ArrayList<>();
@Override
public void log(Level level, String message) {
messages.add(level + ": " + message);
}
/** Returns the first message containing the given string, or null if none */
public String findMessage(String substring) {
return messages.stream().filter(message -> message.contains(substring)).findFirst().orElse(null);
}
}
} | class RankingExpressionTypeResolverTestCase {
@Test
void tensorFirstPhaseMustProduceDouble() throws Exception {
try {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[3]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: attribute(a)",
" }",
" }",
"}"
));
builder.build(true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': The first-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x[10],y[3])",
Exceptions.toMessageString(expected));
}
}
@Test
void tensorFirstPhaseFromConstantMustProduceDouble() throws Exception {
try {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"schema test {",
" document test { ",
" field a type tensor(d0[3]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" function my_func() {",
" expression: x_tensor*2.0",
" }",
" function inline other_func() {",
" expression: z_tensor+3.0",
" }",
" first-phase {",
" expression: reduce(attribute(a),sum,d0)+y_tensor+my_func+other_func",
" }",
" constants {",
" x_tensor {",
" type: tensor(x{})",
" value: { {x:bar}:17 }",
" }",
" y_tensor tensor(y{}):{{y:foo}:42 }",
" z_tensor tensor(z{}):{qux:666}",
" }",
" }",
"}"
));
builder.build(true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': The first-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x{},y{},z{})",
Exceptions.toMessageString(expected));
}
}
@Test
void tensorSecondPhaseMustProduceDouble() throws Exception {
try {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[3]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: sum(attribute(a))",
" }",
" second-phase {",
" expression: attribute(a)",
" }",
" }",
"}"
));
builder.build(true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': The second-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x[10],y[3])",
Exceptions.toMessageString(expected));
}
}
@Test
void tensorConditionsMustHaveTypeCompatibleBranches() throws Exception {
try {
ApplicationBuilder schemaBuilder = new ApplicationBuilder();
schemaBuilder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[5]) {",
" indexing: attribute",
" }",
" field b type tensor(z[10]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: sum(if(1>0, attribute(a), attribute(b)))",
" }",
" }",
"}"
));
schemaBuilder.build(true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': The first-phase expression is invalid: An if expression must produce compatible types in both alternatives, but the 'true' type is tensor(x[10],y[5]) while the 'false' type is tensor(z[10])" +
"\n'true' branch: attribute(a)" +
"\n'false' branch: attribute(b)",
Exceptions.toMessageString(expected));
}
}
@Test
void testFunctionInvocationTypes() throws Exception {
RankProfileRegistry rankProfileRegistry = new RankProfileRegistry();
ApplicationBuilder builder = new ApplicationBuilder(rankProfileRegistry);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[3]) {",
" indexing: attribute",
" }",
" field b type tensor(z[10]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" function macro1(attribute_to_use) {",
" expression: attribute(attribute_to_use)",
" }",
" summary-features {",
" macro1(a)",
" macro1(b)",
" }",
" }",
"}"
));
builder.build(true);
RankProfile profile =
builder.getRankProfileRegistry().get(builder.getSchema(), "my_rank_profile");
assertEquals(TensorType.fromSpec("tensor(x[10],y[3])"),
summaryFeatures(profile).get("macro1(a)").type(profile.typeContext(builder.getQueryProfileRegistry())));
assertEquals(TensorType.fromSpec("tensor(z[10])"),
summaryFeatures(profile).get("macro1(b)").type(profile.typeContext(builder.getQueryProfileRegistry())));
}
@Test
void testTensorFunctionInvocationTypes_Nested() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[1]) {",
" indexing: attribute",
" }",
" field b type tensor(z[10]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" function return_a() {",
" expression: return_first(attribute(a), attribute(b))",
" }",
" function return_b() {",
" expression: return_second(attribute(a), attribute(b))",
" }",
" function return_first(e1, e2) {",
" expression: e1",
" }",
" function return_second(e1, e2) {",
" expression: return_first(e2, e1)",
" }",
" summary-features {",
" return_a",
" return_b",
" }",
" }",
"}"
));
builder.build(true);
RankProfile profile =
builder.getRankProfileRegistry().get(builder.getSchema(), "my_rank_profile");
assertEquals(TensorType.fromSpec("tensor(x[10],y[1])"),
summaryFeatures(profile).get("return_a").type(profile.typeContext(builder.getQueryProfileRegistry())));
assertEquals(TensorType.fromSpec("tensor(z[10])"),
summaryFeatures(profile).get("return_b").type(profile.typeContext(builder.getQueryProfileRegistry())));
}
@Test
void testAttributeInvocationViaBoundIdentifier() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search newsarticle {",
" document newsarticle {",
" field title type string {",
" indexing {",
" input title | index",
" }",
" weight: 30",
" }",
" field usstaticrank type int {",
" indexing: summary | attribute",
" }",
" field eustaticrank type int {",
" indexing: summary | attribute",
" }",
" }",
" rank-profile default {",
" macro newsboost() { ",
" expression: 200 * matches(title)",
" }",
" macro commonboost(mystaticrank) { ",
" expression: attribute(mystaticrank) + newsboost",
" }",
" macro commonfirstphase(mystaticrank) { ",
" expression: nativeFieldMatch(title) + commonboost(mystaticrank) ",
" }",
" first-phase { expression: commonfirstphase(usstaticrank) }",
" }",
" rank-profile eurank inherits default {",
" first-phase { expression: commonfirstphase(eustaticrank) }",
" }",
"}"));
builder.build(true);
RankProfile profile = builder.getRankProfileRegistry().get(builder.getSchema(), "eurank");
}
@Test
void testTensorFunctionInvocationTypes_NestedSameName() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field a type tensor(x[10],y[1]) {",
" indexing: attribute",
" }",
" field b type tensor(z[10]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" function return_a() {",
" expression: return_first(attribute(a), attribute(b))",
" }",
" function return_b() {",
" expression: return_second(attribute(a), attribute(b))",
" }",
" function return_first(e1, e2) {",
" expression: just_return(e1)",
" }",
" function just_return(e1) {",
" expression: e1",
" }",
" function return_second(e1, e2) {",
" expression: return_first(e2+0, e1)",
" }",
" summary-features {",
" return_a",
" return_b",
" }",
" }",
"}"
));
builder.build(true);
RankProfile profile =
builder.getRankProfileRegistry().get(builder.getSchema(), "my_rank_profile");
assertEquals(TensorType.fromSpec("tensor(x[10],y[1])"),
summaryFeatures(profile).get("return_a").type(profile.typeContext(builder.getQueryProfileRegistry())));
assertEquals(TensorType.fromSpec("tensor(z[10])"),
summaryFeatures(profile).get("return_b").type(profile.typeContext(builder.getQueryProfileRegistry())));
}
@Test
void testTensorFunctionInvocationTypes_viaFuncWithExpr() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search test {",
" document test {",
" field t1 type tensor<float>(y{}) { indexing: attribute | summary }",
" field t2 type tensor<float>(x{}) { indexing: attribute | summary }",
" }",
" rank-profile test {",
" function my_func(t) { expression: sum(t, x) + 1 }",
" function test_func_via_func_with_expr() { expression: call_func_with_expr( attribute(t1), attribute(t2) ) }",
" function call_func_with_expr(a, b) { expression: my_func( a * b ) }",
" summary-features { test_func_via_func_with_expr }",
" }",
"}"));
builder.build(true);
RankProfile profile = builder.getRankProfileRegistry().get(builder.getSchema(), "test");
assertEquals(TensorType.fromSpec("tensor<float>(y{})"),
summaryFeatures(profile).get("test_func_via_func_with_expr").type(profile.typeContext(builder.getQueryProfileRegistry())));
}
@Test
void importedFieldsAreAvailable() throws Exception {
ApplicationBuilder builder = new ApplicationBuilder();
builder.addSchema(joinLines(
"search parent {",
" document parent {",
" field a type tensor(x[5],y[1000]) {",
" indexing: attribute",
" }",
" }",
"}"
));
builder.addSchema(joinLines(
"search child {",
" document child { ",
" field ref type reference<parent> {",
"indexing: attribute | summary",
" }",
" }",
" import field ref.a as imported_a {}",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: sum(attribute(imported_a))",
" }",
" }",
"}"
));
builder.build(true);
}
@Test
void undeclaredQueryFeaturesAreAccepted() throws Exception {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field anyfield type double {" +
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: query(foo) + f() + sum(attribute(anyfield))",
" }",
" function f() {",
" expression: query(bar) + query(baz)",
" }",
" }",
"}"
));
builder.build(true);
String message = logger.findMessage("The following query features");
assertNull(message);
}
@Test
void undeclaredQueryFeaturesAreNotAcceptedWhenStrict() throws Exception {
try {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field anyfield type double {" +
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" strict: true" +
" first-phase {",
" expression: query(foo) + f() + sum(attribute(anyfield))",
" }",
" function f() {",
" expression: query(bar) + query(baz)",
" }",
" }",
"}"
));
builder.build(true);
}
catch (IllegalArgumentException e) {
assertEquals("In schema 'test', rank profile 'my_rank_profile': rank profile 'my_rank_profile' " +
"is strict but is missing a declaration of inputs [query(bar), query(baz), query(foo)]",
Exceptions.toMessageString(e));
}
}
@Test
void undeclaredQueryFeaturesAreAcceptedWithWarningWhenUsingTensors() throws Exception {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field anyfield type tensor(d[2]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: query(foo) + f() + sum(attribute(anyfield))",
" }",
" function f() {",
" expression: query(bar) + query(baz)",
" }",
" }",
"}"
));
builder.build(true);
String message = logger.findMessage("The following inputs");
assertNotNull(message);
assertEquals("WARNING: The following inputs used in rank profile 'my_rank_profile' are not declared and " +
"will be interpreted as scalars, not tensors: [query(bar), query(baz), query(foo)]",
message);
}
@Test
void requireThatUsingArrayWarns() throws Exception {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field foo type array<float> {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: map(attribute(foo), f(x)(42*x))",
" }",
" }",
"}"
));
builder.build(true);
String message = logger.findMessage("collection");
assertNotNull(message);
assertEquals("WARNING: Using attribute(foo) collectiontype: ARRAY in ranking expression will always evaluate to 0.0", message);
}
@Test
@Test
void noWarningWhenUsingTensorsWhenQueryFeaturesAreDeclared() throws Exception {
InspectableDeployLogger logger = new InspectableDeployLogger();
ApplicationBuilder builder = new ApplicationBuilder(logger);
QueryProfileType myType = new QueryProfileType("mytype");
myType.addField(new FieldDescription("rank.feature.query(foo)",
new TensorFieldType(TensorType.fromSpec("tensor(d[2])"))),
builder.getQueryProfileRegistry().getTypeRegistry());
myType.addField(new FieldDescription("rank.feature.query(bar)",
new TensorFieldType(TensorType.fromSpec("tensor(d[2])"))),
builder.getQueryProfileRegistry().getTypeRegistry());
myType.addField(new FieldDescription("rank.feature.query(baz)",
new TensorFieldType(TensorType.fromSpec("tensor(d[2])"))),
builder.getQueryProfileRegistry().getTypeRegistry());
builder.getQueryProfileRegistry().getTypeRegistry().register(myType);
builder.addSchema(joinLines(
"search test {",
" document test { ",
" field anyfield type tensor(d[2]) {",
" indexing: attribute",
" }",
" }",
" rank-profile my_rank_profile {",
" first-phase {",
" expression: sum(query(foo) + f() + sum(attribute(anyfield)))",
" }",
" function f() {",
" expression: query(bar) + query(baz)",
" }",
" }",
"}"
));
builder.build(true);
String message = logger.findMessage("The following query features");
assertNull(message);
}
private Map<String, ReferenceNode> summaryFeatures(RankProfile profile) {
return profile.getSummaryFeatures().stream().collect(Collectors.toMap(f -> f.toString(), f -> f));
}
private static class InspectableDeployLogger implements DeployLogger {
private List<String> messages = new ArrayList<>();
@Override
public void log(Level level, String message) {
messages.add(level + ": " + message);
}
/** Returns the first message containing the given string, or null if none */
public String findMessage(String substring) {
return messages.stream().filter(message -> message.contains(substring)).findFirst().orElse(null);
}
}
} |
```suggestion assertEquals("Don't autoscale: Autoscaling is disabled in single node clusters", fixture.autoscale().toString()); ``` | public void test_autoscaling_in_dev_with_cluster_size_constraint() {
var min = new ClusterResources(4, 1,
new NodeResources(1, 4, 10, 1, NodeResources.DiskSpeed.any));
var max = new ClusterResources(20, 20,
new NodeResources(100, 1000, 1000, 1, NodeResources.DiskSpeed.any));
var fixture = DynamicProvisioningTester.fixture()
.awsSetup(true, Environment.dev)
.capacity(Capacity.from(min, max, IntRange.of(3, 5), false, true, Optional.empty(), ClusterInfo.empty()))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(1.0, 1.0, 1.0), 200);
assertEquals("Don't autoscale: Autoscaling of resources is not enabled for single node clusters", fixture.autoscale().toString());
} | assertEquals("Don't autoscale: Autoscaling of resources is not enabled for single node clusters", fixture.autoscale().toString()); | public void test_autoscaling_in_dev_with_cluster_size_constraint() {
var min = new ClusterResources(4, 1,
new NodeResources(1, 4, 10, 1, NodeResources.DiskSpeed.any));
var max = new ClusterResources(20, 20,
new NodeResources(100, 1000, 1000, 1, NodeResources.DiskSpeed.any));
var fixture = DynamicProvisioningTester.fixture()
.awsSetup(true, Environment.dev)
.capacity(Capacity.from(min, max, IntRange.of(3, 5), false, true, Optional.empty(), ClusterInfo.empty()))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(1.0, 1.0, 1.0), 200);
assertEquals("Don't autoscale: Autoscaling is disabled in single node clusters", fixture.autoscale().toString());
} | class AutoscalingTest {
@Test
public void test_autoscaling_with_gpu() {
var resources = new NodeResources(8, 32, 225, 0.1, fast, StorageType.local, NodeResources.Architecture.x86_64, new NodeResources.GpuResources(1, 16));
var min = new ClusterResources( 8, 1, resources);
var now = new ClusterResources(12, 1, resources);
var max = new ClusterResources(12, 1, resources);
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.clusterType(ClusterSpec.Type.container)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester.clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.8f, 0.17, 0.12), 1, true, true, 100);
var result = fixture.autoscale();
assertTrue(result.resources().isEmpty());
assertEquals(Autoscaling.Status.insufficient, result.status());
}
@Test
public void test_autoscaling_nodes_only() {
var resources = new NodeResources(16, 32, 200, 0.1);
var min = new ClusterResources( 8, 1, resources);
var now = new ClusterResources(12, 1, resources.with(StorageType.remote));
var max = new ClusterResources(12, 1, resources);
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.clusterType(ClusterSpec.Type.content)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester.clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.8f, 0.17, 0.12), 1, true, true, 100);
var result = fixture.autoscale();
assertTrue(result.resources().isEmpty());
assertEquals(Autoscaling.Status.insufficient, result.status());
fixture.tester.clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.08f, 0.17, 0.12), 1, true, true, 100);
fixture.tester().assertResources("Scaling down",
8, 1, 16, 32, 200,
fixture.autoscale());
}
@Test
public void test_autoscaling_single_content_group() {
var now = new ClusterResources(5, 1, new NodeResources(2, 16, 750, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.build();
fixture.loader().applyCpuLoad(0.7f, 10);
var scaledResources = fixture.tester().assertResources("Scaling up since resource usage is too high",
9, 1, 2.8, 6.8, 288.7,
fixture.autoscale());
fixture.deploy(Capacity.from(scaledResources));
assertEquals("Cluster in flux -> No further change", Autoscaling.Status.waiting, fixture.autoscale().status());
fixture.deactivateRetired(Capacity.from(scaledResources));
fixture.loader().applyCpuLoad(0.19f, 10);
assertEquals("Load change is small -> No change", Optional.empty(), fixture.autoscale().resources());
fixture.loader().applyCpuLoad(0.1f, 10);
assertEquals("Too little time passed for downscaling -> No change", Optional.empty(), fixture.autoscale().resources());
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.1f, 10);
assertTrue("Last scaling not completed", fixture.autoscale().resources().isEmpty());
fixture.completeLastScaling();
fixture.tester().clock().advance(Duration.ofDays(7));
fixture.loader().applyCpuLoad(0.1f, 10);
fixture.tester().assertResources("Scaling cpu down since usage has gone down significantly",
9, 1, 1.0, 6.5, 243.9,
fixture.autoscale());
}
/** Using too many resources for a short period is proof we should scale up regardless of the time that takes. */
@Test
public void test_no_autoscaling_with_no_measurements() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
assertTrue(fixture.autoscale().resources().isEmpty());
}
@Test
public void test_no_autoscaling_with_no_measurements_exclusive() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(false).build();
assertTrue(fixture.autoscale().resources().isEmpty());
}
/** Using too many resources for a short period is proof we should scale up regardless of the time that takes. */
@Test
public void test_autoscaling_up_is_fast() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.loader().applyLoad(new Load(0.1, 0.1, 0.1), 3);
fixture.loader().applyLoad(new Load(1.0, 1.0, 1.0), 1);
fixture.tester().assertResources("Scaling up since resource usage is too high",
8, 1, 5.3, 17.0, 75.1,
fixture.autoscale());
}
@Test
public void test_container_scaling_down_exclusive() {
var min = new ClusterResources(2, 1, new NodeResources(4, 8, 50, 0.1));
var now = new ClusterResources(8, 1, new NodeResources(4, 8, 50, 0.1));
var max = new ClusterResources(8, 1, new NodeResources(4, 8, 50, 0.1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(false)
.clusterType(ClusterSpec.Type.container)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester().setScalingDuration(fixture.applicationId(), fixture.clusterSpec.id(), Duration.ofMinutes(5));
fixture.loader().applyLoad(new Load(0.01, 0.38, 0), 5);
fixture.tester().assertResources("Scaling down",
2, 1, 4, 8, 50,
fixture.autoscale());
}
@Test
public void initial_deployment_with_host_sharing_flag() {
var min = new ClusterResources(7, 1, new NodeResources(2.0, 10.0, 384.0, 0.1));
var max = new ClusterResources(7, 1, new NodeResources(2.4, 32.0, 768.0, 0.1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(false)
.capacity(Capacity.from(min, max))
.initialResources(Optional.empty())
.hostSharingFlag()
.build();
fixture.tester().assertResources("Initial resources at min, since flag turns on host sharing",
7, 1, 2.0, 10.0, 384.0,
fixture.currentResources().advertisedResources());
}
@Test
public void initial_deployment_with_host_sharing_flag_and_too_small_min() {
var min = new ClusterResources(1, 1, new NodeResources(0.5, 4.0, 10, 0.1));
var max = new ClusterResources(1, 1, new NodeResources(2.0, 8.0, 50, 0.1));
var fixture = DynamicProvisioningTester.fixture()
.awsSetup(false, Environment.test)
.clusterType(ClusterSpec.Type.container)
.capacity(Capacity.from(min, max))
.initialResources(Optional.empty())
.hostSharingFlag()
.build();
fixture.tester().assertResources("Initial resources at min, since flag turns on host sharing",
1, 1, 0.5, 4.0, 10.0,
fixture.currentResources().advertisedResources());
}
/** When scaling up, disregard underutilized dimensions (memory here) */
@Test
public void test_only_autoscaling_up_quickly() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.setScalingDuration(Duration.ofHours(12));
fixture.loader().applyLoad(new Load(1.0, 0.1, 1.0), 10);
fixture.tester().assertResources("Scaling up (only) since resource usage is too high",
5, 1, 11.7, 14.9, 131.5,
fixture.autoscale());
}
/** When ok to scale down, scale in both directions simultaneously (compare to test_only_autoscaling_up_quickly) */
@Test
public void test_scale_in_both_directions_when_ok_to_scale_down() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.setScalingDuration(Duration.ofHours(12));
fixture.tester.clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(1.0, 0.1, 1.0), 10);
fixture.tester().assertResources("Scaling cpu and disk up and memory down",
5, 1, 11.7, 4.0, 131.5,
fixture.autoscale());
}
@Test
public void test_scale_in_both_directions_when_ok_to_scale_down_exclusive() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(false).build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester.clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(1.0, 0.1, 1.0), 10);
fixture.tester().assertResources("Scaling cpu and disk up, memory follows",
16, 1, 4, 8.0, 28.3,
fixture.autoscale());
}
@Test
public void test_autoscaling_uses_peak() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.loader().applyCpuLoad(0.01, 100);
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up since peak resource usage is too high",
5, 1, 7.1, 11.9, 50.5,
fixture.autoscale());
}
@Test
public void test_autoscaling_uses_peak_exclusive() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(false).build();
fixture.setScalingDuration(Duration.ofHours(9));
fixture.loader().applyCpuLoad(0.01, 100);
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up since peak resource usage is too high",
9, 1, 4, 8.0, 100,
fixture.autoscale());
}
@Test
public void test_autoscaling_uses_peak_preprovisioned() {
var fixture = DynamicProvisioningTester.fixture().hostCount(15).build();
fixture.setScalingDuration(Duration.ofHours(9));
fixture.loader().applyCpuLoad(0.01, 100);
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up cpu since peak resource usage is too high",
5, 1, 7.1, 12.8, 60.0,
fixture.autoscale());
}
@Test
public void test_autoscaling_without_traffic_exclusive() {
var min = new ClusterResources(1, 1, new NodeResources(0.5, 4, 100, 0.3));
var now = new ClusterResources(4, 1, new NodeResources(8, 16, 100, 0.3));
var max = new ClusterResources(4, 1, new NodeResources(16, 32, 500, 0.3));
var fixture = DynamicProvisioningTester.fixture(min, now, max)
.clusterType(ClusterSpec.Type.container)
.awsProdSetup(false)
.build();
var duration = fixture.loader().addMeasurements(new Load(0.04, 0.39, 0.01), 20);
fixture.tester().clock().advance(duration.negated());
fixture.loader().zeroTraffic(20, 1);
fixture.tester().assertResources("Scaled down",
2, 1, 2, 16, 100,
fixture.autoscale());
}
/** We prefer fewer nodes for container clusters as (we assume) they all use the same disk and memory */
@Test
public void test_autoscaling_single_container_group() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).clusterType(ClusterSpec.Type.container).build();
fixture.loader().applyCpuLoad(0.25f, 120);
var scaledResources = fixture.tester().assertResources("Scaling cpu up",
3, 1, 4, 16.0, 40.8,
fixture.autoscale());
fixture.deploy(Capacity.from(scaledResources));
fixture.deactivateRetired(Capacity.from(scaledResources));
fixture.completeLastScaling();
fixture.loader().applyCpuLoad(0.1f, 120);
fixture.tester().assertResources("Scaling down since cpu usage has gone down",
3, 1, 2, 16, 75.0,
fixture.autoscale());
}
@Test
public void autoscaling_handles_disk_setting_changes_exclusive_preprovisioned() {
var resources = new NodeResources(3, 100, 100, 1, slow);
var fixture = DynamicProvisioningTester.fixture()
.hostCount(20)
.hostFlavors(resources)
.initialResources(Optional.of(new ClusterResources(5, 1, resources)))
.capacity(Capacity.from(new ClusterResources(5, 1, resources)))
.build();
assertTrue(fixture.tester().nodeRepository().nodes().list().owner(fixture.applicationId).stream()
.allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == slow));
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.setScalingDuration(Duration.ofHours(6));
fixture.loader().applyCpuLoad(0.25, 120);
ClusterResources min = new ClusterResources( 2, 1,
new NodeResources(1, 1, 1, 1, DiskSpeed.any));
ClusterResources max = new ClusterResources(20, 1,
new NodeResources(100, 1000, 1000, 1, DiskSpeed.any));
var capacity = Capacity.from(min, max);
ClusterResources scaledResources = fixture.tester().assertResources("Scaling up",
13, 1, 1.5, 29.1, 87.3,
fixture.autoscale(capacity));
assertEquals("Disk speed from new capacity is used",
DiskSpeed.any, scaledResources.nodeResources().diskSpeed());
fixture.deploy(Capacity.from(scaledResources));
assertTrue(fixture.nodes().stream()
.allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == DiskSpeed.any));
}
@Test
public void autoscaling_target_preserves_any() {
NodeResources resources = new NodeResources(1, 100, 100, 1);
var capacity = Capacity.from(new ClusterResources( 2, 1, resources.with(DiskSpeed.any)),
new ClusterResources( 10, 1, resources.with(DiskSpeed.any)));
var fixture = DynamicProvisioningTester.fixture()
.capacity(capacity)
.awsProdSetup(true)
.initialResources(Optional.empty())
.build();
assertTrue(fixture.tester().nodeRepository().applications().get(fixture.applicationId).get().cluster(fixture.clusterSpec.id()).get().target().resources().isEmpty());
fixture.deploy();
assertEquals(DiskSpeed.any, fixture.nodes().first().get().allocation().get().requestedResources().diskSpeed());
fixture.deactivateRetired(capacity);
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyCpuLoad(0.8, 120);
assertEquals(DiskSpeed.any, fixture.autoscale(capacity).resources().get().nodeResources().diskSpeed());
}
@Test
public void autoscaling_respects_upper_limit() {
var min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
var now = new ClusterResources(5, 1, new NodeResources(1.9, 70, 70, 1));
var max = new ClusterResources( 6, 1, new NodeResources(2.4, 78, 79, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max)).build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyLoad(new Load(0.25, 0.95, 0.95), 120);
fixture.tester().assertResources("Scaling up to limit since resource usage is too high",
6, 1, 2.4, 78.0, 79.0,
fixture.autoscale());
}
@Test
public void autoscaling_respects_lower_limit() {
var min = new ClusterResources( 4, 1, new NodeResources(1.8, 7.4, 8.5, 1));
var max = new ClusterResources( 6, 1, new NodeResources(2.4, 78, 79, 1));
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).capacity(Capacity.from(min, max)).build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.05f, 0.05f, 0.05f), 120);
fixture.tester().assertResources("Scaling down to limit since resource usage is low",
4, 1, 1.8, 7.4, 23.4,
fixture.autoscale());
}
@Test
public void autoscaling_with_unspecified_resources_use_defaults_exclusive() {
var min = new ClusterResources( 2, 1, NodeResources.unspecified());
var max = new ClusterResources( 6, 1, NodeResources.unspecified());
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(false)
.initialResources(Optional.empty())
.capacity(Capacity.from(min, max))
.build();
NodeResources defaultResources =
new CapacityPolicies(fixture.tester().nodeRepository()).specifyFully(NodeResources.unspecified(), fixture.clusterSpec, fixture.applicationId);
fixture.tester().assertResources("Min number of nodes and default resources",
2, 1, defaultResources,
fixture.nodes().toResources());
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.25, 0.95, 0.95), 120);
fixture.tester().assertResources("Scaling up",
5, 1,
defaultResources.vcpu(), defaultResources.memoryGb(), defaultResources.diskGb(),
fixture.autoscale());
}
@Test
public void autoscaling_respects_group_limit() {
var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
var now = new ClusterResources(5, 5, new NodeResources(3.0, 10, 10, 1));
var max = new ClusterResources(18, 6, new NodeResources(100, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.5, 240);
fixture.tester().assertResources("Scaling cpu up",
6, 6, 4.5, 7.4, 22.3,
fixture.autoscale());
}
@Test
public void autoscaling_respects_group_size_limit() {
var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 10, 1));
var now = new ClusterResources(5, 5, new NodeResources(3.0, 10, 100, 1));
var max = new ClusterResources(18, 6, new NodeResources(100, 1000, 10000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max, IntRange.of(2, 3), false, true, Optional.empty(), ClusterInfo.empty()))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.6, 240);
fixture.tester().assertResources("Scaling cpu up",
12, 6, 3.0, 4.2, 27.5,
fixture.autoscale());
}
@Test
public void test_autoscaling_limits_when_min_equals_max() {
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).capacity(Capacity.from(min, min)).build();
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyCpuLoad(0.25, 120);
assertEquals(Autoscaling.Status.unavailable, fixture.autoscale().status());
}
@Test
public void container_prefers_remote_disk_when_no_local_match_exclusive() {
var resources = new ClusterResources( 2, 1, new NodeResources(3, 100, 50, 1));
var local = new NodeResources(3, 100, 75, 1, fast, StorageType.local);
var remote = new NodeResources(3, 100, 50, 1, fast, StorageType.remote);
var fixture = DynamicProvisioningTester.fixture()
.dynamicProvisioning(true)
.allowHostSharing(false)
.clusterType(ClusterSpec.Type.container)
.hostFlavors(local, remote)
.capacity(Capacity.from(resources))
.initialResources(Optional.of(new ClusterResources(3, 1, resources.nodeResources())))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.01, 0.01, 0.01), 120);
Autoscaling suggestion = fixture.suggest();
fixture.tester().assertResources("Choosing the remote disk flavor as it has less disk",
2, 1, 3.0, 100.0, 10.0,
suggestion);
assertEquals("Choosing the remote disk flavor as it has less disk",
StorageType.remote, suggestion.resources().get().nodeResources().storageType());
}
@Test
public void suggestions_ignores_limits() {
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).capacity(Capacity.from(min, min)).build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(1.0, 120);
fixture.tester().assertResources("Suggesting above capacity limit",
5, 1, 10.2, 11.9, 50.5,
fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
}
@Test
public void suggestions_ignores_limits_exclusive() {
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(false).capacity(Capacity.from(min, min)).build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(1.0, 120);
fixture.tester().assertResources("Suggesting above capacity limit",
13, 1, 4, 8, 100.0,
fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
}
@Test
public void not_using_out_of_service_measurements() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.9, 0.6, 0.7), 1, false, true, 120);
assertTrue("Not scaling up since nodes were measured while cluster was out of service",
fixture.autoscale().resources().isEmpty());
}
@Test
public void not_using_unstable_measurements() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.9, 0.6, 0.7), 1, true, false, 120);
assertTrue("Not scaling up since nodes were measured while cluster was unstable",
fixture.autoscale().resources().isEmpty());
}
@Test
public void too_small_disk_compared_to_memory() {
var resources = new ClusterResources(2, 1, new NodeResources(1, 10, 19, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(resources))
.build();
assertEquals(2, fixture.tester().provisionLogger().applicationLog().size());
assertEquals("WARNING: Requested disk (19.0Gb) in cluster 'cluster1' is not large enough to fit core/heap dumps. Minimum recommended disk resources is 2x memory for containers and 3x memory for content",
fixture.tester().provisionLogger().applicationLog().get(0));
}
@Test
public void autoscaling_shouldnt_choose_too_small_disk_compared_to_memory() {
var min = new ClusterResources(10, 1, new NodeResources(1, 10, 19, 1));
var now = new ClusterResources(10, 1, new NodeResources(5, 50, 150, 1));
var max = new ClusterResources(10, 1, new NodeResources(10, 100, 200, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.5, 0.8, 0.1), 120);
fixture.tester().assertResources("Suggesting resources where disk is 3x memory (this is a content cluster)",
11, 1, 13.0, 60.0, 179.9,
fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
fixture.tester().assertResources("Autoscaling to resources where disk is 3x memory (this is a content cluster)",
10, 1, 10.0, 66.2, 198.6,
fixture.tester().autoscale(fixture.applicationId, fixture.clusterSpec, Capacity.from(min, max)));
}
@Test
public void autoscaling_shouldnt_choose_too_small_disk_compared_to_memory_exclusive() {
var min = new ClusterResources(10, 1, new NodeResources(1, 10, 19, 1, DiskSpeed.any, StorageType.remote));
var now = new ClusterResources(10, 1, new NodeResources(16, 64, 192, 1, DiskSpeed.any, StorageType.remote));
var max = new ClusterResources(10, 1, new NodeResources(30, 200, 500, 1, DiskSpeed.any, StorageType.remote));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(false)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.5, 0.8, 0.1), 120);
fixture.tester().assertResources("Suggesting resources where disk is 3x memory (this is a content cluster)",
13, 1, 36.0, 72.0, 900.0,
fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
fixture.tester().assertResources("Autoscaling to resources where disk is 3x memory (this is a content cluster)",
10, 1, 16.0, 64, 247.5,
fixture.tester().autoscale(fixture.applicationId, fixture.clusterSpec, Capacity.from(min, max)));
}
@Test
public void test_autoscaling_group_size_unconstrained() {
var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
var now = new ClusterResources(5, 5, new NodeResources(3, 100, 300, 1));
var max = new ClusterResources(20, 20, new NodeResources(10, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.9, 120);
fixture.tester().assertResources("Scaling up to 2 nodes, scaling memory and disk down at the same time",
8, 4, 7.4, 41.5, 124.6,
fixture.autoscale());
}
@Test
public void test_autoscaling_group_size_1() {
var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
var now = new ClusterResources(5, 5, new NodeResources(3, 100, 100, 1));
var max = new ClusterResources(20, 20, new NodeResources(10, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max, IntRange.of(1), false, true, Optional.empty(), ClusterInfo.empty()))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.setScalingDuration(Duration.ofHours(6));
fixture.loader().applyCpuLoad(0.9, 120);
fixture.tester().assertResources("Scaling up to 2 nodes, scaling memory and disk down at the same time",
7, 7, 9.4, 78.6, 235.8,
fixture.autoscale());
}
@Test
public void test_autoscaling_groupsize_by_cpu_read_dominated() {
var min = new ClusterResources( 3, 1, new NodeResources(1, 1, 1, 1));
var now = new ClusterResources( 6, 2, new NodeResources(3, 100, 100, 1));
var max = new ClusterResources(21, 7, new NodeResources(100, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
Duration timePassed = fixture.loader().addCpuMeasurements(0.25, 120);
fixture.tester().clock().advance(timePassed.negated());
fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 200.0 : 100.0, t -> 10.0);
fixture.tester().assertResources("Changing to 1 group is cheaper",
7, 1, 2.5, 43.3, 129.8,
fixture.autoscale());
}
/** Same as above but mostly write traffic, which favors smaller groups */
@Test
public void test_autoscaling_groupsize_by_cpu_write_dominated() {
var min = new ClusterResources( 3, 1, new NodeResources(1, 1, 1, 1));
var now = new ClusterResources(6, 2, new NodeResources(3, 100, 100, 1));
var max = new ClusterResources(21, 7, new NodeResources(100, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
Duration timePassed = fixture.loader().addCpuMeasurements(0.25, 120);
fixture.tester().clock().advance(timePassed.negated());
fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 20.0 : 10.0, t -> 100.0);
fixture.tester().assertResources("Scaling down since resource usage is too high, changing to 1 group is cheaper",
5, 1, 1.0, 62.6, 187.7,
fixture.autoscale());
}
@Test
public void test_autoscaling_groupsize() {
var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
var now = new ClusterResources(6, 2, new NodeResources(10, 100, 100, 1));
var max = new ClusterResources(30, 30, new NodeResources(100, 100, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyMemLoad(1.0, 1000);
fixture.tester().assertResources("Increase group size to reduce memory load",
8, 2, 13.9, 96.3, 288.8,
fixture.autoscale());
}
@Test
public void test_autoscaling_memory_down() {
var min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
var now = new ClusterResources(6, 1, new NodeResources(3, 100, 100, 1));
var max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.16, 0.02, 0.5), 120);
fixture.tester().assertResources("Scaling down memory",
6, 1, 2.1, 4.0, 96.2,
fixture.autoscale());
}
/** A scale-down is only suggested once sufficient time has passed since deployment. */
@Test
public void scaling_down_only_after_delay() {
    var f = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
    // Low load immediately after deployment: no advice yet.
    f.loader().applyCpuLoad(0.02, 5);
    assertTrue("Too soon after initial deployment", f.autoscale().resources().isEmpty());
    // The same low load after the waiting period does produce a scale-down.
    f.tester().clock().advance(Duration.ofHours(12 * 3 + 1));
    f.loader().applyCpuLoad(0.02, 5);
    f.tester().assertResources("Scaling down since enough time has passed",
                               5, 1, 1.0, 11.9, 50.5,
                               f.autoscale());
}
/** Verifies that the stored read share (this cluster's fraction of global reads) feeds into bcp headroom. */
@Test
public void test_autoscaling_considers_read_share() {
// min and max use identical node resources, so only the node count can change.
var min = new ClusterResources( 1, 1, new NodeResources(3, 100, 100, 1));
var max = new ClusterResources(10, 1, new NodeResources(3, 100, 100, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.capacity(Capacity.from(min, max))
.build();
fixture.tester.clock().advance(Duration.ofDays(1));
fixture.loader().applyCpuLoad(0.5, 120);
// No read share stored yet: a default bcp headroom is assumed.
fixture.tester().assertResources("Advice to scale up since we set aside for bcp by default",
7, 1, 3, 100, 100,
fixture.autoscale());
fixture.loader().applyCpuLoad(0.5, 120);
// current = 0.25 of max = 0.5 — matches the default assumption, so same result.
fixture.storeReadShare(0.25, 0.5);
fixture.tester().assertResources("Half of global share is the same as the default assumption used above",
7, 1, 3, 100, 100,
fixture.autoscale());
fixture.tester.clock().advance(Duration.ofDays(1));
fixture.loader().applyCpuLoad(0.5, 120);
// This cluster already carries the whole read share: no extra bcp headroom needed.
fixture.storeReadShare(0.5, 0.5);
fixture.tester().assertResources("Advice to scale down since we don't need room for bcp",
4, 1, 3, 100, 100,
fixture.autoscale());
}
/** Verifies that the observed query growth rate, relative to the scaling duration, shifts the cpu target. */
@Test
public void test_autoscaling_considers_growth_rate() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(2));
// Case 1: a single spike at t == 0 yields no usable growth-rate signal.
Duration timeAdded = fixture.loader().addLoadMeasurements(100, t -> t == 0 ? 200.0 : 100.0, t -> 0.0);
// Rewind the clock so the cpu measurements below cover the same window as the load measurements.
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.25, 200);
fixture.tester().assertResources("Scale up since we assume we need 2x cpu for growth when no scaling time data",
5, 1, 2.6, 11.9, 50.5,
fixture.autoscale());
// Case 2: linear growth that is slower than the (8h) scaling duration.
fixture.setScalingDuration(Duration.ofHours(8));
fixture.tester().clock().advance(Duration.ofDays(2));
timeAdded = fixture.loader().addLoadMeasurements(100, t -> 100.0 + (t < 50 ? t : 100 - t), t -> 0.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.20, 200);
fixture.tester().assertResources("Scale down since observed growth is slower than scaling time",
5, 1, 1.6, 11.9, 50.5,
fixture.autoscale());
// Case 3: cubic growth that outpaces the scaling duration.
fixture.setScalingDuration(Duration.ofHours(8));
fixture.tester().clock().advance(Duration.ofDays(2));
timeAdded = fixture.loader().addLoadMeasurements(100,
t -> 100.0 + (t < 50 ? t * t * t : 125000 - (t - 49) * (t - 49) * (t - 49)),
t -> 0.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.25, 200);
fixture.tester().assertResources("Scale up since observed growth is faster than scaling time",
5, 1, 2.4, 11.9, 50.5,
fixture.autoscale());
}
/** A fast observed growth rate is discounted when the absolute query rate is too low to trust it. */
@Test
public void test_autoscaling_weights_growth_rate_by_confidence() {
    var f = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
    f.setScalingDuration(Duration.ofHours(6));
    // Shrinks the query rate so the growth signal carries low confidence.
    double scalingFactor = 1.0/6000;
    f.tester().clock().advance(Duration.ofDays(2));
    Duration timeAdded = f.loader().addLoadMeasurements(100,
                                                        t -> scalingFactor * (100.0 + (t < 50 ? t * t * t : 155000 - (t - 49) * (t - 49) * (t - 49))),
                                                        t -> 0.0);
    // Rewind so the cpu measurements overlap the load measurements.
    f.tester.clock().advance(timeAdded.negated());
    f.loader().addCpuMeasurements(0.7, 200);
    f.tester().assertResources("Scale up slightly since observed growth is faster than scaling time, but we are not confident",
                               5, 1, 2.2, 11.9, 50.5,
                               f.autoscale());
}
/** Verifies that the query/write rate mix shifts the cpu target: query-heavy scales up, write-heavy scales down. */
@Test
public void test_autoscaling_considers_query_vs_write_rate() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.loader().addCpuMeasurements(0.4, 220);
fixture.tester().clock().advance(Duration.ofDays(2));
// Phase 1: query rate equals write rate (100 each after the t == 0 spike).
var timeAdded = fixture.loader().addLoadMeasurements(100, t -> t == 0 ? 200.0 : 100.0, t -> 100.0);
// Rewind the clock so the following cpu measurements overlap the load measurements.
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester.assertResources("Query and write load is equal -> scale up somewhat",
5, 1, 2.9, 11.9, 50.5,
fixture.autoscale());
// Phase 2: queries dominate (400 vs 100).
fixture.tester().clock().advance(Duration.ofDays(2));
timeAdded = fixture.loader().addLoadMeasurements(100, t -> t == 0 ? 800.0 : 400.0, t -> 100.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Query load is 4x write load -> scale up more",
5, 1, 2.2, 11.9, 50.5,
fixture.autoscale());
// Phase 3: writes dominate (1000 vs 100).
fixture.tester().clock().advance(Duration.ofDays(2));
timeAdded = fixture.loader().addLoadMeasurements(100, t -> t == 0 ? 200.0 : 100.0, t -> 1000.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Write load is 10x query load -> scale down",
5, 1, 1.3, 11.9, 50.5,
fixture.autoscale());
// Phase 4: queries only.
fixture.tester().clock().advance(Duration.ofDays(2));
timeAdded = fixture.loader().addLoadMeasurements(100, t -> t == 0 ? 200.0 : 100.0, t-> 0.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Query only -> larger",
5, 1, 3.5, 11.9, 50.5,
fixture.autoscale());
// Phase 5: writes only.
fixture.tester().clock().advance(Duration.ofDays(2));
timeAdded = fixture.loader().addLoadMeasurements(100, t -> 0.0, t -> 100.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Write only -> smallest possible",
5, 1, 1.0, 11.9, 50.5,
fixture.autoscale());
}
/** In dev zones the policies limit the cluster to one node, so no scale-up is attempted. */
@Test
public void test_autoscaling_in_dev_preprovisioned() {
    var f = DynamicProvisioningTester.fixture()
                                     .hostCount(5)
                                     .zone(new Zone(Environment.dev, RegionName.from("us-east")))
                                     .build();
    f.tester().clock().advance(Duration.ofDays(2));
    // Saturate every resource dimension.
    f.loader().applyLoad(new Load(1.0, 1.0, 1.0), 200);
    assertTrue("Not attempting to scale up because policies dictate we'll only get one node",
               f.autoscale().resources().isEmpty());
}
/**
 * Same setup as test_autoscaling_in_dev_preprovisioned(), just with required = true:
 * when resources are 'required' we scale even in dev.
 */
// Bug fix: the method was annotated with @Test twice (with this javadoc in between).
// JUnit 4's @Test is not @Repeatable, so the duplicate annotation does not compile.
@Test
public void test_autoscaling_in_dev_with_required_resources_preprovisioned() {
    var requiredCapacity =
            Capacity.from(new ClusterResources(2, 1,
                                               new NodeResources(1, 1, 1, 1, NodeResources.DiskSpeed.any)),
                          new ClusterResources(20, 1,
                                               new NodeResources(100, 1000, 1000, 1, NodeResources.DiskSpeed.any)),
                          IntRange.empty(),
                          true,   // required
                          true,
                          Optional.empty(),
                          ClusterInfo.empty());
    var fixture = DynamicProvisioningTester.fixture()
                                           .hostCount(5)
                                           .capacity(requiredCapacity)
                                           .zone(new Zone(Environment.dev, RegionName.from("us-east")))
                                           .build();
    fixture.tester().clock().advance(Duration.ofDays(2));
    fixture.loader().applyLoad(new Load(1.0, 1.0, 1.0), 200);
    fixture.tester().assertResources("We scale even in dev because resources are 'required'",
                                     3, 1, 1.0, 13.4, 62.5,
                                     fixture.autoscale());
}
/** Required capacity with unspecified node resources: defaults apply, and we scale even in dev. */
@Test
public void test_autoscaling_in_dev_with_required_unspecified_resources_preprovisioned() {
    var capacity =
            Capacity.from(new ClusterResources(1, 1, NodeResources.unspecified()),
                          new ClusterResources(3, 1, NodeResources.unspecified()),
                          IntRange.empty(),
                          true,   // required
                          true,
                          Optional.empty(),
                          ClusterInfo.empty());
    var f = DynamicProvisioningTester.fixture()
                                     .hostCount(5)
                                     .capacity(capacity)
                                     .zone(new Zone(Environment.dev, RegionName.from("us-east")))
                                     .build();
    f.tester().clock().advance(Duration.ofDays(2));
    f.loader().applyLoad(new Load(1.0, 1.0, 1.0), 200);
    f.tester().assertResources("We scale even in dev because resources are required",
                               3, 1, 1.5, 8, 50,
                               f.autoscale());
}
/** A scale-down is rejected when it would leave almost no headroom before scaling up again. */
@Test
public void test_scaling_down_leaves_too_little_headroom() {
    var nodeResources = new NodeResources(16, 32, 100, 1, NodeResources.DiskSpeed.any);
    var lower = new ClusterResources( 3, 3, nodeResources);
    var current = new ClusterResources( 4, 4, nodeResources);
    var upper = new ClusterResources( 5, 5, nodeResources);
    var f = DynamicProvisioningTester.fixture()
                                     .awsProdSetup(false)
                                     .capacity(Capacity.from(lower, upper, IntRange.from(1)))
                                     .clusterType(ClusterSpec.Type.content)
                                     .initialResources(Optional.of(current))
                                     .build();
    f.loader().applyCpuLoad(0.17, 10);
    assertTrue("Not scaling down as that would leave just 4.5% headroom before needing to scale up again",
               f.autoscale().resources().isEmpty());
}
/** Verifies that toggling cluster exclusivity across deployments does not change the assigned resources here. */
@Test
public void test_changing_exclusivity() {
var min = new ClusterResources( 2, 1, new NodeResources( 3, 8, 100, 1));
var max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.cluster(clusterSpec(true))
.capacity(Capacity.from(min, max))
.initialResources(Optional.empty())
.build();
fixture.tester().assertResources("Initial deployment at minimum",
2, 1, 4, 8, 100,
fixture.currentResources().advertisedResources());
// Redeploy with exclusive = false.
fixture.tester().deploy(fixture.applicationId(), clusterSpec(false), fixture.capacity());
fixture.loader().applyLoad(new Load(0.1, 0.1, 0.1), 5);
fixture.tester().assertResources("Exclusive nodes makes no difference here",
2, 1, 4, 8, 100.0,
fixture.autoscale());
// Redeploy with exclusive = true again.
fixture.tester().deploy(fixture.applicationId(), clusterSpec(true), fixture.capacity());
fixture.loader().applyLoad(new Load(0.1, 0.1, 0.1), 5);
fixture.tester().assertResources("Reverts to the initial resources",
2, 1, 4, 8, 100,
fixture.currentResources().advertisedResources());
}
/** Tests an autoscaling scenario which should cause in-place resize. */
@Test
public void test_resize() {
// Node count is fixed at 7 (min == now == max in count), so only resources can change -> resize.
var min = new ClusterResources(7, 1, new NodeResources( 2, 10, 384, 1));
var now = new ClusterResources(7, 1, new NodeResources( 3.4, 16.2, 450.1, 1));
var max = new ClusterResources(7, 1, new NodeResources( 4, 32, 768, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.capacity(Capacity.from(min, max))
.initialResources(Optional.of(now))
.build();
var initialNodes = fixture.nodes().asList();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.06, 0.52, 0.27), 100);
var autoscaling = fixture.autoscale();
fixture.tester().assertResources("Scaling down",
7, 1, 2, 15.8, 384.0,
autoscaling);
fixture.deploy(Capacity.from(autoscaling.resources().get()));
// In-place resize: the same nodes remain allocated after deploying the smaller resources.
assertEquals("Initial nodes are kept", initialNodes, fixture.nodes().asList());
}
/** Builds a content cluster spec for the 'test' cluster with the given exclusivity. */
private ClusterSpec clusterSpec(boolean exclusive) {
    return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"))
                      .vespaVersion("8.1.2")
                      .exclusive(exclusive)
                      .build();
}
/**
 * Two host flavors (arm64 and x86_64) match the wanted resources equally well; when scaling from an
 * all-x86 cluster, the autoscaler should keep x86 so existing nodes need not be replaced.
 */
@Test
public void change_not_requiring_node_replacement_is_preferred() {
var min = new ClusterResources(5, 1, new NodeResources( 16, 64, 200, 1, DiskSpeed.fast, StorageType.remote));
var max = new ClusterResources(6, 1, new NodeResources( 16, 64, 200, 1, DiskSpeed.fast, StorageType.remote));
// Note: arm flavor is listed first, so it would be the default match.
List<Flavor> flavors = List.of(new Flavor("arm_16", new NodeResources( 16, 64, 200, 1, DiskSpeed.fast, StorageType.remote, NodeResources.Architecture.arm64)),
new Flavor("x86_16", new NodeResources( 16, 64, 200, 1, DiskSpeed.fast, StorageType.remote, NodeResources.Architecture.x86_64)));
var fixture = DynamicProvisioningTester.fixture()
.clusterType(ClusterSpec.Type.container)
.hostFlavors(flavors)
.awsZone(false, Environment.prod)
.capacity(Capacity.from(min, max))
.initialResources(Optional.of(min.with(min.nodeResources().with(NodeResources.Architecture.x86_64))))
.build();
var nodes = fixture.nodes().not().retired().asList();
assertEquals(5, nodes.size());
assertEquals(NodeResources.Architecture.x86_64, nodes.get(0).resources().architecture());
fixture.tester().clock().advance(Duration.ofHours(5));
// Load pushes the cluster from 5 to 6 nodes.
fixture.loader().applyCpuLoad(0.27, 10);
var autoscaling = fixture.autoscale();
fixture.deploy(Capacity.from(autoscaling.resources().get()));
nodes = fixture.nodes().not().retired().asList();
assertEquals(6, nodes.size());
assertEquals("We stay with x86 even though the first matching flavor is arm",
NodeResources.Architecture.x86_64, nodes.get(0).resources().architecture());
}
} | class AutoscalingTest {
@Test
public void test_autoscaling_with_gpu() {
var resources = new NodeResources(8, 32, 225, 0.1, fast, StorageType.local, NodeResources.Architecture.x86_64, new NodeResources.GpuResources(1, 16));
var min = new ClusterResources( 8, 1, resources);
var now = new ClusterResources(12, 1, resources);
var max = new ClusterResources(12, 1, resources);
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.clusterType(ClusterSpec.Type.container)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester.clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.8f, 0.17, 0.12), 1, true, true, 100);
var result = fixture.autoscale();
assertTrue(result.resources().isEmpty());
assertEquals(Autoscaling.Status.insufficient, result.status());
}
@Test
public void test_autoscaling_nodes_only() {
var resources = new NodeResources(16, 32, 200, 0.1);
var min = new ClusterResources( 8, 1, resources);
var now = new ClusterResources(12, 1, resources.with(StorageType.remote));
var max = new ClusterResources(12, 1, resources);
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.clusterType(ClusterSpec.Type.content)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester.clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.8f, 0.17, 0.12), 1, true, true, 100);
var result = fixture.autoscale();
assertTrue(result.resources().isEmpty());
assertEquals(Autoscaling.Status.insufficient, result.status());
fixture.tester.clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.08f, 0.17, 0.12), 1, true, true, 100);
fixture.tester().assertResources("Scaling down",
8, 1, 16, 32, 200,
fixture.autoscale());
}
@Test
public void test_autoscaling_single_content_group() {
var now = new ClusterResources(5, 1, new NodeResources(2, 16, 750, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.build();
fixture.loader().applyCpuLoad(0.7f, 10);
var scaledResources = fixture.tester().assertResources("Scaling up since resource usage is too high",
9, 1, 2.8, 6.8, 288.7,
fixture.autoscale());
fixture.deploy(Capacity.from(scaledResources));
assertEquals("Cluster in flux -> No further change", Autoscaling.Status.waiting, fixture.autoscale().status());
fixture.deactivateRetired(Capacity.from(scaledResources));
fixture.loader().applyCpuLoad(0.19f, 10);
assertEquals("Load change is small -> No change", Optional.empty(), fixture.autoscale().resources());
fixture.loader().applyCpuLoad(0.1f, 10);
assertEquals("Too little time passed for downscaling -> No change", Optional.empty(), fixture.autoscale().resources());
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.1f, 10);
assertTrue("Last scaling not completed", fixture.autoscale().resources().isEmpty());
fixture.completeLastScaling();
fixture.tester().clock().advance(Duration.ofDays(7));
fixture.loader().applyCpuLoad(0.1f, 10);
fixture.tester().assertResources("Scaling cpu down since usage has gone down significantly",
9, 1, 1.0, 6.5, 243.9,
fixture.autoscale());
}
/** Using too many resources for a short period is proof we should scale up regardless of the time that takes. */
@Test
public void test_no_autoscaling_with_no_measurements() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
assertTrue(fixture.autoscale().resources().isEmpty());
}
@Test
public void test_no_autoscaling_with_no_measurements_exclusive() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(false).build();
assertTrue(fixture.autoscale().resources().isEmpty());
}
/** Using too many resources for a short period is proof we should scale up regardless of the time that takes. */
@Test
public void test_autoscaling_up_is_fast() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.loader().applyLoad(new Load(0.1, 0.1, 0.1), 3);
fixture.loader().applyLoad(new Load(1.0, 1.0, 1.0), 1);
fixture.tester().assertResources("Scaling up since resource usage is too high",
8, 1, 5.3, 17.0, 75.1,
fixture.autoscale());
}
@Test
public void test_container_scaling_down_exclusive() {
var min = new ClusterResources(2, 1, new NodeResources(4, 8, 50, 0.1));
var now = new ClusterResources(8, 1, new NodeResources(4, 8, 50, 0.1));
var max = new ClusterResources(8, 1, new NodeResources(4, 8, 50, 0.1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(false)
.clusterType(ClusterSpec.Type.container)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester().setScalingDuration(fixture.applicationId(), fixture.clusterSpec.id(), Duration.ofMinutes(5));
fixture.loader().applyLoad(new Load(0.01, 0.38, 0), 5);
fixture.tester().assertResources("Scaling down",
2, 1, 4, 8, 50,
fixture.autoscale());
}
@Test
public void initial_deployment_with_host_sharing_flag() {
var min = new ClusterResources(7, 1, new NodeResources(2.0, 10.0, 384.0, 0.1));
var max = new ClusterResources(7, 1, new NodeResources(2.4, 32.0, 768.0, 0.1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(false)
.capacity(Capacity.from(min, max))
.initialResources(Optional.empty())
.hostSharingFlag()
.build();
fixture.tester().assertResources("Initial resources at min, since flag turns on host sharing",
7, 1, 2.0, 10.0, 384.0,
fixture.currentResources().advertisedResources());
}
@Test
public void initial_deployment_with_host_sharing_flag_and_too_small_min() {
var min = new ClusterResources(1, 1, new NodeResources(0.5, 4.0, 10, 0.1));
var max = new ClusterResources(1, 1, new NodeResources(2.0, 8.0, 50, 0.1));
var fixture = DynamicProvisioningTester.fixture()
.awsSetup(false, Environment.test)
.clusterType(ClusterSpec.Type.container)
.capacity(Capacity.from(min, max))
.initialResources(Optional.empty())
.hostSharingFlag()
.build();
fixture.tester().assertResources("Initial resources at min, since flag turns on host sharing",
1, 1, 0.5, 4.0, 10.0,
fixture.currentResources().advertisedResources());
}
/** When scaling up, disregard underutilized dimensions (memory here) */
@Test
public void test_only_autoscaling_up_quickly() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.setScalingDuration(Duration.ofHours(12));
fixture.loader().applyLoad(new Load(1.0, 0.1, 1.0), 10);
fixture.tester().assertResources("Scaling up (only) since resource usage is too high",
5, 1, 11.7, 14.9, 131.5,
fixture.autoscale());
}
/** When ok to scale down, scale in both directions simultaneously (compare to test_only_autoscaling_up_quickly) */
@Test
public void test_scale_in_both_directions_when_ok_to_scale_down() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.setScalingDuration(Duration.ofHours(12));
fixture.tester.clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(1.0, 0.1, 1.0), 10);
fixture.tester().assertResources("Scaling cpu and disk up and memory down",
5, 1, 11.7, 4.0, 131.5,
fixture.autoscale());
}
@Test
public void test_scale_in_both_directions_when_ok_to_scale_down_exclusive() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(false).build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester.clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(1.0, 0.1, 1.0), 10);
fixture.tester().assertResources("Scaling cpu and disk up, memory follows",
16, 1, 4, 8.0, 28.3,
fixture.autoscale());
}
@Test
public void test_autoscaling_uses_peak() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.loader().applyCpuLoad(0.01, 100);
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up since peak resource usage is too high",
5, 1, 7.1, 11.9, 50.5,
fixture.autoscale());
}
@Test
public void test_autoscaling_uses_peak_exclusive() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(false).build();
fixture.setScalingDuration(Duration.ofHours(9));
fixture.loader().applyCpuLoad(0.01, 100);
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up since peak resource usage is too high",
9, 1, 4, 8.0, 100,
fixture.autoscale());
}
@Test
public void test_autoscaling_uses_peak_preprovisioned() {
var fixture = DynamicProvisioningTester.fixture().hostCount(15).build();
fixture.setScalingDuration(Duration.ofHours(9));
fixture.loader().applyCpuLoad(0.01, 100);
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up cpu since peak resource usage is too high",
5, 1, 7.1, 12.8, 60.0,
fixture.autoscale());
}
@Test
public void test_autoscaling_without_traffic_exclusive() {
var min = new ClusterResources(1, 1, new NodeResources(0.5, 4, 100, 0.3));
var now = new ClusterResources(4, 1, new NodeResources(8, 16, 100, 0.3));
var max = new ClusterResources(4, 1, new NodeResources(16, 32, 500, 0.3));
var fixture = DynamicProvisioningTester.fixture(min, now, max)
.clusterType(ClusterSpec.Type.container)
.awsProdSetup(false)
.build();
var duration = fixture.loader().addMeasurements(new Load(0.04, 0.39, 0.01), 20);
fixture.tester().clock().advance(duration.negated());
fixture.loader().zeroTraffic(20, 1);
fixture.tester().assertResources("Scaled down",
2, 1, 2, 16, 100,
fixture.autoscale());
}
/** We prefer fewer nodes for container clusters as (we assume) they all use the same disk and memory */
@Test
public void test_autoscaling_single_container_group() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).clusterType(ClusterSpec.Type.container).build();
fixture.loader().applyCpuLoad(0.25f, 120);
var scaledResources = fixture.tester().assertResources("Scaling cpu up",
3, 1, 4, 16.0, 40.8,
fixture.autoscale());
fixture.deploy(Capacity.from(scaledResources));
fixture.deactivateRetired(Capacity.from(scaledResources));
fixture.completeLastScaling();
fixture.loader().applyCpuLoad(0.1f, 120);
fixture.tester().assertResources("Scaling down since cpu usage has gone down",
3, 1, 2, 16, 75.0,
fixture.autoscale());
}
@Test
public void autoscaling_handles_disk_setting_changes_exclusive_preprovisioned() {
var resources = new NodeResources(3, 100, 100, 1, slow);
var fixture = DynamicProvisioningTester.fixture()
.hostCount(20)
.hostFlavors(resources)
.initialResources(Optional.of(new ClusterResources(5, 1, resources)))
.capacity(Capacity.from(new ClusterResources(5, 1, resources)))
.build();
assertTrue(fixture.tester().nodeRepository().nodes().list().owner(fixture.applicationId).stream()
.allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == slow));
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.setScalingDuration(Duration.ofHours(6));
fixture.loader().applyCpuLoad(0.25, 120);
ClusterResources min = new ClusterResources( 2, 1,
new NodeResources(1, 1, 1, 1, DiskSpeed.any));
ClusterResources max = new ClusterResources(20, 1,
new NodeResources(100, 1000, 1000, 1, DiskSpeed.any));
var capacity = Capacity.from(min, max);
ClusterResources scaledResources = fixture.tester().assertResources("Scaling up",
13, 1, 1.5, 29.1, 87.3,
fixture.autoscale(capacity));
assertEquals("Disk speed from new capacity is used",
DiskSpeed.any, scaledResources.nodeResources().diskSpeed());
fixture.deploy(Capacity.from(scaledResources));
assertTrue(fixture.nodes().stream()
.allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == DiskSpeed.any));
}
@Test
public void autoscaling_target_preserves_any() {
NodeResources resources = new NodeResources(1, 100, 100, 1);
var capacity = Capacity.from(new ClusterResources( 2, 1, resources.with(DiskSpeed.any)),
new ClusterResources( 10, 1, resources.with(DiskSpeed.any)));
var fixture = DynamicProvisioningTester.fixture()
.capacity(capacity)
.awsProdSetup(true)
.initialResources(Optional.empty())
.build();
assertTrue(fixture.tester().nodeRepository().applications().get(fixture.applicationId).get().cluster(fixture.clusterSpec.id()).get().target().resources().isEmpty());
fixture.deploy();
assertEquals(DiskSpeed.any, fixture.nodes().first().get().allocation().get().requestedResources().diskSpeed());
fixture.deactivateRetired(capacity);
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyCpuLoad(0.8, 120);
assertEquals(DiskSpeed.any, fixture.autoscale(capacity).resources().get().nodeResources().diskSpeed());
}
@Test
public void autoscaling_respects_upper_limit() {
var min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
var now = new ClusterResources(5, 1, new NodeResources(1.9, 70, 70, 1));
var max = new ClusterResources( 6, 1, new NodeResources(2.4, 78, 79, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max)).build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyLoad(new Load(0.25, 0.95, 0.95), 120);
fixture.tester().assertResources("Scaling up to limit since resource usage is too high",
6, 1, 2.4, 78.0, 79.0,
fixture.autoscale());
}
@Test
public void autoscaling_respects_lower_limit() {
var min = new ClusterResources( 4, 1, new NodeResources(1.8, 7.4, 8.5, 1));
var max = new ClusterResources( 6, 1, new NodeResources(2.4, 78, 79, 1));
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).capacity(Capacity.from(min, max)).build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.05f, 0.05f, 0.05f), 120);
fixture.tester().assertResources("Scaling down to limit since resource usage is low",
4, 1, 1.8, 7.4, 23.4,
fixture.autoscale());
}
@Test
public void autoscaling_with_unspecified_resources_use_defaults_exclusive() {
var min = new ClusterResources( 2, 1, NodeResources.unspecified());
var max = new ClusterResources( 6, 1, NodeResources.unspecified());
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(false)
.initialResources(Optional.empty())
.capacity(Capacity.from(min, max))
.build();
NodeResources defaultResources =
new CapacityPolicies(fixture.tester().nodeRepository()).specifyFully(NodeResources.unspecified(), fixture.clusterSpec, fixture.applicationId);
fixture.tester().assertResources("Min number of nodes and default resources",
2, 1, defaultResources,
fixture.nodes().toResources());
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.25, 0.95, 0.95), 120);
fixture.tester().assertResources("Scaling up",
5, 1,
defaultResources.vcpu(), defaultResources.memoryGb(), defaultResources.diskGb(),
fixture.autoscale());
}
@Test
public void autoscaling_respects_group_limit() {
var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
var now = new ClusterResources(5, 5, new NodeResources(3.0, 10, 10, 1));
var max = new ClusterResources(18, 6, new NodeResources(100, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.5, 240);
fixture.tester().assertResources("Scaling cpu up",
6, 6, 4.5, 7.4, 22.3,
fixture.autoscale());
}
@Test
public void autoscaling_respects_group_size_limit() {
var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 10, 1));
var now = new ClusterResources(5, 5, new NodeResources(3.0, 10, 100, 1));
var max = new ClusterResources(18, 6, new NodeResources(100, 1000, 10000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max, IntRange.of(2, 3), false, true, Optional.empty(), ClusterInfo.empty()))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.6, 240);
fixture.tester().assertResources("Scaling cpu up",
12, 6, 3.0, 4.2, 27.5,
fixture.autoscale());
}
@Test
public void test_autoscaling_limits_when_min_equals_max() {
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).capacity(Capacity.from(min, min)).build();
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyCpuLoad(0.25, 120);
assertEquals(Autoscaling.Status.unavailable, fixture.autoscale().status());
}
@Test
public void container_prefers_remote_disk_when_no_local_match_exclusive() {
var resources = new ClusterResources( 2, 1, new NodeResources(3, 100, 50, 1));
var local = new NodeResources(3, 100, 75, 1, fast, StorageType.local);
var remote = new NodeResources(3, 100, 50, 1, fast, StorageType.remote);
var fixture = DynamicProvisioningTester.fixture()
.dynamicProvisioning(true)
.allowHostSharing(false)
.clusterType(ClusterSpec.Type.container)
.hostFlavors(local, remote)
.capacity(Capacity.from(resources))
.initialResources(Optional.of(new ClusterResources(3, 1, resources.nodeResources())))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.01, 0.01, 0.01), 120);
Autoscaling suggestion = fixture.suggest();
fixture.tester().assertResources("Choosing the remote disk flavor as it has less disk",
2, 1, 3.0, 100.0, 10.0,
suggestion);
assertEquals("Choosing the remote disk flavor as it has less disk",
StorageType.remote, suggestion.resources().get().nodeResources().storageType());
}
@Test
public void suggestions_ignores_limits() {
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).capacity(Capacity.from(min, min)).build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(1.0, 120);
fixture.tester().assertResources("Suggesting above capacity limit",
5, 1, 10.2, 11.9, 50.5,
fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
}
@Test
public void suggestions_ignores_limits_exclusive() {
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(false).capacity(Capacity.from(min, min)).build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(1.0, 120);
fixture.tester().assertResources("Suggesting above capacity limit",
13, 1, 4, 8, 100.0,
fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
}
@Test
public void not_using_out_of_service_measurements() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.9, 0.6, 0.7), 1, false, true, 120);
assertTrue("Not scaling up since nodes were measured while cluster was out of service",
fixture.autoscale().resources().isEmpty());
}
@Test
public void not_using_unstable_measurements() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.9, 0.6, 0.7), 1, true, false, 120);
assertTrue("Not scaling up since nodes were measured while cluster was unstable",
fixture.autoscale().resources().isEmpty());
}
@Test
public void too_small_disk_compared_to_memory() {
var resources = new ClusterResources(2, 1, new NodeResources(1, 10, 19, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(resources))
.build();
assertEquals(2, fixture.tester().provisionLogger().applicationLog().size());
assertEquals("WARNING: Requested disk (19.0Gb) in cluster 'cluster1' is not large enough to fit core/heap dumps. Minimum recommended disk resources is 2x memory for containers and 3x memory for content",
fixture.tester().provisionLogger().applicationLog().get(0));
}
@Test
public void autoscaling_shouldnt_choose_too_small_disk_compared_to_memory() {
var min = new ClusterResources(10, 1, new NodeResources(1, 10, 19, 1));
var now = new ClusterResources(10, 1, new NodeResources(5, 50, 150, 1));
var max = new ClusterResources(10, 1, new NodeResources(10, 100, 200, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.5, 0.8, 0.1), 120);
fixture.tester().assertResources("Suggesting resources where disk is 3x memory (this is a content cluster)",
11, 1, 13.0, 60.0, 179.9,
fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
fixture.tester().assertResources("Autoscaling to resources where disk is 3x memory (this is a content cluster)",
10, 1, 10.0, 66.2, 198.6,
fixture.tester().autoscale(fixture.applicationId, fixture.clusterSpec, Capacity.from(min, max)));
}
@Test
public void autoscaling_shouldnt_choose_too_small_disk_compared_to_memory_exclusive() {
var min = new ClusterResources(10, 1, new NodeResources(1, 10, 19, 1, DiskSpeed.any, StorageType.remote));
var now = new ClusterResources(10, 1, new NodeResources(16, 64, 192, 1, DiskSpeed.any, StorageType.remote));
var max = new ClusterResources(10, 1, new NodeResources(30, 200, 500, 1, DiskSpeed.any, StorageType.remote));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(false)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.5, 0.8, 0.1), 120);
fixture.tester().assertResources("Suggesting resources where disk is 3x memory (this is a content cluster)",
13, 1, 36.0, 72.0, 900.0,
fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
fixture.tester().assertResources("Autoscaling to resources where disk is 3x memory (this is a content cluster)",
10, 1, 16.0, 64, 247.5,
fixture.tester().autoscale(fixture.applicationId, fixture.clusterSpec, Capacity.from(min, max)));
}
@Test
public void test_autoscaling_group_size_unconstrained() {
var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
var now = new ClusterResources(5, 5, new NodeResources(3, 100, 300, 1));
var max = new ClusterResources(20, 20, new NodeResources(10, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.9, 120);
fixture.tester().assertResources("Scaling up to 2 nodes, scaling memory and disk down at the same time",
8, 4, 7.4, 41.5, 124.6,
fixture.autoscale());
}
@Test
public void test_autoscaling_group_size_1() {
var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
var now = new ClusterResources(5, 5, new NodeResources(3, 100, 100, 1));
var max = new ClusterResources(20, 20, new NodeResources(10, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max, IntRange.of(1), false, true, Optional.empty(), ClusterInfo.empty()))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.setScalingDuration(Duration.ofHours(6));
fixture.loader().applyCpuLoad(0.9, 120);
fixture.tester().assertResources("Scaling up to 2 nodes, scaling memory and disk down at the same time",
7, 7, 9.4, 78.6, 235.8,
fixture.autoscale());
}
@Test
public void test_autoscaling_groupsize_by_cpu_read_dominated() {
var min = new ClusterResources( 3, 1, new NodeResources(1, 1, 1, 1));
var now = new ClusterResources( 6, 2, new NodeResources(3, 100, 100, 1));
var max = new ClusterResources(21, 7, new NodeResources(100, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
Duration timePassed = fixture.loader().addCpuMeasurements(0.25, 120);
fixture.tester().clock().advance(timePassed.negated());
fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 200.0 : 100.0, t -> 10.0);
fixture.tester().assertResources("Changing to 1 group is cheaper",
7, 1, 2.5, 43.3, 129.8,
fixture.autoscale());
}
/** Same as above but mostly write traffic, which favors smaller groups */
@Test
public void test_autoscaling_groupsize_by_cpu_write_dominated() {
var min = new ClusterResources( 3, 1, new NodeResources(1, 1, 1, 1));
var now = new ClusterResources(6, 2, new NodeResources(3, 100, 100, 1));
var max = new ClusterResources(21, 7, new NodeResources(100, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
Duration timePassed = fixture.loader().addCpuMeasurements(0.25, 120);
fixture.tester().clock().advance(timePassed.negated());
fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 20.0 : 10.0, t -> 100.0);
fixture.tester().assertResources("Scaling down since resource usage is too high, changing to 1 group is cheaper",
5, 1, 1.0, 62.6, 187.7,
fixture.autoscale());
}
@Test
public void test_autoscaling_groupsize() {
var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
var now = new ClusterResources(6, 2, new NodeResources(10, 100, 100, 1));
var max = new ClusterResources(30, 30, new NodeResources(100, 100, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyMemLoad(1.0, 1000);
fixture.tester().assertResources("Increase group size to reduce memory load",
8, 2, 13.9, 96.3, 288.8,
fixture.autoscale());
}
@Test
public void test_autoscaling_memory_down() {
var min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
var now = new ClusterResources(6, 1, new NodeResources(3, 100, 100, 1));
var max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.16, 0.02, 0.5), 120);
fixture.tester().assertResources("Scaling down memory",
6, 1, 2.1, 4.0, 96.2,
fixture.autoscale());
}
@Test
public void scaling_down_only_after_delay() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.loader().applyCpuLoad(0.02, 5);
assertTrue("Too soon after initial deployment", fixture.autoscale().resources().isEmpty());
fixture.tester().clock().advance(Duration.ofHours(12 * 3 + 1));
fixture.loader().applyCpuLoad(0.02, 5);
fixture.tester().assertResources("Scaling down since enough time has passed",
5, 1, 1.0, 11.9, 50.5,
fixture.autoscale());
}
@Test
public void test_autoscaling_considers_read_share() {
var min = new ClusterResources( 1, 1, new NodeResources(3, 100, 100, 1));
var max = new ClusterResources(10, 1, new NodeResources(3, 100, 100, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.capacity(Capacity.from(min, max))
.build();
fixture.tester.clock().advance(Duration.ofDays(1));
fixture.loader().applyCpuLoad(0.5, 120);
fixture.tester().assertResources("Advice to scale up since we set aside for bcp by default",
7, 1, 3, 100, 100,
fixture.autoscale());
fixture.loader().applyCpuLoad(0.5, 120);
fixture.storeReadShare(0.25, 0.5);
fixture.tester().assertResources("Half of global share is the same as the default assumption used above",
7, 1, 3, 100, 100,
fixture.autoscale());
fixture.tester.clock().advance(Duration.ofDays(1));
fixture.loader().applyCpuLoad(0.5, 120);
fixture.storeReadShare(0.5, 0.5);
fixture.tester().assertResources("Advice to scale down since we don't need room for bcp",
4, 1, 3, 100, 100,
fixture.autoscale());
}
@Test
public void test_autoscaling_considers_growth_rate() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(2));
Duration timeAdded = fixture.loader().addLoadMeasurements(100, t -> t == 0 ? 200.0 : 100.0, t -> 0.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.25, 200);
fixture.tester().assertResources("Scale up since we assume we need 2x cpu for growth when no scaling time data",
5, 1, 2.6, 11.9, 50.5,
fixture.autoscale());
fixture.setScalingDuration(Duration.ofHours(8));
fixture.tester().clock().advance(Duration.ofDays(2));
timeAdded = fixture.loader().addLoadMeasurements(100, t -> 100.0 + (t < 50 ? t : 100 - t), t -> 0.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.20, 200);
fixture.tester().assertResources("Scale down since observed growth is slower than scaling time",
5, 1, 1.6, 11.9, 50.5,
fixture.autoscale());
fixture.setScalingDuration(Duration.ofHours(8));
fixture.tester().clock().advance(Duration.ofDays(2));
timeAdded = fixture.loader().addLoadMeasurements(100,
t -> 100.0 + (t < 50 ? t * t * t : 125000 - (t - 49) * (t - 49) * (t - 49)),
t -> 0.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.25, 200);
fixture.tester().assertResources("Scale up since observed growth is faster than scaling time",
5, 1, 2.4, 11.9, 50.5,
fixture.autoscale());
}
@Test
public void test_autoscaling_weights_growth_rate_by_confidence() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.setScalingDuration(Duration.ofHours(6));
double scalingFactor = 1.0/6000;
fixture.tester().clock().advance(Duration.ofDays(2));
Duration timeAdded = fixture.loader().addLoadMeasurements(100,
t -> scalingFactor * (100.0 + (t < 50 ? t * t * t : 155000 - (t - 49) * (t - 49) * (t - 49))),
t -> 0.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.7, 200);
fixture.tester().assertResources("Scale up slightly since observed growth is faster than scaling time, but we are not confident",
5, 1, 2.2, 11.9, 50.5,
fixture.autoscale());
}
@Test
public void test_autoscaling_considers_query_vs_write_rate() {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.setScalingDuration(Duration.ofHours(6));
fixture.loader().addCpuMeasurements(0.4, 220);
fixture.tester().clock().advance(Duration.ofDays(2));
var timeAdded = fixture.loader().addLoadMeasurements(100, t -> t == 0 ? 200.0 : 100.0, t -> 100.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester.assertResources("Query and write load is equal -> scale up somewhat",
5, 1, 2.9, 11.9, 50.5,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
timeAdded = fixture.loader().addLoadMeasurements(100, t -> t == 0 ? 800.0 : 400.0, t -> 100.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Query load is 4x write load -> scale up more",
5, 1, 2.2, 11.9, 50.5,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
timeAdded = fixture.loader().addLoadMeasurements(100, t -> t == 0 ? 200.0 : 100.0, t -> 1000.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Write load is 10x query load -> scale down",
5, 1, 1.3, 11.9, 50.5,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
timeAdded = fixture.loader().addLoadMeasurements(100, t -> t == 0 ? 200.0 : 100.0, t-> 0.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Query only -> larger",
5, 1, 3.5, 11.9, 50.5,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
timeAdded = fixture.loader().addLoadMeasurements(100, t -> 0.0, t -> 100.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Write only -> smallest possible",
5, 1, 1.0, 11.9, 50.5,
fixture.autoscale());
}
@Test
public void test_autoscaling_in_dev_preprovisioned() {
var fixture = DynamicProvisioningTester.fixture()
.hostCount(5)
.zone(new Zone(Environment.dev, RegionName.from("us-east")))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(1.0, 1.0, 1.0), 200);
assertTrue("Not attempting to scale up because policies dictate we'll only get one node",
fixture.autoscale().resources().isEmpty());
}
@Test
/** Same setup as test_autoscaling_in_dev(), just with required = true */
@Test
public void test_autoscaling_in_dev_with_required_resources_preprovisioned() {
var requiredCapacity =
Capacity.from(new ClusterResources(2, 1,
new NodeResources(1, 1, 1, 1, NodeResources.DiskSpeed.any)),
new ClusterResources(20, 1,
new NodeResources(100, 1000, 1000, 1, NodeResources.DiskSpeed.any)),
IntRange.empty(),
true,
true,
Optional.empty(),
ClusterInfo.empty());
var fixture = DynamicProvisioningTester.fixture()
.hostCount(5)
.capacity(requiredCapacity)
.zone(new Zone(Environment.dev, RegionName.from("us-east")))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(1.0, 1.0, 1.0), 200);
fixture.tester().assertResources("We scale even in dev because resources are 'required'",
3, 1, 1.0, 13.4, 62.5,
fixture.autoscale());
}
@Test
public void test_autoscaling_in_dev_with_required_unspecified_resources_preprovisioned() {
var requiredCapacity =
Capacity.from(new ClusterResources(1, 1, NodeResources.unspecified()),
new ClusterResources(3, 1, NodeResources.unspecified()),
IntRange.empty(),
true,
true,
Optional.empty(),
ClusterInfo.empty());
var fixture = DynamicProvisioningTester.fixture()
.hostCount(5)
.capacity(requiredCapacity)
.zone(new Zone(Environment.dev, RegionName.from("us-east")))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(1.0, 1.0, 1.0), 200);
fixture.tester().assertResources("We scale even in dev because resources are required",
3, 1, 1.5, 8, 50,
fixture.autoscale());
}
@Test
public void test_scaling_down_leaves_too_little_headroom() {
var r = new NodeResources(16, 32, 100, 1, NodeResources.DiskSpeed.any);
var min = new ClusterResources( 3, 3, r);
var now = new ClusterResources( 4, 4, r);
var max = new ClusterResources( 5, 5, r);
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(false)
.capacity(Capacity.from(min, max, IntRange.from(1)))
.clusterType(ClusterSpec.Type.content)
.initialResources(Optional.of(now))
.build();
fixture.loader().applyCpuLoad(0.17, 10);
assertTrue("Not scaling down as that would leave just 4.5% headroom before needing to scale up again",
fixture.autoscale().resources().isEmpty());
}
@Test
public void test_changing_exclusivity() {
var min = new ClusterResources( 2, 1, new NodeResources( 3, 8, 100, 1));
var max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.cluster(clusterSpec(true))
.capacity(Capacity.from(min, max))
.initialResources(Optional.empty())
.build();
fixture.tester().assertResources("Initial deployment at minimum",
2, 1, 4, 8, 100,
fixture.currentResources().advertisedResources());
fixture.tester().deploy(fixture.applicationId(), clusterSpec(false), fixture.capacity());
fixture.loader().applyLoad(new Load(0.1, 0.1, 0.1), 5);
fixture.tester().assertResources("Exclusive nodes makes no difference here",
2, 1, 4, 8, 100.0,
fixture.autoscale());
fixture.tester().deploy(fixture.applicationId(), clusterSpec(true), fixture.capacity());
fixture.loader().applyLoad(new Load(0.1, 0.1, 0.1), 5);
fixture.tester().assertResources("Reverts to the initial resources",
2, 1, 4, 8, 100,
fixture.currentResources().advertisedResources());
}
/** Tests an autoscaling scenario which should cause in-place resize. */
@Test
public void test_resize() {
var min = new ClusterResources(7, 1, new NodeResources( 2, 10, 384, 1));
var now = new ClusterResources(7, 1, new NodeResources( 3.4, 16.2, 450.1, 1));
var max = new ClusterResources(7, 1, new NodeResources( 4, 32, 768, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.capacity(Capacity.from(min, max))
.initialResources(Optional.of(now))
.build();
var initialNodes = fixture.nodes().asList();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.06, 0.52, 0.27), 100);
var autoscaling = fixture.autoscale();
fixture.tester().assertResources("Scaling down",
7, 1, 2, 15.8, 384.0,
autoscaling);
fixture.deploy(Capacity.from(autoscaling.resources().get()));
assertEquals("Initial nodes are kept", initialNodes, fixture.nodes().asList());
}
private ClusterSpec clusterSpec(boolean exclusive) {
return ClusterSpec.request(ClusterSpec.Type.content,
ClusterSpec.Id.from("test")).vespaVersion("8.1.2")
.exclusive(exclusive)
.build();
}
@Test
public void change_not_requiring_node_replacement_is_preferred() {
var min = new ClusterResources(5, 1, new NodeResources( 16, 64, 200, 1, DiskSpeed.fast, StorageType.remote));
var max = new ClusterResources(6, 1, new NodeResources( 16, 64, 200, 1, DiskSpeed.fast, StorageType.remote));
List<Flavor> flavors = List.of(new Flavor("arm_16", new NodeResources( 16, 64, 200, 1, DiskSpeed.fast, StorageType.remote, NodeResources.Architecture.arm64)),
new Flavor("x86_16", new NodeResources( 16, 64, 200, 1, DiskSpeed.fast, StorageType.remote, NodeResources.Architecture.x86_64)));
var fixture = DynamicProvisioningTester.fixture()
.clusterType(ClusterSpec.Type.container)
.hostFlavors(flavors)
.awsZone(false, Environment.prod)
.capacity(Capacity.from(min, max))
.initialResources(Optional.of(min.with(min.nodeResources().with(NodeResources.Architecture.x86_64))))
.build();
var nodes = fixture.nodes().not().retired().asList();
assertEquals(5, nodes.size());
assertEquals(NodeResources.Architecture.x86_64, nodes.get(0).resources().architecture());
fixture.tester().clock().advance(Duration.ofHours(5));
fixture.loader().applyCpuLoad(0.27, 10);
var autoscaling = fixture.autoscale();
fixture.deploy(Capacity.from(autoscaling.resources().get()));
nodes = fixture.nodes().not().retired().asList();
assertEquals(6, nodes.size());
assertEquals("We stay with x86 even though the first matching flavor is arm",
NodeResources.Architecture.x86_64, nodes.get(0).resources().architecture());
}
} |
```suggestion // Use document field if summary field is not found ``` | else if (exp instanceof SummaryExpression) {
Field field = schema.getSummaryField(fieldName);
if (field == null) {
var sdField = schema.getConcreteField(fieldName);
if (sdField != null && sdField.doesSummarying()) {
fieldType = sdField.getDataType();
} else {
throw new IllegalArgumentException("Summary field '" + fieldName + "' not found.");
}
} else {
fieldType = field.getDataType();
}
} | else if (exp instanceof SummaryExpression) {
Field field = schema.getSummaryField(fieldName);
if (field == null) {
var sdField = schema.getConcreteField(fieldName);
if (sdField != null && sdField.doesSummarying()) {
fieldType = sdField.getDataType();
} else {
throw new IllegalArgumentException("Summary field '" + fieldName + "' not found.");
}
} else {
fieldType = field.getDataType();
}
} | class TypedTransformProvider extends ValueTransformProvider {
private final Schema schema;
private DataType fieldType;
TypedTransformProvider(Class<? extends Expression> transformClass, Schema schema) {
super(transformClass);
this.schema = schema;
}
@Override
protected final boolean requiresTransform(Expression exp) {
if (exp instanceof OutputExpression) {
String fieldName = ((OutputExpression)exp).getFieldName();
if (exp instanceof AttributeExpression) {
Attribute attribute = schema.getAttribute(fieldName);
if (attribute == null)
throw new IllegalArgumentException("Attribute '" + fieldName + "' not found.");
fieldType = attribute.getDataType();
}
else if (exp instanceof IndexExpression) {
Field field = schema.getConcreteField(fieldName);
if (field == null)
throw new IllegalArgumentException("Index field '" + fieldName + "' not found.");
fieldType = field.getDataType();
}
else {
throw new UnsupportedOperationException();
}
}
return requiresTransform(exp, fieldType);
}
@Override
protected final Expression newTransform() {
return newTransform(fieldType);
}
protected abstract boolean requiresTransform(Expression exp, DataType fieldType);
protected abstract Expression newTransform(DataType fieldType);
} | class TypedTransformProvider extends ValueTransformProvider {
private final Schema schema;
private DataType fieldType;
TypedTransformProvider(Class<? extends Expression> transformClass, Schema schema) {
super(transformClass);
this.schema = schema;
}
@Override
protected final boolean requiresTransform(Expression exp) {
if (exp instanceof OutputExpression) {
String fieldName = ((OutputExpression)exp).getFieldName();
if (exp instanceof AttributeExpression) {
Attribute attribute = schema.getAttribute(fieldName);
if (attribute == null)
throw new IllegalArgumentException("Attribute '" + fieldName + "' not found.");
fieldType = attribute.getDataType();
}
else if (exp instanceof IndexExpression) {
Field field = schema.getConcreteField(fieldName);
if (field == null)
throw new IllegalArgumentException("Index field '" + fieldName + "' not found.");
fieldType = field.getDataType();
}
else {
throw new UnsupportedOperationException();
}
}
return requiresTransform(exp, fieldType);
}
@Override
protected final Expression newTransform() {
return newTransform(fieldType);
}
protected abstract boolean requiresTransform(Expression exp, DataType fieldType);
protected abstract Expression newTransform(DataType fieldType);
} | |
Please add the rationale for why this is needed, with reference to what is currently done in AddExtraFieldsToDocument.java (that will be removed in a few months). | protected Expression doConvert(Expression exp) {
List<Expression> ret = new LinkedList<>();
if (exp instanceof AttributeExpression) {
ret.add(new AttributeExpression(field.getName()));
} else if (exp instanceof IndexExpression) {
ret.add(new IndexExpression(field.getName()));
} else if (exp instanceof SummaryExpression) {
for (String fieldName : summaryFields) {
ret.add(new SummaryExpression(fieldName));
}
if (!summaryFields.contains(field.getName())) {
ret.add(new SummaryExpression(field.getName()));
}
} else {
throw new UnsupportedOperationException(exp.getClass().getName());
}
return new StatementExpression(ret);
} | protected Expression doConvert(Expression exp) {
List<Expression> ret = new LinkedList<>();
if (exp instanceof AttributeExpression) {
ret.add(new AttributeExpression(field.getName()));
} else if (exp instanceof IndexExpression) {
ret.add(new IndexExpression(field.getName()));
} else if (exp instanceof SummaryExpression) {
for (String fieldName : summaryFields) {
ret.add(new SummaryExpression(fieldName));
}
/*
* Write to summary field source. AddExtraFieldsToDocument processor adds the "copy"
* summary transform to summary fields without a corresponding explicitly declared
* document field (2023-11-01). Future vespa versions will stop adding document
* fields for those summary fields.
*/
if (!summaryFields.contains(field.getName())) {
ret.add(new SummaryExpression(field.getName()));
}
} else {
throw new UnsupportedOperationException(exp.getClass().getName());
}
return new StatementExpression(ret);
} | class MyConverter extends ExpressionConverter {
final Schema schema;
final Field field;
final Set<String> summaryFields;
final boolean validate;
MyConverter(Schema schema, Field field, Set<String> summaryFields, boolean validate) {
this.schema = schema;
this.field = field;
this.summaryFields = summaryFields.isEmpty() ? Collections.singleton(field.getName()) : summaryFields;
this.validate = validate;
}
@Override
protected boolean shouldConvert(Expression exp) {
if ( ! (exp instanceof OutputExpression)) {
return false;
}
String fieldName = ((OutputExpression)exp).getFieldName();
if (fieldName == null) {
return true;
}
if ( validate && ! fieldName.equals(field.getName())) {
fail(schema, field, "Indexing expression '" + exp + "' attempts to write to a field other than '" +
field.getName() + "'.");
}
return false;
}
@Override
} | class MyConverter extends ExpressionConverter {
final Schema schema;
final Field field;
final Set<String> summaryFields;
final boolean validate;
MyConverter(Schema schema, Field field, Set<String> summaryFields, boolean validate) {
this.schema = schema;
this.field = field;
this.summaryFields = summaryFields.isEmpty() ? Collections.singleton(field.getName()) : summaryFields;
this.validate = validate;
}
@Override
protected boolean shouldConvert(Expression exp) {
if ( ! (exp instanceof OutputExpression)) {
return false;
}
String fieldName = ((OutputExpression)exp).getFieldName();
if (fieldName == null) {
return true;
}
if ( validate && ! fieldName.equals(field.getName())) {
fail(schema, field, "Indexing expression '" + exp + "' attempts to write to a field other than '" +
field.getName() + "'.");
}
return false;
}
@Override
} | |
Changed. | else if (exp instanceof SummaryExpression) {
Field field = schema.getSummaryField(fieldName);
if (field == null) {
var sdField = schema.getConcreteField(fieldName);
if (sdField != null && sdField.doesSummarying()) {
fieldType = sdField.getDataType();
} else {
throw new IllegalArgumentException("Summary field '" + fieldName + "' not found.");
}
} else {
fieldType = field.getDataType();
}
} | else if (exp instanceof SummaryExpression) {
Field field = schema.getSummaryField(fieldName);
if (field == null) {
var sdField = schema.getConcreteField(fieldName);
if (sdField != null && sdField.doesSummarying()) {
fieldType = sdField.getDataType();
} else {
throw new IllegalArgumentException("Summary field '" + fieldName + "' not found.");
}
} else {
fieldType = field.getDataType();
}
} | class TypedTransformProvider extends ValueTransformProvider {
private final Schema schema;
private DataType fieldType;
TypedTransformProvider(Class<? extends Expression> transformClass, Schema schema) {
super(transformClass);
this.schema = schema;
}
@Override
protected final boolean requiresTransform(Expression exp) {
if (exp instanceof OutputExpression) {
String fieldName = ((OutputExpression)exp).getFieldName();
if (exp instanceof AttributeExpression) {
Attribute attribute = schema.getAttribute(fieldName);
if (attribute == null)
throw new IllegalArgumentException("Attribute '" + fieldName + "' not found.");
fieldType = attribute.getDataType();
}
else if (exp instanceof IndexExpression) {
Field field = schema.getConcreteField(fieldName);
if (field == null)
throw new IllegalArgumentException("Index field '" + fieldName + "' not found.");
fieldType = field.getDataType();
}
else {
throw new UnsupportedOperationException();
}
}
return requiresTransform(exp, fieldType);
}
@Override
protected final Expression newTransform() {
return newTransform(fieldType);
}
protected abstract boolean requiresTransform(Expression exp, DataType fieldType);
protected abstract Expression newTransform(DataType fieldType);
} | class TypedTransformProvider extends ValueTransformProvider {
private final Schema schema;
private DataType fieldType;
TypedTransformProvider(Class<? extends Expression> transformClass, Schema schema) {
super(transformClass);
this.schema = schema;
}
@Override
protected final boolean requiresTransform(Expression exp) {
if (exp instanceof OutputExpression) {
String fieldName = ((OutputExpression)exp).getFieldName();
if (exp instanceof AttributeExpression) {
Attribute attribute = schema.getAttribute(fieldName);
if (attribute == null)
throw new IllegalArgumentException("Attribute '" + fieldName + "' not found.");
fieldType = attribute.getDataType();
}
else if (exp instanceof IndexExpression) {
Field field = schema.getConcreteField(fieldName);
if (field == null)
throw new IllegalArgumentException("Index field '" + fieldName + "' not found.");
fieldType = field.getDataType();
}
else {
throw new UnsupportedOperationException();
}
}
return requiresTransform(exp, fieldType);
}
@Override
protected final Expression newTransform() {
return newTransform(fieldType);
}
protected abstract boolean requiresTransform(Expression exp, DataType fieldType);
protected abstract Expression newTransform(DataType fieldType);
} | |
Done. | protected Expression doConvert(Expression exp) {
List<Expression> ret = new LinkedList<>();
if (exp instanceof AttributeExpression) {
ret.add(new AttributeExpression(field.getName()));
} else if (exp instanceof IndexExpression) {
ret.add(new IndexExpression(field.getName()));
} else if (exp instanceof SummaryExpression) {
for (String fieldName : summaryFields) {
ret.add(new SummaryExpression(fieldName));
}
if (!summaryFields.contains(field.getName())) {
ret.add(new SummaryExpression(field.getName()));
}
} else {
throw new UnsupportedOperationException(exp.getClass().getName());
}
return new StatementExpression(ret);
} | protected Expression doConvert(Expression exp) {
List<Expression> ret = new LinkedList<>();
if (exp instanceof AttributeExpression) {
ret.add(new AttributeExpression(field.getName()));
} else if (exp instanceof IndexExpression) {
ret.add(new IndexExpression(field.getName()));
} else if (exp instanceof SummaryExpression) {
for (String fieldName : summaryFields) {
ret.add(new SummaryExpression(fieldName));
}
/*
* Write to summary field source. AddExtraFieldsToDocument processor adds the "copy"
* summary transform to summary fields without a corresponding explicitly declared
* document field (2023-11-01). Future vespa versions will stop adding document
* fields for those summary fields.
*/
if (!summaryFields.contains(field.getName())) {
ret.add(new SummaryExpression(field.getName()));
}
} else {
throw new UnsupportedOperationException(exp.getClass().getName());
}
return new StatementExpression(ret);
} | class MyConverter extends ExpressionConverter {
final Schema schema;
final Field field;
final Set<String> summaryFields;
final boolean validate;
MyConverter(Schema schema, Field field, Set<String> summaryFields, boolean validate) {
this.schema = schema;
this.field = field;
this.summaryFields = summaryFields.isEmpty() ? Collections.singleton(field.getName()) : summaryFields;
this.validate = validate;
}
@Override
protected boolean shouldConvert(Expression exp) {
if ( ! (exp instanceof OutputExpression)) {
return false;
}
String fieldName = ((OutputExpression)exp).getFieldName();
if (fieldName == null) {
return true;
}
if ( validate && ! fieldName.equals(field.getName())) {
fail(schema, field, "Indexing expression '" + exp + "' attempts to write to a field other than '" +
field.getName() + "'.");
}
return false;
}
@Override
} | class MyConverter extends ExpressionConverter {
final Schema schema;
final Field field;
final Set<String> summaryFields;
final boolean validate;
MyConverter(Schema schema, Field field, Set<String> summaryFields, boolean validate) {
this.schema = schema;
this.field = field;
this.summaryFields = summaryFields.isEmpty() ? Collections.singleton(field.getName()) : summaryFields;
this.validate = validate;
}
@Override
protected boolean shouldConvert(Expression exp) {
if ( ! (exp instanceof OutputExpression)) {
return false;
}
String fieldName = ((OutputExpression)exp).getFieldName();
if (fieldName == null) {
return true;
}
if ( validate && ! fieldName.equals(field.getName())) {
fail(schema, field, "Indexing expression '" + exp + "' attempts to write to a field other than '" +
field.getName() + "'.");
}
return false;
}
@Override
} | |
I thought int8 was signed [-127,128] and not unsigned [0,255]? | public void testUnpack() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates("tensor<float>(a{},x[16]):{foo:[" +
"0,0,0,0, 0,0,0,0," +
"1,1,1,1, 1,1,1,1" +
"],bar:[" +
"0,0,0,0, 0,0,0,1," +
"1,1,1,1, 1,0,0,0]}",
"unpack_bits_from_int8(tensor0, float, big)",
"tensor<int8>(a{},x[2]):{foo:[0,255],bar:[1,-8]}");
tester.assertEvaluates("tensor<int8>(a{},x[16]):{foo:[" +
"0,0,0,0, 0,0,0,0," +
"1,1,1,1, 1,1,1,1" +
"],bar:[" +
"1,0,0,0, 0,0,0,0," +
"0,0,0,1, 1,1,1,1]}",
"unpack_bits_from_int8(tensor0, int8, little)",
"tensor<int8>(a{},x[2]):{foo:[0,255],bar:[1,-8]}");
} | "tensor<int8>(a{},x[2]):{foo:[0,255],bar:[1,-8]}"); | public void testUnpack() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates("tensor<float>(a{},x[16]):{foo:[" +
"0,0,0,0, 0,0,0,0," +
"1,1,1,1, 1,1,1,1" +
"],bar:[" +
"0,0,0,0, 0,0,0,1," +
"1,1,1,1, 1,0,0,0]}",
"unpack_bits(tensor0, float, big)",
"tensor<int8>(a{},x[2]):{foo:[0,-1],bar:[1,-8]}");
tester.assertEvaluates("tensor<int8>(a{},x[16]):{foo:[" +
"0,0,0,0, 0,0,0,0," +
"1,1,1,1, 1,1,1,1" +
"],bar:[" +
"1,0,0,0, 0,0,0,0," +
"0,0,0,1, 1,1,1,1]}",
"unpack_bits(tensor0, int8, little)",
"tensor<int8>(a{},x[2]):{foo:[0,-1],bar:[1,-8]}");
} | class EvaluationTestCase {
private final double tolerance = 0.000001;
private void verifyStringValueToString(String s) {
s = '"' + s + '"';
Value val = Value.parse(s);
assertTrue(val instanceof StringValue);
assertEquals(s, val.toString());
}
@Test
public void testStringValueToString() {
verifyStringValueToString("");
verifyStringValueToString("something");
verifyStringValueToString("needs \\\" escape");
verifyStringValueToString("\\\\");
verifyStringValueToString("\\\"");
verifyStringValueToString("\\f");
verifyStringValueToString("\\female");
verifyStringValueToString("\\n");
verifyStringValueToString("\\nude");
verifyStringValueToString("\\r");
verifyStringValueToString("fa\\rt");
verifyStringValueToString("\\t");
verifyStringValueToString("fe\\tish");
verifyStringValueToString("\\f");
verifyStringValueToString("\\\\hx");
verifyStringValueToString("\\\\xx");
verifyStringValueToString("\\\\x10081977");
}
@Test
public void testEvaluationOrder() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates(-4, "1 + -2 + -3");
tester.assertEvaluates(2, "1 - (2 - 3)");
tester.assertEvaluates(-4, "(1 - 2) - 3");
tester.assertEvaluates(-4, "1 - 2 - 3");
}
@Test
public void testEvaluation() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates(0.5, "0.5");
tester.assertEvaluates(-0.5, "-0.5");
tester.assertEvaluates(0.5, "one_half");
tester.assertEvaluates(-0.5, "-one_half");
tester.assertEvaluates(0, "nonexisting");
tester.assertEvaluates(0.75, "0.5 + 0.25");
tester.assertEvaluates(0.75, "one_half + a_quarter");
tester.assertEvaluates(1.25, "0.5 - 0.25 + one");
tester.assertEvaluates(9.0, "3 ^ 2");
tester.assertEvaluates(1, "if(\"a\"==\"a\",1,0)");
tester.assertEvaluates(26, "2*3+4*5");
tester.assertEvaluates(1, "2/6+4/6");
tester.assertEvaluates(2 * 3 * 4 + 3 * 4 * 5 - 4 * 200 / 10, "2*3*4+3*4*5-4*200/10");
tester.assertEvaluates(3, "1 + 10 % 6 / 2");
tester.assertEvaluates(10.0, "3 ^ 2 + 1");
tester.assertEvaluates(18.0, "2 * 3 ^ 2");
tester.assertEvaluates(-4, "1 - 2 - 3");
tester.assertEvaluates(Math.pow(4, 9), "4^3^2");
tester.assertEvaluates(2 * (3 * 4 + 3) * (4 * 5 - 4 * 200) / 10, "2*(3*4+3)*(4*5-4*200)/10");
tester.assertEvaluates(0.5, "if( 2<3, one_half, one_quarter)");
tester.assertEvaluates(0.25,"if( 2>3, one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( 1==1, one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( 1<=1, one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( 1<=1.1, one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( 1>=1.1, one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( 0.33333333333333333333~=1/3, one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( 0.33333333333333333333~=1/35, one_half, a_quarter)");
tester.assertEvaluates(5.5, "if(one_half in [one_quarter,one_half], one_half+5,log(one_quarter) * one_quarter)");
tester.assertEvaluates(0.5, "if( 1 in [1,2 , 3], one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( 1 in [ 2,3,4], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( \"foo\" in [\"foo\",\"bar\"], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( foo in [\"foo\",\"bar\"], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( \"foo\" in [foo,\"bar\"], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( foo in [foo,\"bar\"], one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( \"foo\" in [\"baz\",\"boz\"], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( one in [0, 1, 2], one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( one in [2], one_half, a_quarter)");
tester.assertEvaluates(2.5, "if(1.0, 2.5, 3.5)");
tester.assertEvaluates(3.5, "if(0.0, 2.5, 3.5)");
tester.assertEvaluates(2.5, "if(1.0-1.1, 2.5, 3.5)");
tester.assertEvaluates(3.5, "if(1.0-1.0, 2.5, 3.5)");
RankingExpression e = tester.assertEvaluates(3.5, "if(1.0-1.0, 2.5, 3.5, 0.3)");
assertEquals(0.3d, ((IfNode) e.getRoot()).getTrueProbability(), tolerance);
tester.assertEvaluates(new BooleanValue(true), "2<3");
tester.assertEvaluates(new BooleanValue(false), "2>3");
tester.assertEvaluates(new BooleanValue(false), "if (3>2, 2>3, 5.0)");
tester.assertEvaluates(new BooleanValue(true), "2>3<1");
tester.assertEvaluates(2.5, "if(2>3<1, 2.5, 3.5)");
tester.assertEvaluates(2.5, "if(1+1>3<1+0, 2.5, 3.5)");
tester.assertEvaluates(0, "sin(0)");
tester.assertEvaluates(1, "cos(0)");
tester.assertEvaluates(8, "pow(4/2,min(cos(0)*3,5))");
tester.assertEvaluates(0, "random(1)");
tester.assertEvaluates(0, "random(foo)");
tester.assertEvaluates(1.25, "5*if(1>=1.1, one_half, if(min(1,2)<max(1,2),if (\"foo\" in [\"foo\",\"bar\"],a_quarter,3000), 0.57345347))");
}
@Test
public void testBooleanEvaluation() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates(false, "false");
tester.assertEvaluates(true, "true");
tester.assertEvaluates(false, "0 && 0");
tester.assertEvaluates(false, "0 && 1");
tester.assertEvaluates(false, "1 && 0");
tester.assertEvaluates(true, "1 && 1");
tester.assertEvaluates(true, "1 && 2");
tester.assertEvaluates(true, "1 && 0.1");
tester.assertEvaluates(false, "0 || 0");
tester.assertEvaluates(true, "0 || 0.1");
tester.assertEvaluates(true, "0 || 1");
tester.assertEvaluates(true, "1 || 0");
tester.assertEvaluates(true, "1 || 1");
tester.assertEvaluates(true, "!0");
tester.assertEvaluates(false, "!1");
tester.assertEvaluates(false, "!2");
tester.assertEvaluates(true, "!0 && 1");
tester.assertEvaluates(0, "2 * (0 && 1)");
tester.assertEvaluates(2, "2 * (1 && 1)");
tester.assertEvaluates(true, "2 + 0 && 1");
tester.assertEvaluates(true, "1 && 0 + 2");
}
@Test
@Test
public void testMapSubspaces() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates("tensor<float>(a{},x[2]):{foo:[2,3],bar:[7,10]}",
"map_subspaces(tensor0, f(t)(t))",
"tensor<float>(a{},x[2]):{foo:[2,3],bar:[7,10]}");
tester.assertEvaluates("tensor<float>(a{},x[2]):{foo:[2,3],bar:[7,10]}",
"map_subspaces(tensor0, f(t)(t+2))",
"tensor<float>(a{},x[2]):{foo:[0,1],bar:[5,8]}");
tester.assertEvaluates("tensor<float>(a{},y[2]):{foo:[3,5],bar:[9,11]}",
"map_subspaces(tensor0, f(t)(tensor<float>(y[2])(t{x:(y)}+t{x:(y+1)})))",
"tensor(a{},x[3]):{foo:[1,2,3],bar:[4,5,6]}");
tester.assertEvaluates("tensor<double>(a{},x[2]):{foo:[3,5],bar:[9,11]}",
"map_subspaces(tensor0, f(t)(tensor(x[2])(t{x:(x)}+t{x:(x+1)})))",
"tensor<float>(a{},x[3]):{foo:[1,2,3],bar:[4,5,6]}");
}
@Test
public void testTensorEvaluation() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates("{}", "tensor0", "{}");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:2, {d1:2 }:3 }",
"map(tensor0, f(x) (log10(x)))", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:4, {d1:1}:9, {d1:2 }:16 }",
"map(tensor0, f(x) (x * x))", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:0, {d1:1}:1, {d1:2 }:0 }",
"tensor0 == 3", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:0, {d1:1}:1, {d1:2 }:0 }",
"3 == tensor0", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:2, {d1:2 }:3 }",
"log10(tensor0)", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:-10, {d1:1}:-100, {d1:2 }:-1000 }",
"- tensor0", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:-10, {d1:1}:0, {d1:2 }:0 }",
"min(tensor0, 0)", "{ {d1:0}:-10, {d1:1}:0, {d1:2}:10 }");
tester.assertEvaluates("{ {d1:0}:0, {d1:1}:0, {d1:2 }:10 }",
"max(tensor0, 0)", "{ {d1:0}:-10, {d1:1}:0, {d1:2}:10 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:1, {d1:2 }:1 }",
"tensor0 % 2 == map(tensor0, f(x) (x % 2))", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:1, {d1:2 }:1 }",
"(tensor0 || 1) == map(tensor0, f(x) (x || 1))", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:1, {d1:2 }:1 }",
"(tensor0 && 1) == map(tensor0, f(x) (x && 1))", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:1, {d1:2 }:1 }",
"!tensor0 == map(tensor0, f(x) (!x))", "{ {d1:0}:0, {d1:1}:1, {d1:2}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "abs(tensor0)", "{ {x:0}:1, {x:1}:-2 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "acos(tensor0)", "{ {x:0}:1, {x:1}:1 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "asin(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "atan(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "ceil(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:1 }", "cos(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:1 }", "cosh(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "elu(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:1 }", "exp(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "fabs(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "floor(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "isNan(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "log(tensor0)", "{ {x:0}:1, {x:1}:1 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:1 }", "log10(tensor0)", "{ {x:0}:1, {x:1}:10 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:2 }", "fmod(tensor0, 3)","{ {x:0}:3, {x:1}:8 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:8 }", "pow(tensor0, 3)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:8, {x:1}:16 }", "ldexp(tensor0,3.1)","{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "relu(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "round(tensor0)", "{ {x:0}:1, {x:1}:1.8 }");
tester.assertEvaluates("{ {x:0}:0.5, {x:1}:0.5 }", "sigmoid(tensor0)","{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:-1 }", "sign(tensor0)", "{ {x:0}:3, {x:1}:-5 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "sin(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "sinh(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:4 }", "square(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:3 }", "sqrt(tensor0)", "{ {x:0}:1, {x:1}:9 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "tan(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "tanh(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {}:4 }",
"reduce(tensor0, avg, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:4 }",
"reduce(tensor0, count, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:7 }",
"reduce(tensor0, max, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:4 }",
"reduce(tensor0, median, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:1 }",
"reduce(tensor0, min, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:105 }",
"reduce(tensor0, prod, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:16 }",
"reduce(tensor0, sum, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:4 }",
"reduce(tensor0, avg)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:2, {y:1}:6 }",
"reduce(tensor0, avg, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:2, {y:1}:2 }",
"reduce(tensor0, count, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:3, {y:1}:35 }",
"reduce(tensor0, prod, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:4, {y:1}:12 }",
"reduce(tensor0, sum, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:3, {y:1}:7 }",
"reduce(tensor0, max, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:1, {y:1}:5 }",
"reduce(tensor0, min, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}: 5 }", "sum(tensor0)", "5.0");
tester.assertEvaluates("{ {}:-5 }", "sum(tensor0)", "-5.0");
tester.assertEvaluates("{ {}:12.5 }", "sum(tensor0)", "{ {d1:0}:5.5, {d1:1}:7.0 }");
tester.assertEvaluates("{ {}: 0 }", "sum(tensor0)", "{ {d1:0}:5.0, {d1:1}:7.0, {d1:2}:-12.0}");
tester.assertEvaluates("{ {}: 8.0 }", "avg(tensor0)", "{ {d1:0}:5.0, {d1:1}:7.0, {d1:2}:12.0}");
tester.assertEvaluates("{ {}: 5.0 }", "median(tensor0)", "{ {d1:0}:5.0, {d1:1}:7.0, {d1:2}:-12.0}");
tester.assertEvaluates("{ {y:0}:4, {y:1}:12.0 }",
"sum(tensor0, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {x:0}:6, {x:1}:10.0 }",
"sum(tensor0, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:16 }",
"sum(tensor0, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}: -1 }", "reduce(tensor0, max)", "tensor(x[2]):[-2,-1]");
tester.assertEvaluates("{ {x:0,y:0}:15, {x:1,y:0}:35 }", "join(tensor0, tensor1, f(x,y) (x*y))", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:6, {x:1,y:0}:14 }", "join(tensor0, tensor1, f(x,y) (x+x))", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0}:2, {x:1}:-3 }", "join(tensor0, tensor1, f(x,y) (y-x))", "{ {x:0}:3, {x:1}:7 }", "{ {x:0}:5, {x:1}:4 }");
tester.assertEvaluates("{ }", "tensor0 * tensor0", "{}");
tester.assertEvaluates("{{x:0,y:0,z:0}:0.0}", "( tensor0 * tensor1 ) * ( tensor2 * tensor1 )",
"{{x:0}:1}", "{}", "{{y:0,z:0}:1}");
tester.assertEvaluates("tensor(x{}):{}",
"tensor0 * tensor1", "{ {x:0}:3 }", "tensor(x{}):{ {x:1}:5 }");
tester.assertEvaluates("tensor<double>(x{}):{}",
"tensor0 * tensor1", "{ {x:0}:3 }", "tensor<float>(x{}):{ {x:1}:5 }");
tester.assertEvaluates("{ {x:0}:15 }",
"tensor0 * tensor1", "{ {x:0}:3 }", "{ {x:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:15 }",
"tensor0 * tensor1", "{ {x:0}:3 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:15, {x:1,y:0}:35 }",
"tensor0 * tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:8, {x:1,y:0}:12 }",
"tensor0 + tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:-2, {x:1,y:0}:2 }",
"tensor0 - tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:5, {x:1,y:0}:4 }",
"tensor0 / tensor1", "{ {x:0}:15, {x:1}:12 }", "{ {y:0}:3 }");
tester.assertEvaluates("{ {x:0,y:0}:5, {x:1,y:0}:7 }",
"max(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:3, {x:1,y:0}:5 }",
"min(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:243, {x:1,y:0}:16807 }",
"pow(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:243, {x:1,y:0}:16807 }",
"tensor0 ^ tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:3, {x:1,y:0}:2 }",
"fmod(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:3, {x:1,y:0}:2 }",
"tensor0 % tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:96, {x:1,y:0}:224 }",
"ldexp(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5.1 }");
tester.assertEvaluates("{ {x:0,y:0,z:0}:7, {x:0,y:0,z:1}:13, {x:1,y:0,z:0}:21, {x:1,y:0,z:1}:39, {x:0,y:1,z:0}:55, {x:0,y:1,z:1}:0, {x:1,y:1,z:0}:0, {x:1,y:1,z:1}:0 }",
"tensor0 * tensor1", "{ {x:0,y:0}:1, {x:1,y:0}:3, {x:0,y:1}:5, {x:1,y:1}:0 }", "{ {y:0,z:0}:7, {y:1,z:0}:11, {y:0,z:1}:13, {y:1,z:1}:0 }");
tester.assertEvaluates("{ {x:0,y:1,z:0}:35, {x:0,y:1,z:1}:65 }",
"tensor0 * tensor1", "tensor(x{},y{}):{ {x:0,y:0}:1, {x:1,y:0}:3, {x:0,y:1}:5 }", "tensor(y{},z{}):{ {y:1,z:0}:7, {y:2,z:0}:11, {y:1,z:1}:13 })");
tester.assertEvaluates("{{x:0,y:0}:0.0}","tensor1 * tensor2 * tensor3", "{ {x:0}:1 }", "{ {x:1,y:0}:1, {x:0,y:0}:1 }", "{ {x:0,y:0}:1 }");
tester.assertEvaluates("{ {d1:0}:50, {d1:1}:500, {d1:2}:5000 }",
"5 * tensor0", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:13, {d1:1}:103, {d1:2}:1003 }",
"tensor0 + 3","{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:10, {d1:2 }:100 }",
"tensor0 / 10", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {h:0}:1.5, {h:1}:1.5 }", "0.5 + tensor0", "{ {h:0}:1.0,{h:1}:1.0 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:0 }",
"atan2(tensor0, tensor1)", "{ {x:0}:0, {x:1}:0 }", "{ {y:0}:1 }");
tester.assertEvaluates("{ {x:0,y:0}:2, {x:1,y:0}:7 }",
"hamming(tensor0, tensor1)", "{ {x:0}:97, {x:1}:-1 }", "{ {y:0}:1 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:1 }",
"tensor0 > tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:1, {x:1,y:0}:0 }",
"tensor0 < tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:1 }",
"tensor0 >= tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:1, {x:1,y:0}:0 }",
"tensor0 <= tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:1 }",
"tensor0 == tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:7 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:1 }",
"tensor0 ~= tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:7 }");
tester.assertEvaluates("{ {x:0,y:0}:1, {x:1,y:0}:0 }",
"tensor0 != tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:7 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:0 }",
"tensor0 in [1,2,3]", "{ {x:0}:3, {x:1}:7 }");
tester.assertEvaluates("{ {x:0}:0.1 }", "join(tensor0, 0.1, f(x,y) (x*y))", "{ {x:0}:1 }");
tester.assertEvaluates("{ {x:0}:15, {x:1}:4 }", "merge(tensor0, tensor1, f(x,y) (x*y))", "{ {x:0}:3 }", "{ {x:0}:5, {x:1}:4 }");
tester.assertEvaluates("{ }", "merge(tensor0, tensor1, f(x,y) (x*y))", "{}");
tester.assertEvaluates("{ {x:0,y:0}:1, {x:1,y:0}:0 }",
"tensor0 != tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:7 }");
tester.assertEvaluates("{ {newX:0,y:0}:3 }", "rename(tensor0, x, newX)", "{ {x:0,y:0}:3.0 }");
tester.assertEvaluates("{ {x:0,y:0}:3, {x:1,y:0}:5 }", "rename(tensor0, (x, y), (y, x))", "{ {x:0,y:0}:3.0, {x:0,y:1}:5.0 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:0, {x:0,y:1}:1, {x:1,y:1}:0, {x:0,y:2}:0, {x:1,y:2}:1 }", "tensor(x[2],y[3])(x+1==y)");
tester.assertEvaluates("{ {y:0,x:0}:0, {y:1,x:0}:0, {y:0,x:1}:1, {y:1,x:1}:0, {y:0,x:2}:0, {y:1,x:2}:1 }", "tensor(y[2],x[3])(y+1==x)");
tester.assertEvaluates("{ {x:0,y:0,z:0}:1 }", "tensor(x[1],y[1],z[1])((x==y)*(y==z))");
tester.assertEvaluates("{ {x:0}:0, {x:1}:1, {x:2}:2 }", "range(x[3])");
tester.assertEvaluates("{ {x:0,y:0,z:0}:1, {x:0,y:0,z:1}:0, {x:0,y:1,z:0}:0, {x:0,y:1,z:1}:0, {x:1,y:0,z:0}:0, {x:1,y:0,z:1}:0, {x:1,y:1,z:0}:0, {x:1,y:1,z:1}:1, }", "diag(x[2],y[2],z[2])");
tester.assertEvaluates("6", "reduce(random(x[2],y[3]), count)");
tester.assertEvaluates("tensor(x[2]):[0.0, 2.0]",
"tensor(x[2]):{{x:0}:tensor(y[2]):{{y:0}:((0+0)+a)," +
"{y:1}:((0+1)+a)}{y:0}," +
"{x:1}:tensor(y[2]):{{y:0}:((1+0)+a)," +
"{y:1}:((1+1)+a)}{y:1}" +
"}");
tester.assertEvaluates("3.0", "tensor0{x:1}", "{ {x:0}:1, {x:1}:3 }");
tester.assertEvaluates("1.2", "tensor0{key:foo,x:0}", true, "{ {key:foo,x:0}:1.2, {key:bar,x:0}:3 }");
tester.assertEvaluates("3.0", "tensor0{bar}", true, "{ {x:foo}:1, {x:bar}:3 }");
tester.assertEvaluates("3.3", "tensor0[2]", "tensor(values[4]):[1.1, 2.2, 3.3, 4.4]]");
tester.assertEvaluates("tensor(x[5]):[0, 1, 2, 3, 4]",
"concat(tensor0, tensor1, x)",
"tensor(x[2]):[0, 1]",
"tensor(x[3]):[2, 3, 4])");
tester.assertEvaluates("{ {x:0}:0.25, {x:1}:0.75 }", "l1_normalize(tensor0, x)", "{ {x:0}:1, {x:1}:3 }");
tester.assertEvaluates("{ {x:0}:0.31622776601683794, {x:1}:0.9486832980505138 }", "l2_normalize(tensor0, x)", "{ {x:0}:1, {x:1}:3 }");
tester.assertEvaluates("{ {y:0}:81.0 }", "matmul(tensor0, tensor1, x)", "{ {x:0}:15, {x:1}:12 }", "{ {y:0}:3 }");
tester.assertEvaluates("{ {x:0}:0.5, {x:1}:0.5 }", "softmax(tensor0, x)", "{ {x:0}:1, {x:1}:1 }", "{ {y:0}:1 }");
tester.assertEvaluates("{ {x:0,y:0}:81.0, {x:1,y:0}:88.0 }", "xw_plus_b(tensor0, tensor1, tensor2, x)", "{ {x:0}:15, {x:1}:12 }", "{ {y:0}:3 }", "{ {x:0}:0, {x:1}:7 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:0, {x:2}:0, {x:3}:1 }", "argmax(tensor0, x)", "{ {x:0}:15, {x:1}:12, {x:2}:7, {x:3}:15 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0, {x:2}:1, {x:3}:0 }", "argmin(tensor0, x)", "{ {x:0}:15, {x:1}:12, {x:2}:7, {x:3}:15 }");
tester.assertEvaluates("tensor(y{}):{{y:6}:0}}", "matmul(tensor0, diag(x[5],y[7]), x)", "tensor(x{},y{}):{{x:4,y:6}:1})");
tester.assertEvaluates("tensor(y{}):{{y:6}:10}} | class EvaluationTestCase {
private final double tolerance = 0.000001;
private void verifyStringValueToString(String s) {
s = '"' + s + '"';
Value val = Value.parse(s);
assertTrue(val instanceof StringValue);
assertEquals(s, val.toString());
}
@Test
public void testStringValueToString() {
verifyStringValueToString("");
verifyStringValueToString("something");
verifyStringValueToString("needs \\\" escape");
verifyStringValueToString("\\\\");
verifyStringValueToString("\\\"");
verifyStringValueToString("\\f");
verifyStringValueToString("\\female");
verifyStringValueToString("\\n");
verifyStringValueToString("\\nude");
verifyStringValueToString("\\r");
verifyStringValueToString("fa\\rt");
verifyStringValueToString("\\t");
verifyStringValueToString("fe\\tish");
verifyStringValueToString("\\f");
verifyStringValueToString("\\\\hx");
verifyStringValueToString("\\\\xx");
verifyStringValueToString("\\\\x10081977");
}
@Test
public void testEvaluationOrder() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates(-4, "1 + -2 + -3");
tester.assertEvaluates(2, "1 - (2 - 3)");
tester.assertEvaluates(-4, "(1 - 2) - 3");
tester.assertEvaluates(-4, "1 - 2 - 3");
}
@Test
public void testEvaluation() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates(0.5, "0.5");
tester.assertEvaluates(-0.5, "-0.5");
tester.assertEvaluates(0.5, "one_half");
tester.assertEvaluates(-0.5, "-one_half");
tester.assertEvaluates(0, "nonexisting");
tester.assertEvaluates(0.75, "0.5 + 0.25");
tester.assertEvaluates(0.75, "one_half + a_quarter");
tester.assertEvaluates(1.25, "0.5 - 0.25 + one");
tester.assertEvaluates(9.0, "3 ^ 2");
tester.assertEvaluates(1, "if(\"a\"==\"a\",1,0)");
tester.assertEvaluates(26, "2*3+4*5");
tester.assertEvaluates(1, "2/6+4/6");
tester.assertEvaluates(2 * 3 * 4 + 3 * 4 * 5 - 4 * 200 / 10, "2*3*4+3*4*5-4*200/10");
tester.assertEvaluates(3, "1 + 10 % 6 / 2");
tester.assertEvaluates(10.0, "3 ^ 2 + 1");
tester.assertEvaluates(18.0, "2 * 3 ^ 2");
tester.assertEvaluates(-4, "1 - 2 - 3");
tester.assertEvaluates(Math.pow(4, 9), "4^3^2");
tester.assertEvaluates(2 * (3 * 4 + 3) * (4 * 5 - 4 * 200) / 10, "2*(3*4+3)*(4*5-4*200)/10");
tester.assertEvaluates(0.5, "if( 2<3, one_half, one_quarter)");
tester.assertEvaluates(0.25,"if( 2>3, one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( 1==1, one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( 1<=1, one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( 1<=1.1, one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( 1>=1.1, one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( 0.33333333333333333333~=1/3, one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( 0.33333333333333333333~=1/35, one_half, a_quarter)");
tester.assertEvaluates(5.5, "if(one_half in [one_quarter,one_half], one_half+5,log(one_quarter) * one_quarter)");
tester.assertEvaluates(0.5, "if( 1 in [1,2 , 3], one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( 1 in [ 2,3,4], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( \"foo\" in [\"foo\",\"bar\"], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( foo in [\"foo\",\"bar\"], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( \"foo\" in [foo,\"bar\"], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( foo in [foo,\"bar\"], one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( \"foo\" in [\"baz\",\"boz\"], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( one in [0, 1, 2], one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( one in [2], one_half, a_quarter)");
tester.assertEvaluates(2.5, "if(1.0, 2.5, 3.5)");
tester.assertEvaluates(3.5, "if(0.0, 2.5, 3.5)");
tester.assertEvaluates(2.5, "if(1.0-1.1, 2.5, 3.5)");
tester.assertEvaluates(3.5, "if(1.0-1.0, 2.5, 3.5)");
RankingExpression e = tester.assertEvaluates(3.5, "if(1.0-1.0, 2.5, 3.5, 0.3)");
assertEquals(0.3d, ((IfNode) e.getRoot()).getTrueProbability(), tolerance);
tester.assertEvaluates(new BooleanValue(true), "2<3");
tester.assertEvaluates(new BooleanValue(false), "2>3");
tester.assertEvaluates(new BooleanValue(false), "if (3>2, 2>3, 5.0)");
tester.assertEvaluates(new BooleanValue(true), "2>3<1");
tester.assertEvaluates(2.5, "if(2>3<1, 2.5, 3.5)");
tester.assertEvaluates(2.5, "if(1+1>3<1+0, 2.5, 3.5)");
tester.assertEvaluates(0, "sin(0)");
tester.assertEvaluates(1, "cos(0)");
tester.assertEvaluates(8, "pow(4/2,min(cos(0)*3,5))");
tester.assertEvaluates(0, "random(1)");
tester.assertEvaluates(0, "random(foo)");
tester.assertEvaluates(1.25, "5*if(1>=1.1, one_half, if(min(1,2)<max(1,2),if (\"foo\" in [\"foo\",\"bar\"],a_quarter,3000), 0.57345347))");
}
@Test
public void testBooleanEvaluation() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates(false, "false");
tester.assertEvaluates(true, "true");
tester.assertEvaluates(false, "0 && 0");
tester.assertEvaluates(false, "0 && 1");
tester.assertEvaluates(false, "1 && 0");
tester.assertEvaluates(true, "1 && 1");
tester.assertEvaluates(true, "1 && 2");
tester.assertEvaluates(true, "1 && 0.1");
tester.assertEvaluates(false, "0 || 0");
tester.assertEvaluates(true, "0 || 0.1");
tester.assertEvaluates(true, "0 || 1");
tester.assertEvaluates(true, "1 || 0");
tester.assertEvaluates(true, "1 || 1");
tester.assertEvaluates(true, "!0");
tester.assertEvaluates(false, "!1");
tester.assertEvaluates(false, "!2");
tester.assertEvaluates(true, "!0 && 1");
tester.assertEvaluates(0, "2 * (0 && 1)");
tester.assertEvaluates(2, "2 * (1 && 1)");
tester.assertEvaluates(true, "2 + 0 && 1");
tester.assertEvaluates(true, "1 && 0 + 2");
}
@Test
@Test
public void testMapSubspaces() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates("tensor<float>(a{},x[2]):{foo:[2,3],bar:[7,10]}",
"map_subspaces(tensor0, f(t)(t))",
"tensor<float>(a{},x[2]):{foo:[2,3],bar:[7,10]}");
tester.assertEvaluates("tensor<float>(a{},x[2]):{foo:[2,3],bar:[7,10]}",
"map_subspaces(tensor0, f(t)(t+2))",
"tensor<float>(a{},x[2]):{foo:[0,1],bar:[5,8]}");
tester.assertEvaluates("tensor<float>(a{},y[2]):{foo:[3,5],bar:[9,11]}",
"map_subspaces(tensor0, f(t)(tensor<float>(y[2])(t{x:(y)}+t{x:(y+1)})))",
"tensor(a{},x[3]):{foo:[1,2,3],bar:[4,5,6]}");
tester.assertEvaluates("tensor<double>(a{},x[2]):{foo:[3,5],bar:[9,11]}",
"map_subspaces(tensor0, f(t)(tensor(x[2])(t{x:(x)}+t{x:(x+1)})))",
"tensor<float>(a{},x[3]):{foo:[1,2,3],bar:[4,5,6]}");
}
@Test
public void testTensorEvaluation() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates("{}", "tensor0", "{}");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:2, {d1:2 }:3 }",
"map(tensor0, f(x) (log10(x)))", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:4, {d1:1}:9, {d1:2 }:16 }",
"map(tensor0, f(x) (x * x))", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:0, {d1:1}:1, {d1:2 }:0 }",
"tensor0 == 3", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:0, {d1:1}:1, {d1:2 }:0 }",
"3 == tensor0", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:2, {d1:2 }:3 }",
"log10(tensor0)", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:-10, {d1:1}:-100, {d1:2 }:-1000 }",
"- tensor0", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:-10, {d1:1}:0, {d1:2 }:0 }",
"min(tensor0, 0)", "{ {d1:0}:-10, {d1:1}:0, {d1:2}:10 }");
tester.assertEvaluates("{ {d1:0}:0, {d1:1}:0, {d1:2 }:10 }",
"max(tensor0, 0)", "{ {d1:0}:-10, {d1:1}:0, {d1:2}:10 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:1, {d1:2 }:1 }",
"tensor0 % 2 == map(tensor0, f(x) (x % 2))", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:1, {d1:2 }:1 }",
"(tensor0 || 1) == map(tensor0, f(x) (x || 1))", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:1, {d1:2 }:1 }",
"(tensor0 && 1) == map(tensor0, f(x) (x && 1))", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:1, {d1:2 }:1 }",
"!tensor0 == map(tensor0, f(x) (!x))", "{ {d1:0}:0, {d1:1}:1, {d1:2}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "abs(tensor0)", "{ {x:0}:1, {x:1}:-2 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "acos(tensor0)", "{ {x:0}:1, {x:1}:1 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "asin(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "atan(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "ceil(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:1 }", "cos(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:1 }", "cosh(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "elu(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:1 }", "exp(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "fabs(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "floor(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "isNan(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "log(tensor0)", "{ {x:0}:1, {x:1}:1 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:1 }", "log10(tensor0)", "{ {x:0}:1, {x:1}:10 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:2 }", "fmod(tensor0, 3)","{ {x:0}:3, {x:1}:8 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:8 }", "pow(tensor0, 3)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:8, {x:1}:16 }", "ldexp(tensor0,3.1)","{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "relu(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "round(tensor0)", "{ {x:0}:1, {x:1}:1.8 }");
tester.assertEvaluates("{ {x:0}:0.5, {x:1}:0.5 }", "sigmoid(tensor0)","{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:-1 }", "sign(tensor0)", "{ {x:0}:3, {x:1}:-5 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "sin(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "sinh(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:4 }", "square(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:3 }", "sqrt(tensor0)", "{ {x:0}:1, {x:1}:9 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "tan(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "tanh(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {}:4 }",
"reduce(tensor0, avg, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:4 }",
"reduce(tensor0, count, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:7 }",
"reduce(tensor0, max, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:4 }",
"reduce(tensor0, median, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:1 }",
"reduce(tensor0, min, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:105 }",
"reduce(tensor0, prod, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:16 }",
"reduce(tensor0, sum, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:4 }",
"reduce(tensor0, avg)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:2, {y:1}:6 }",
"reduce(tensor0, avg, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:2, {y:1}:2 }",
"reduce(tensor0, count, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:3, {y:1}:35 }",
"reduce(tensor0, prod, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:4, {y:1}:12 }",
"reduce(tensor0, sum, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:3, {y:1}:7 }",
"reduce(tensor0, max, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:1, {y:1}:5 }",
"reduce(tensor0, min, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}: 5 }", "sum(tensor0)", "5.0");
tester.assertEvaluates("{ {}:-5 }", "sum(tensor0)", "-5.0");
tester.assertEvaluates("{ {}:12.5 }", "sum(tensor0)", "{ {d1:0}:5.5, {d1:1}:7.0 }");
tester.assertEvaluates("{ {}: 0 }", "sum(tensor0)", "{ {d1:0}:5.0, {d1:1}:7.0, {d1:2}:-12.0}");
tester.assertEvaluates("{ {}: 8.0 }", "avg(tensor0)", "{ {d1:0}:5.0, {d1:1}:7.0, {d1:2}:12.0}");
tester.assertEvaluates("{ {}: 5.0 }", "median(tensor0)", "{ {d1:0}:5.0, {d1:1}:7.0, {d1:2}:-12.0}");
tester.assertEvaluates("{ {y:0}:4, {y:1}:12.0 }",
"sum(tensor0, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {x:0}:6, {x:1}:10.0 }",
"sum(tensor0, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:16 }",
"sum(tensor0, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}: -1 }", "reduce(tensor0, max)", "tensor(x[2]):[-2,-1]");
tester.assertEvaluates("{ {x:0,y:0}:15, {x:1,y:0}:35 }", "join(tensor0, tensor1, f(x,y) (x*y))", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:6, {x:1,y:0}:14 }", "join(tensor0, tensor1, f(x,y) (x+x))", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0}:2, {x:1}:-3 }", "join(tensor0, tensor1, f(x,y) (y-x))", "{ {x:0}:3, {x:1}:7 }", "{ {x:0}:5, {x:1}:4 }");
tester.assertEvaluates("{ }", "tensor0 * tensor0", "{}");
tester.assertEvaluates("{{x:0,y:0,z:0}:0.0}", "( tensor0 * tensor1 ) * ( tensor2 * tensor1 )",
"{{x:0}:1}", "{}", "{{y:0,z:0}:1}");
tester.assertEvaluates("tensor(x{}):{}",
"tensor0 * tensor1", "{ {x:0}:3 }", "tensor(x{}):{ {x:1}:5 }");
tester.assertEvaluates("tensor<double>(x{}):{}",
"tensor0 * tensor1", "{ {x:0}:3 }", "tensor<float>(x{}):{ {x:1}:5 }");
tester.assertEvaluates("{ {x:0}:15 }",
"tensor0 * tensor1", "{ {x:0}:3 }", "{ {x:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:15 }",
"tensor0 * tensor1", "{ {x:0}:3 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:15, {x:1,y:0}:35 }",
"tensor0 * tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:8, {x:1,y:0}:12 }",
"tensor0 + tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:-2, {x:1,y:0}:2 }",
"tensor0 - tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:5, {x:1,y:0}:4 }",
"tensor0 / tensor1", "{ {x:0}:15, {x:1}:12 }", "{ {y:0}:3 }");
tester.assertEvaluates("{ {x:0,y:0}:5, {x:1,y:0}:7 }",
"max(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:3, {x:1,y:0}:5 }",
"min(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:243, {x:1,y:0}:16807 }",
"pow(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:243, {x:1,y:0}:16807 }",
"tensor0 ^ tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:3, {x:1,y:0}:2 }",
"fmod(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:3, {x:1,y:0}:2 }",
"tensor0 % tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:96, {x:1,y:0}:224 }",
"ldexp(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5.1 }");
tester.assertEvaluates("{ {x:0,y:0,z:0}:7, {x:0,y:0,z:1}:13, {x:1,y:0,z:0}:21, {x:1,y:0,z:1}:39, {x:0,y:1,z:0}:55, {x:0,y:1,z:1}:0, {x:1,y:1,z:0}:0, {x:1,y:1,z:1}:0 }",
"tensor0 * tensor1", "{ {x:0,y:0}:1, {x:1,y:0}:3, {x:0,y:1}:5, {x:1,y:1}:0 }", "{ {y:0,z:0}:7, {y:1,z:0}:11, {y:0,z:1}:13, {y:1,z:1}:0 }");
tester.assertEvaluates("{ {x:0,y:1,z:0}:35, {x:0,y:1,z:1}:65 }",
"tensor0 * tensor1", "tensor(x{},y{}):{ {x:0,y:0}:1, {x:1,y:0}:3, {x:0,y:1}:5 }", "tensor(y{},z{}):{ {y:1,z:0}:7, {y:2,z:0}:11, {y:1,z:1}:13 })");
tester.assertEvaluates("{{x:0,y:0}:0.0}","tensor1 * tensor2 * tensor3", "{ {x:0}:1 }", "{ {x:1,y:0}:1, {x:0,y:0}:1 }", "{ {x:0,y:0}:1 }");
tester.assertEvaluates("{ {d1:0}:50, {d1:1}:500, {d1:2}:5000 }",
"5 * tensor0", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:13, {d1:1}:103, {d1:2}:1003 }",
"tensor0 + 3","{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:10, {d1:2 }:100 }",
"tensor0 / 10", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {h:0}:1.5, {h:1}:1.5 }", "0.5 + tensor0", "{ {h:0}:1.0,{h:1}:1.0 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:0 }",
"atan2(tensor0, tensor1)", "{ {x:0}:0, {x:1}:0 }", "{ {y:0}:1 }");
tester.assertEvaluates("{ {x:0,y:0}:2, {x:1,y:0}:7 }",
"hamming(tensor0, tensor1)", "{ {x:0}:97, {x:1}:-1 }", "{ {y:0}:1 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:1 }",
"tensor0 > tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:1, {x:1,y:0}:0 }",
"tensor0 < tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:1 }",
"tensor0 >= tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:1, {x:1,y:0}:0 }",
"tensor0 <= tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:1 }",
"tensor0 == tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:7 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:1 }",
"tensor0 ~= tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:7 }");
tester.assertEvaluates("{ {x:0,y:0}:1, {x:1,y:0}:0 }",
"tensor0 != tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:7 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:0 }",
"tensor0 in [1,2,3]", "{ {x:0}:3, {x:1}:7 }");
tester.assertEvaluates("{ {x:0}:0.1 }", "join(tensor0, 0.1, f(x,y) (x*y))", "{ {x:0}:1 }");
tester.assertEvaluates("{ {x:0}:15, {x:1}:4 }", "merge(tensor0, tensor1, f(x,y) (x*y))", "{ {x:0}:3 }", "{ {x:0}:5, {x:1}:4 }");
tester.assertEvaluates("{ }", "merge(tensor0, tensor1, f(x,y) (x*y))", "{}");
tester.assertEvaluates("{ {x:0,y:0}:1, {x:1,y:0}:0 }",
"tensor0 != tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:7 }");
tester.assertEvaluates("{ {newX:0,y:0}:3 }", "rename(tensor0, x, newX)", "{ {x:0,y:0}:3.0 }");
tester.assertEvaluates("{ {x:0,y:0}:3, {x:1,y:0}:5 }", "rename(tensor0, (x, y), (y, x))", "{ {x:0,y:0}:3.0, {x:0,y:1}:5.0 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:0, {x:0,y:1}:1, {x:1,y:1}:0, {x:0,y:2}:0, {x:1,y:2}:1 }", "tensor(x[2],y[3])(x+1==y)");
tester.assertEvaluates("{ {y:0,x:0}:0, {y:1,x:0}:0, {y:0,x:1}:1, {y:1,x:1}:0, {y:0,x:2}:0, {y:1,x:2}:1 }", "tensor(y[2],x[3])(y+1==x)");
tester.assertEvaluates("{ {x:0,y:0,z:0}:1 }", "tensor(x[1],y[1],z[1])((x==y)*(y==z))");
tester.assertEvaluates("{ {x:0}:0, {x:1}:1, {x:2}:2 }", "range(x[3])");
tester.assertEvaluates("{ {x:0,y:0,z:0}:1, {x:0,y:0,z:1}:0, {x:0,y:1,z:0}:0, {x:0,y:1,z:1}:0, {x:1,y:0,z:0}:0, {x:1,y:0,z:1}:0, {x:1,y:1,z:0}:0, {x:1,y:1,z:1}:1, }", "diag(x[2],y[2],z[2])");
tester.assertEvaluates("6", "reduce(random(x[2],y[3]), count)");
tester.assertEvaluates("tensor(x[2]):[0.0, 2.0]",
"tensor(x[2]):{{x:0}:tensor(y[2]):{{y:0}:((0+0)+a)," +
"{y:1}:((0+1)+a)}{y:0}," +
"{x:1}:tensor(y[2]):{{y:0}:((1+0)+a)," +
"{y:1}:((1+1)+a)}{y:1}" +
"}");
tester.assertEvaluates("3.0", "tensor0{x:1}", "{ {x:0}:1, {x:1}:3 }");
tester.assertEvaluates("1.2", "tensor0{key:foo,x:0}", true, "{ {key:foo,x:0}:1.2, {key:bar,x:0}:3 }");
tester.assertEvaluates("3.0", "tensor0{bar}", true, "{ {x:foo}:1, {x:bar}:3 }");
tester.assertEvaluates("3.3", "tensor0[2]", "tensor(values[4]):[1.1, 2.2, 3.3, 4.4]]");
tester.assertEvaluates("tensor(x[5]):[0, 1, 2, 3, 4]",
"concat(tensor0, tensor1, x)",
"tensor(x[2]):[0, 1]",
"tensor(x[3]):[2, 3, 4])");
tester.assertEvaluates("{ {x:0}:0.25, {x:1}:0.75 }", "l1_normalize(tensor0, x)", "{ {x:0}:1, {x:1}:3 }");
tester.assertEvaluates("{ {x:0}:0.31622776601683794, {x:1}:0.9486832980505138 }", "l2_normalize(tensor0, x)", "{ {x:0}:1, {x:1}:3 }");
tester.assertEvaluates("{ {y:0}:81.0 }", "matmul(tensor0, tensor1, x)", "{ {x:0}:15, {x:1}:12 }", "{ {y:0}:3 }");
tester.assertEvaluates("{ {x:0}:0.5, {x:1}:0.5 }", "softmax(tensor0, x)", "{ {x:0}:1, {x:1}:1 }", "{ {y:0}:1 }");
tester.assertEvaluates("{ {x:0,y:0}:81.0, {x:1,y:0}:88.0 }", "xw_plus_b(tensor0, tensor1, tensor2, x)", "{ {x:0}:15, {x:1}:12 }", "{ {y:0}:3 }", "{ {x:0}:0, {x:1}:7 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:0, {x:2}:0, {x:3}:1 }", "argmax(tensor0, x)", "{ {x:0}:15, {x:1}:12, {x:2}:7, {x:3}:15 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0, {x:2}:1, {x:3}:0 }", "argmin(tensor0, x)", "{ {x:0}:15, {x:1}:12, {x:2}:7, {x:3}:15 }");
tester.assertEvaluates("tensor(y{}):{{y:6}:0}}", "matmul(tensor0, diag(x[5],y[7]), x)", "tensor(x{},y{}):{{x:4,y:6}:1})");
tester.assertEvaluates("tensor(y{}):{{y:6}:10}} |
Currently the "packing" is implemented in the embedder, maybe we can add the inversion as well (https://github.com/vespa-engine/vespa/blob/master/model-integration/src/test/java/ai/vespa/embedding/ColBertEmbedderTest.java) | public void testUnpack() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates("tensor<float>(a{},x[16]):{foo:[" +
"0,0,0,0, 0,0,0,0," +
"1,1,1,1, 1,1,1,1" +
"],bar:[" +
"0,0,0,0, 0,0,0,1," +
"1,1,1,1, 1,0,0,0]}",
"unpack_bits_from_int8(tensor0, float, big)",
"tensor<int8>(a{},x[2]):{foo:[0,255],bar:[1,-8]}");
tester.assertEvaluates("tensor<int8>(a{},x[16]):{foo:[" +
"0,0,0,0, 0,0,0,0," +
"1,1,1,1, 1,1,1,1" +
"],bar:[" +
"1,0,0,0, 0,0,0,0," +
"0,0,0,1, 1,1,1,1]}",
"unpack_bits_from_int8(tensor0, int8, little)",
"tensor<int8>(a{},x[2]):{foo:[0,255],bar:[1,-8]}");
} | "tensor<int8>(a{},x[2]):{foo:[0,255],bar:[1,-8]}"); | public void testUnpack() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates("tensor<float>(a{},x[16]):{foo:[" +
"0,0,0,0, 0,0,0,0," +
"1,1,1,1, 1,1,1,1" +
"],bar:[" +
"0,0,0,0, 0,0,0,1," +
"1,1,1,1, 1,0,0,0]}",
"unpack_bits(tensor0, float, big)",
"tensor<int8>(a{},x[2]):{foo:[0,-1],bar:[1,-8]}");
tester.assertEvaluates("tensor<int8>(a{},x[16]):{foo:[" +
"0,0,0,0, 0,0,0,0," +
"1,1,1,1, 1,1,1,1" +
"],bar:[" +
"1,0,0,0, 0,0,0,0," +
"0,0,0,1, 1,1,1,1]}",
"unpack_bits(tensor0, int8, little)",
"tensor<int8>(a{},x[2]):{foo:[0,-1],bar:[1,-8]}");
} | class EvaluationTestCase {
private final double tolerance = 0.000001;
private void verifyStringValueToString(String s) {
s = '"' + s + '"';
Value val = Value.parse(s);
assertTrue(val instanceof StringValue);
assertEquals(s, val.toString());
}
@Test
public void testStringValueToString() {
verifyStringValueToString("");
verifyStringValueToString("something");
verifyStringValueToString("needs \\\" escape");
verifyStringValueToString("\\\\");
verifyStringValueToString("\\\"");
verifyStringValueToString("\\f");
verifyStringValueToString("\\female");
verifyStringValueToString("\\n");
verifyStringValueToString("\\nude");
verifyStringValueToString("\\r");
verifyStringValueToString("fa\\rt");
verifyStringValueToString("\\t");
verifyStringValueToString("fe\\tish");
verifyStringValueToString("\\f");
verifyStringValueToString("\\\\hx");
verifyStringValueToString("\\\\xx");
verifyStringValueToString("\\\\x10081977");
}
@Test
public void testEvaluationOrder() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates(-4, "1 + -2 + -3");
tester.assertEvaluates(2, "1 - (2 - 3)");
tester.assertEvaluates(-4, "(1 - 2) - 3");
tester.assertEvaluates(-4, "1 - 2 - 3");
}
@Test
public void testEvaluation() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates(0.5, "0.5");
tester.assertEvaluates(-0.5, "-0.5");
tester.assertEvaluates(0.5, "one_half");
tester.assertEvaluates(-0.5, "-one_half");
tester.assertEvaluates(0, "nonexisting");
tester.assertEvaluates(0.75, "0.5 + 0.25");
tester.assertEvaluates(0.75, "one_half + a_quarter");
tester.assertEvaluates(1.25, "0.5 - 0.25 + one");
tester.assertEvaluates(9.0, "3 ^ 2");
tester.assertEvaluates(1, "if(\"a\"==\"a\",1,0)");
tester.assertEvaluates(26, "2*3+4*5");
tester.assertEvaluates(1, "2/6+4/6");
tester.assertEvaluates(2 * 3 * 4 + 3 * 4 * 5 - 4 * 200 / 10, "2*3*4+3*4*5-4*200/10");
tester.assertEvaluates(3, "1 + 10 % 6 / 2");
tester.assertEvaluates(10.0, "3 ^ 2 + 1");
tester.assertEvaluates(18.0, "2 * 3 ^ 2");
tester.assertEvaluates(-4, "1 - 2 - 3");
tester.assertEvaluates(Math.pow(4, 9), "4^3^2");
tester.assertEvaluates(2 * (3 * 4 + 3) * (4 * 5 - 4 * 200) / 10, "2*(3*4+3)*(4*5-4*200)/10");
tester.assertEvaluates(0.5, "if( 2<3, one_half, one_quarter)");
tester.assertEvaluates(0.25,"if( 2>3, one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( 1==1, one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( 1<=1, one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( 1<=1.1, one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( 1>=1.1, one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( 0.33333333333333333333~=1/3, one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( 0.33333333333333333333~=1/35, one_half, a_quarter)");
tester.assertEvaluates(5.5, "if(one_half in [one_quarter,one_half], one_half+5,log(one_quarter) * one_quarter)");
tester.assertEvaluates(0.5, "if( 1 in [1,2 , 3], one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( 1 in [ 2,3,4], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( \"foo\" in [\"foo\",\"bar\"], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( foo in [\"foo\",\"bar\"], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( \"foo\" in [foo,\"bar\"], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( foo in [foo,\"bar\"], one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( \"foo\" in [\"baz\",\"boz\"], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( one in [0, 1, 2], one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( one in [2], one_half, a_quarter)");
tester.assertEvaluates(2.5, "if(1.0, 2.5, 3.5)");
tester.assertEvaluates(3.5, "if(0.0, 2.5, 3.5)");
tester.assertEvaluates(2.5, "if(1.0-1.1, 2.5, 3.5)");
tester.assertEvaluates(3.5, "if(1.0-1.0, 2.5, 3.5)");
RankingExpression e = tester.assertEvaluates(3.5, "if(1.0-1.0, 2.5, 3.5, 0.3)");
assertEquals(0.3d, ((IfNode) e.getRoot()).getTrueProbability(), tolerance);
tester.assertEvaluates(new BooleanValue(true), "2<3");
tester.assertEvaluates(new BooleanValue(false), "2>3");
tester.assertEvaluates(new BooleanValue(false), "if (3>2, 2>3, 5.0)");
tester.assertEvaluates(new BooleanValue(true), "2>3<1");
tester.assertEvaluates(2.5, "if(2>3<1, 2.5, 3.5)");
tester.assertEvaluates(2.5, "if(1+1>3<1+0, 2.5, 3.5)");
tester.assertEvaluates(0, "sin(0)");
tester.assertEvaluates(1, "cos(0)");
tester.assertEvaluates(8, "pow(4/2,min(cos(0)*3,5))");
tester.assertEvaluates(0, "random(1)");
tester.assertEvaluates(0, "random(foo)");
tester.assertEvaluates(1.25, "5*if(1>=1.1, one_half, if(min(1,2)<max(1,2),if (\"foo\" in [\"foo\",\"bar\"],a_quarter,3000), 0.57345347))");
}
@Test
public void testBooleanEvaluation() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates(false, "false");
tester.assertEvaluates(true, "true");
tester.assertEvaluates(false, "0 && 0");
tester.assertEvaluates(false, "0 && 1");
tester.assertEvaluates(false, "1 && 0");
tester.assertEvaluates(true, "1 && 1");
tester.assertEvaluates(true, "1 && 2");
tester.assertEvaluates(true, "1 && 0.1");
tester.assertEvaluates(false, "0 || 0");
tester.assertEvaluates(true, "0 || 0.1");
tester.assertEvaluates(true, "0 || 1");
tester.assertEvaluates(true, "1 || 0");
tester.assertEvaluates(true, "1 || 1");
tester.assertEvaluates(true, "!0");
tester.assertEvaluates(false, "!1");
tester.assertEvaluates(false, "!2");
tester.assertEvaluates(true, "!0 && 1");
tester.assertEvaluates(0, "2 * (0 && 1)");
tester.assertEvaluates(2, "2 * (1 && 1)");
tester.assertEvaluates(true, "2 + 0 && 1");
tester.assertEvaluates(true, "1 && 0 + 2");
}
@Test
@Test
public void testMapSubspaces() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates("tensor<float>(a{},x[2]):{foo:[2,3],bar:[7,10]}",
"map_subspaces(tensor0, f(t)(t))",
"tensor<float>(a{},x[2]):{foo:[2,3],bar:[7,10]}");
tester.assertEvaluates("tensor<float>(a{},x[2]):{foo:[2,3],bar:[7,10]}",
"map_subspaces(tensor0, f(t)(t+2))",
"tensor<float>(a{},x[2]):{foo:[0,1],bar:[5,8]}");
tester.assertEvaluates("tensor<float>(a{},y[2]):{foo:[3,5],bar:[9,11]}",
"map_subspaces(tensor0, f(t)(tensor<float>(y[2])(t{x:(y)}+t{x:(y+1)})))",
"tensor(a{},x[3]):{foo:[1,2,3],bar:[4,5,6]}");
tester.assertEvaluates("tensor<double>(a{},x[2]):{foo:[3,5],bar:[9,11]}",
"map_subspaces(tensor0, f(t)(tensor(x[2])(t{x:(x)}+t{x:(x+1)})))",
"tensor<float>(a{},x[3]):{foo:[1,2,3],bar:[4,5,6]}");
}
@Test
public void testTensorEvaluation() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates("{}", "tensor0", "{}");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:2, {d1:2 }:3 }",
"map(tensor0, f(x) (log10(x)))", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:4, {d1:1}:9, {d1:2 }:16 }",
"map(tensor0, f(x) (x * x))", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:0, {d1:1}:1, {d1:2 }:0 }",
"tensor0 == 3", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:0, {d1:1}:1, {d1:2 }:0 }",
"3 == tensor0", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:2, {d1:2 }:3 }",
"log10(tensor0)", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:-10, {d1:1}:-100, {d1:2 }:-1000 }",
"- tensor0", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:-10, {d1:1}:0, {d1:2 }:0 }",
"min(tensor0, 0)", "{ {d1:0}:-10, {d1:1}:0, {d1:2}:10 }");
tester.assertEvaluates("{ {d1:0}:0, {d1:1}:0, {d1:2 }:10 }",
"max(tensor0, 0)", "{ {d1:0}:-10, {d1:1}:0, {d1:2}:10 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:1, {d1:2 }:1 }",
"tensor0 % 2 == map(tensor0, f(x) (x % 2))", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:1, {d1:2 }:1 }",
"(tensor0 || 1) == map(tensor0, f(x) (x || 1))", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:1, {d1:2 }:1 }",
"(tensor0 && 1) == map(tensor0, f(x) (x && 1))", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:1, {d1:2 }:1 }",
"!tensor0 == map(tensor0, f(x) (!x))", "{ {d1:0}:0, {d1:1}:1, {d1:2}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "abs(tensor0)", "{ {x:0}:1, {x:1}:-2 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "acos(tensor0)", "{ {x:0}:1, {x:1}:1 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "asin(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "atan(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "ceil(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:1 }", "cos(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:1 }", "cosh(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "elu(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:1 }", "exp(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "fabs(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "floor(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "isNan(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "log(tensor0)", "{ {x:0}:1, {x:1}:1 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:1 }", "log10(tensor0)", "{ {x:0}:1, {x:1}:10 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:2 }", "fmod(tensor0, 3)","{ {x:0}:3, {x:1}:8 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:8 }", "pow(tensor0, 3)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:8, {x:1}:16 }", "ldexp(tensor0,3.1)","{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "relu(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "round(tensor0)", "{ {x:0}:1, {x:1}:1.8 }");
tester.assertEvaluates("{ {x:0}:0.5, {x:1}:0.5 }", "sigmoid(tensor0)","{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:-1 }", "sign(tensor0)", "{ {x:0}:3, {x:1}:-5 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "sin(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "sinh(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:4 }", "square(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:3 }", "sqrt(tensor0)", "{ {x:0}:1, {x:1}:9 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "tan(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "tanh(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {}:4 }",
"reduce(tensor0, avg, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:4 }",
"reduce(tensor0, count, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:7 }",
"reduce(tensor0, max, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:4 }",
"reduce(tensor0, median, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:1 }",
"reduce(tensor0, min, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:105 }",
"reduce(tensor0, prod, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:16 }",
"reduce(tensor0, sum, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:4 }",
"reduce(tensor0, avg)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:2, {y:1}:6 }",
"reduce(tensor0, avg, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:2, {y:1}:2 }",
"reduce(tensor0, count, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:3, {y:1}:35 }",
"reduce(tensor0, prod, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:4, {y:1}:12 }",
"reduce(tensor0, sum, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:3, {y:1}:7 }",
"reduce(tensor0, max, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:1, {y:1}:5 }",
"reduce(tensor0, min, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}: 5 }", "sum(tensor0)", "5.0");
tester.assertEvaluates("{ {}:-5 }", "sum(tensor0)", "-5.0");
tester.assertEvaluates("{ {}:12.5 }", "sum(tensor0)", "{ {d1:0}:5.5, {d1:1}:7.0 }");
tester.assertEvaluates("{ {}: 0 }", "sum(tensor0)", "{ {d1:0}:5.0, {d1:1}:7.0, {d1:2}:-12.0}");
tester.assertEvaluates("{ {}: 8.0 }", "avg(tensor0)", "{ {d1:0}:5.0, {d1:1}:7.0, {d1:2}:12.0}");
tester.assertEvaluates("{ {}: 5.0 }", "median(tensor0)", "{ {d1:0}:5.0, {d1:1}:7.0, {d1:2}:-12.0}");
tester.assertEvaluates("{ {y:0}:4, {y:1}:12.0 }",
"sum(tensor0, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {x:0}:6, {x:1}:10.0 }",
"sum(tensor0, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:16 }",
"sum(tensor0, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}: -1 }", "reduce(tensor0, max)", "tensor(x[2]):[-2,-1]");
tester.assertEvaluates("{ {x:0,y:0}:15, {x:1,y:0}:35 }", "join(tensor0, tensor1, f(x,y) (x*y))", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:6, {x:1,y:0}:14 }", "join(tensor0, tensor1, f(x,y) (x+x))", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0}:2, {x:1}:-3 }", "join(tensor0, tensor1, f(x,y) (y-x))", "{ {x:0}:3, {x:1}:7 }", "{ {x:0}:5, {x:1}:4 }");
tester.assertEvaluates("{ }", "tensor0 * tensor0", "{}");
tester.assertEvaluates("{{x:0,y:0,z:0}:0.0}", "( tensor0 * tensor1 ) * ( tensor2 * tensor1 )",
"{{x:0}:1}", "{}", "{{y:0,z:0}:1}");
tester.assertEvaluates("tensor(x{}):{}",
"tensor0 * tensor1", "{ {x:0}:3 }", "tensor(x{}):{ {x:1}:5 }");
tester.assertEvaluates("tensor<double>(x{}):{}",
"tensor0 * tensor1", "{ {x:0}:3 }", "tensor<float>(x{}):{ {x:1}:5 }");
tester.assertEvaluates("{ {x:0}:15 }",
"tensor0 * tensor1", "{ {x:0}:3 }", "{ {x:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:15 }",
"tensor0 * tensor1", "{ {x:0}:3 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:15, {x:1,y:0}:35 }",
"tensor0 * tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:8, {x:1,y:0}:12 }",
"tensor0 + tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:-2, {x:1,y:0}:2 }",
"tensor0 - tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:5, {x:1,y:0}:4 }",
"tensor0 / tensor1", "{ {x:0}:15, {x:1}:12 }", "{ {y:0}:3 }");
tester.assertEvaluates("{ {x:0,y:0}:5, {x:1,y:0}:7 }",
"max(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:3, {x:1,y:0}:5 }",
"min(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:243, {x:1,y:0}:16807 }",
"pow(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:243, {x:1,y:0}:16807 }",
"tensor0 ^ tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:3, {x:1,y:0}:2 }",
"fmod(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:3, {x:1,y:0}:2 }",
"tensor0 % tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:96, {x:1,y:0}:224 }",
"ldexp(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5.1 }");
tester.assertEvaluates("{ {x:0,y:0,z:0}:7, {x:0,y:0,z:1}:13, {x:1,y:0,z:0}:21, {x:1,y:0,z:1}:39, {x:0,y:1,z:0}:55, {x:0,y:1,z:1}:0, {x:1,y:1,z:0}:0, {x:1,y:1,z:1}:0 }",
"tensor0 * tensor1", "{ {x:0,y:0}:1, {x:1,y:0}:3, {x:0,y:1}:5, {x:1,y:1}:0 }", "{ {y:0,z:0}:7, {y:1,z:0}:11, {y:0,z:1}:13, {y:1,z:1}:0 }");
tester.assertEvaluates("{ {x:0,y:1,z:0}:35, {x:0,y:1,z:1}:65 }",
"tensor0 * tensor1", "tensor(x{},y{}):{ {x:0,y:0}:1, {x:1,y:0}:3, {x:0,y:1}:5 }", "tensor(y{},z{}):{ {y:1,z:0}:7, {y:2,z:0}:11, {y:1,z:1}:13 })");
tester.assertEvaluates("{{x:0,y:0}:0.0}","tensor1 * tensor2 * tensor3", "{ {x:0}:1 }", "{ {x:1,y:0}:1, {x:0,y:0}:1 }", "{ {x:0,y:0}:1 }");
tester.assertEvaluates("{ {d1:0}:50, {d1:1}:500, {d1:2}:5000 }",
"5 * tensor0", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:13, {d1:1}:103, {d1:2}:1003 }",
"tensor0 + 3","{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:10, {d1:2 }:100 }",
"tensor0 / 10", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {h:0}:1.5, {h:1}:1.5 }", "0.5 + tensor0", "{ {h:0}:1.0,{h:1}:1.0 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:0 }",
"atan2(tensor0, tensor1)", "{ {x:0}:0, {x:1}:0 }", "{ {y:0}:1 }");
tester.assertEvaluates("{ {x:0,y:0}:2, {x:1,y:0}:7 }",
"hamming(tensor0, tensor1)", "{ {x:0}:97, {x:1}:-1 }", "{ {y:0}:1 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:1 }",
"tensor0 > tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:1, {x:1,y:0}:0 }",
"tensor0 < tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:1 }",
"tensor0 >= tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:1, {x:1,y:0}:0 }",
"tensor0 <= tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:1 }",
"tensor0 == tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:7 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:1 }",
"tensor0 ~= tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:7 }");
tester.assertEvaluates("{ {x:0,y:0}:1, {x:1,y:0}:0 }",
"tensor0 != tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:7 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:0 }",
"tensor0 in [1,2,3]", "{ {x:0}:3, {x:1}:7 }");
tester.assertEvaluates("{ {x:0}:0.1 }", "join(tensor0, 0.1, f(x,y) (x*y))", "{ {x:0}:1 }");
tester.assertEvaluates("{ {x:0}:15, {x:1}:4 }", "merge(tensor0, tensor1, f(x,y) (x*y))", "{ {x:0}:3 }", "{ {x:0}:5, {x:1}:4 }");
tester.assertEvaluates("{ }", "merge(tensor0, tensor1, f(x,y) (x*y))", "{}");
tester.assertEvaluates("{ {x:0,y:0}:1, {x:1,y:0}:0 }",
"tensor0 != tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:7 }");
tester.assertEvaluates("{ {newX:0,y:0}:3 }", "rename(tensor0, x, newX)", "{ {x:0,y:0}:3.0 }");
tester.assertEvaluates("{ {x:0,y:0}:3, {x:1,y:0}:5 }", "rename(tensor0, (x, y), (y, x))", "{ {x:0,y:0}:3.0, {x:0,y:1}:5.0 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:0, {x:0,y:1}:1, {x:1,y:1}:0, {x:0,y:2}:0, {x:1,y:2}:1 }", "tensor(x[2],y[3])(x+1==y)");
tester.assertEvaluates("{ {y:0,x:0}:0, {y:1,x:0}:0, {y:0,x:1}:1, {y:1,x:1}:0, {y:0,x:2}:0, {y:1,x:2}:1 }", "tensor(y[2],x[3])(y+1==x)");
tester.assertEvaluates("{ {x:0,y:0,z:0}:1 }", "tensor(x[1],y[1],z[1])((x==y)*(y==z))");
tester.assertEvaluates("{ {x:0}:0, {x:1}:1, {x:2}:2 }", "range(x[3])");
tester.assertEvaluates("{ {x:0,y:0,z:0}:1, {x:0,y:0,z:1}:0, {x:0,y:1,z:0}:0, {x:0,y:1,z:1}:0, {x:1,y:0,z:0}:0, {x:1,y:0,z:1}:0, {x:1,y:1,z:0}:0, {x:1,y:1,z:1}:1, }", "diag(x[2],y[2],z[2])");
tester.assertEvaluates("6", "reduce(random(x[2],y[3]), count)");
tester.assertEvaluates("tensor(x[2]):[0.0, 2.0]",
"tensor(x[2]):{{x:0}:tensor(y[2]):{{y:0}:((0+0)+a)," +
"{y:1}:((0+1)+a)}{y:0}," +
"{x:1}:tensor(y[2]):{{y:0}:((1+0)+a)," +
"{y:1}:((1+1)+a)}{y:1}" +
"}");
tester.assertEvaluates("3.0", "tensor0{x:1}", "{ {x:0}:1, {x:1}:3 }");
tester.assertEvaluates("1.2", "tensor0{key:foo,x:0}", true, "{ {key:foo,x:0}:1.2, {key:bar,x:0}:3 }");
tester.assertEvaluates("3.0", "tensor0{bar}", true, "{ {x:foo}:1, {x:bar}:3 }");
tester.assertEvaluates("3.3", "tensor0[2]", "tensor(values[4]):[1.1, 2.2, 3.3, 4.4]]");
tester.assertEvaluates("tensor(x[5]):[0, 1, 2, 3, 4]",
"concat(tensor0, tensor1, x)",
"tensor(x[2]):[0, 1]",
"tensor(x[3]):[2, 3, 4])");
tester.assertEvaluates("{ {x:0}:0.25, {x:1}:0.75 }", "l1_normalize(tensor0, x)", "{ {x:0}:1, {x:1}:3 }");
tester.assertEvaluates("{ {x:0}:0.31622776601683794, {x:1}:0.9486832980505138 }", "l2_normalize(tensor0, x)", "{ {x:0}:1, {x:1}:3 }");
tester.assertEvaluates("{ {y:0}:81.0 }", "matmul(tensor0, tensor1, x)", "{ {x:0}:15, {x:1}:12 }", "{ {y:0}:3 }");
tester.assertEvaluates("{ {x:0}:0.5, {x:1}:0.5 }", "softmax(tensor0, x)", "{ {x:0}:1, {x:1}:1 }", "{ {y:0}:1 }");
tester.assertEvaluates("{ {x:0,y:0}:81.0, {x:1,y:0}:88.0 }", "xw_plus_b(tensor0, tensor1, tensor2, x)", "{ {x:0}:15, {x:1}:12 }", "{ {y:0}:3 }", "{ {x:0}:0, {x:1}:7 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:0, {x:2}:0, {x:3}:1 }", "argmax(tensor0, x)", "{ {x:0}:15, {x:1}:12, {x:2}:7, {x:3}:15 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0, {x:2}:1, {x:3}:0 }", "argmin(tensor0, x)", "{ {x:0}:15, {x:1}:12, {x:2}:7, {x:3}:15 }");
tester.assertEvaluates("tensor(y{}):{{y:6}:0}}", "matmul(tensor0, diag(x[5],y[7]), x)", "tensor(x{},y{}):{{x:4,y:6}:1})");
tester.assertEvaluates("tensor(y{}):{{y:6}:10}} | class EvaluationTestCase {
// Maximum absolute difference accepted when comparing floating-point results (see assertEquals below).
private final double tolerance = 0.000001;
/**
 * Checks that a string survives a parse/toString round trip:
 * the input is wrapped in double quotes, parsed, and the result must
 * be a StringValue that prints back as exactly the quoted form.
 */
private void verifyStringValueToString(String s) {
    String quoted = '"' + s + '"';
    Value parsed = Value.parse(quoted);
    assertTrue(parsed instanceof StringValue);
    assertEquals(quoted, parsed.toString());
}
@Test
public void testStringValueToString() {
    // Each sample (including escape sequences and backslash runs) must
    // survive a quote/parse/toString round trip unchanged.
    String[] samples = {
            "",
            "something",
            "needs \\\" escape",
            "\\\\",
            "\\\"",
            "\\f",
            "\\female",
            "\\n",
            "\\nude",
            "\\r",
            "fa\\rt",
            "\\t",
            "fe\\tish",
            "\\f",
            "\\\\hx",
            "\\\\xx",
            "\\\\x10081977"
    };
    for (String sample : samples)
        verifyStringValueToString(sample);
}
@Test
public void testEvaluationOrder() {
    // + and - are left-associative; parentheses override the default order.
    EvaluationTester evaluator = new EvaluationTester();
    evaluator.assertEvaluates(-4, "1 + -2 + -3");
    evaluator.assertEvaluates( 2, "1 - (2 - 3)");
    evaluator.assertEvaluates(-4, "(1 - 2) - 3");
    evaluator.assertEvaluates(-4, "1 - 2 - 3");
}
@Test
public void testEvaluation() {
EvaluationTester tester = new EvaluationTester();
// Constants and named context values; unknown identifiers evaluate to 0.
tester.assertEvaluates(0.5, "0.5");
tester.assertEvaluates(-0.5, "-0.5");
tester.assertEvaluates(0.5, "one_half");
tester.assertEvaluates(-0.5, "-one_half");
tester.assertEvaluates(0, "nonexisting");
// Arithmetic: precedence, associativity, % and ^ binding.
tester.assertEvaluates(0.75, "0.5 + 0.25");
tester.assertEvaluates(0.75, "one_half + a_quarter");
tester.assertEvaluates(1.25, "0.5 - 0.25 + one");
tester.assertEvaluates(9.0, "3 ^ 2");
tester.assertEvaluates(1, "if(\"a\"==\"a\",1,0)");
tester.assertEvaluates(26, "2*3+4*5");
tester.assertEvaluates(1, "2/6+4/6");
tester.assertEvaluates(2 * 3 * 4 + 3 * 4 * 5 - 4 * 200 / 10, "2*3*4+3*4*5-4*200/10");
tester.assertEvaluates(3, "1 + 10 % 6 / 2");
tester.assertEvaluates(10.0, "3 ^ 2 + 1");
tester.assertEvaluates(18.0, "2 * 3 ^ 2");
tester.assertEvaluates(-4, "1 - 2 - 3");
tester.assertEvaluates(Math.pow(4, 9), "4^3^2"); // ^ is right-associative: 4^(3^2)
tester.assertEvaluates(2 * (3 * 4 + 3) * (4 * 5 - 4 * 200) / 10, "2*(3*4+3)*(4*5-4*200)/10");
// if(condition, true-value, false-value) with comparison operators;
// ~= is approximate equality.
tester.assertEvaluates(0.5, "if( 2<3, one_half, one_quarter)");
tester.assertEvaluates(0.25,"if( 2>3, one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( 1==1, one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( 1<=1, one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( 1<=1.1, one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( 1>=1.1, one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( 0.33333333333333333333~=1/3, one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( 0.33333333333333333333~=1/35, one_half, a_quarter)");
// Membership ('in') over numbers, quoted strings and bare identifiers.
tester.assertEvaluates(5.5, "if(one_half in [one_quarter,one_half], one_half+5,log(one_quarter) * one_quarter)");
tester.assertEvaluates(0.5, "if( 1 in [1,2 , 3], one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( 1 in [ 2,3,4], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( \"foo\" in [\"foo\",\"bar\"], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( foo in [\"foo\",\"bar\"], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( \"foo\" in [foo,\"bar\"], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( foo in [foo,\"bar\"], one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( \"foo\" in [\"baz\",\"boz\"], one_half, a_quarter)");
tester.assertEvaluates(0.5, "if( one in [0, 1, 2], one_half, a_quarter)");
tester.assertEvaluates(0.25,"if( one in [2], one_half, a_quarter)");
// Numeric if-conditions: any nonzero value counts as true.
tester.assertEvaluates(2.5, "if(1.0, 2.5, 3.5)");
tester.assertEvaluates(3.5, "if(0.0, 2.5, 3.5)");
tester.assertEvaluates(2.5, "if(1.0-1.1, 2.5, 3.5)");
tester.assertEvaluates(3.5, "if(1.0-1.0, 2.5, 3.5)");
// Optional fourth if-argument is recorded as the true-branch probability.
RankingExpression e = tester.assertEvaluates(3.5, "if(1.0-1.0, 2.5, 3.5, 0.3)");
assertEquals(0.3d, ((IfNode) e.getRoot()).getTrueProbability(), tolerance);
// Comparisons yield boolean values; "2>3<1" evaluates as (2>3)<1 here.
tester.assertEvaluates(new BooleanValue(true), "2<3");
tester.assertEvaluates(new BooleanValue(false), "2>3");
tester.assertEvaluates(new BooleanValue(false), "if (3>2, 2>3, 5.0)");
tester.assertEvaluates(new BooleanValue(true), "2>3<1");
tester.assertEvaluates(2.5, "if(2>3<1, 2.5, 3.5)");
tester.assertEvaluates(2.5, "if(1+1>3<1+0, 2.5, 3.5)");
// Math functions; these assertions expect random(...) to evaluate to 0 here.
tester.assertEvaluates(0, "sin(0)");
tester.assertEvaluates(1, "cos(0)");
tester.assertEvaluates(8, "pow(4/2,min(cos(0)*3,5))");
tester.assertEvaluates(0, "random(1)");
tester.assertEvaluates(0, "random(foo)");
// Nested combination of conditionals, membership and math functions.
tester.assertEvaluates(1.25, "5*if(1>=1.1, one_half, if(min(1,2)<max(1,2),if (\"foo\" in [\"foo\",\"bar\"],a_quarter,3000), 0.57345347))");
}
@Test
public void testBooleanEvaluation() {
    EvaluationTester t = new EvaluationTester();

    // Boolean literals.
    t.assertEvaluates(false, "false");
    t.assertEvaluates(true, "true");

    // And: any nonzero operand counts as true.
    t.assertEvaluates(false, "0 && 0");
    t.assertEvaluates(false, "0 && 1");
    t.assertEvaluates(false, "1 && 0");
    t.assertEvaluates(true, "1 && 1");
    t.assertEvaluates(true, "1 && 2");
    t.assertEvaluates(true, "1 && 0.1");

    // Or.
    t.assertEvaluates(false, "0 || 0");
    t.assertEvaluates(true, "0 || 0.1");
    t.assertEvaluates(true, "0 || 1");
    t.assertEvaluates(true, "1 || 0");
    t.assertEvaluates(true, "1 || 1");

    // Not: only zero negates to true.
    t.assertEvaluates(true, "!0");
    t.assertEvaluates(false, "!1");
    t.assertEvaluates(false, "!2");
    t.assertEvaluates(true, "!0 && 1");

    // Boolean results used in arithmetic, and arithmetic in boolean context.
    t.assertEvaluates(0, "2 * (0 && 1)");
    t.assertEvaluates(2, "2 * (1 && 1)");
    t.assertEvaluates(true, "2 + 0 && 1");
    t.assertEvaluates(true, "1 && 0 + 2");
}
/**
 * Tests map_subspaces, which applies a lambda to each dense subspace of a
 * mixed tensor. The original code carried a duplicate {@code @Test}
 * annotation (a compile error, since JUnit's {@code @Test} is not
 * repeatable); the method must be annotated exactly once.
 */
@Test
public void testMapSubspaces() {
    EvaluationTester tester = new EvaluationTester();
    // Identity lambda leaves the tensor unchanged.
    tester.assertEvaluates("tensor<float>(a{},x[2]):{foo:[2,3],bar:[7,10]}",
                           "map_subspaces(tensor0, f(t)(t))",
                           "tensor<float>(a{},x[2]):{foo:[2,3],bar:[7,10]}");
    // Scalar lambda is applied cell-wise within each subspace.
    tester.assertEvaluates("tensor<float>(a{},x[2]):{foo:[2,3],bar:[7,10]}",
                           "map_subspaces(tensor0, f(t)(t+2))",
                           "tensor<float>(a{},x[2]):{foo:[0,1],bar:[5,8]}");
    // Tensor-valued lambda may produce a differently named/sized dense dimension.
    tester.assertEvaluates("tensor<float>(a{},y[2]):{foo:[3,5],bar:[9,11]}",
                           "map_subspaces(tensor0, f(t)(tensor<float>(y[2])(t{x:(y)}+t{x:(y+1)})))",
                           "tensor(a{},x[3]):{foo:[1,2,3],bar:[4,5,6]}");
    // Double-typed lambda over a float input yields a double result.
    tester.assertEvaluates("tensor<double>(a{},x[2]):{foo:[3,5],bar:[9,11]}",
                           "map_subspaces(tensor0, f(t)(tensor(x[2])(t{x:(x)}+t{x:(x+1)})))",
                           "tensor<float>(a{},x[3]):{foo:[1,2,3],bar:[4,5,6]}");
}
@Test
public void testTensorEvaluation() {
EvaluationTester tester = new EvaluationTester();
tester.assertEvaluates("{}", "tensor0", "{}");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:2, {d1:2 }:3 }",
"map(tensor0, f(x) (log10(x)))", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:4, {d1:1}:9, {d1:2 }:16 }",
"map(tensor0, f(x) (x * x))", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:0, {d1:1}:1, {d1:2 }:0 }",
"tensor0 == 3", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:0, {d1:1}:1, {d1:2 }:0 }",
"3 == tensor0", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:2, {d1:2 }:3 }",
"log10(tensor0)", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:-10, {d1:1}:-100, {d1:2 }:-1000 }",
"- tensor0", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:-10, {d1:1}:0, {d1:2 }:0 }",
"min(tensor0, 0)", "{ {d1:0}:-10, {d1:1}:0, {d1:2}:10 }");
tester.assertEvaluates("{ {d1:0}:0, {d1:1}:0, {d1:2 }:10 }",
"max(tensor0, 0)", "{ {d1:0}:-10, {d1:1}:0, {d1:2}:10 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:1, {d1:2 }:1 }",
"tensor0 % 2 == map(tensor0, f(x) (x % 2))", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:1, {d1:2 }:1 }",
"(tensor0 || 1) == map(tensor0, f(x) (x || 1))", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:1, {d1:2 }:1 }",
"(tensor0 && 1) == map(tensor0, f(x) (x && 1))", "{ {d1:0}:2, {d1:1}:3, {d1:2}:4 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:1, {d1:2 }:1 }",
"!tensor0 == map(tensor0, f(x) (!x))", "{ {d1:0}:0, {d1:1}:1, {d1:2}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "abs(tensor0)", "{ {x:0}:1, {x:1}:-2 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "acos(tensor0)", "{ {x:0}:1, {x:1}:1 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "asin(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "atan(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "ceil(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:1 }", "cos(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:1 }", "cosh(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "elu(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:1 }", "exp(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "fabs(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "floor(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "isNan(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "log(tensor0)", "{ {x:0}:1, {x:1}:1 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:1 }", "log10(tensor0)", "{ {x:0}:1, {x:1}:10 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:2 }", "fmod(tensor0, 3)","{ {x:0}:3, {x:1}:8 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:8 }", "pow(tensor0, 3)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:8, {x:1}:16 }", "ldexp(tensor0,3.1)","{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "relu(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:2 }", "round(tensor0)", "{ {x:0}:1, {x:1}:1.8 }");
tester.assertEvaluates("{ {x:0}:0.5, {x:1}:0.5 }", "sigmoid(tensor0)","{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:-1 }", "sign(tensor0)", "{ {x:0}:3, {x:1}:-5 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "sin(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "sinh(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:4 }", "square(tensor0)", "{ {x:0}:1, {x:1}:2 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:3 }", "sqrt(tensor0)", "{ {x:0}:1, {x:1}:9 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "tan(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0 }", "tanh(tensor0)", "{ {x:0}:0, {x:1}:0 }");
tester.assertEvaluates("{ {}:4 }",
"reduce(tensor0, avg, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:4 }",
"reduce(tensor0, count, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:7 }",
"reduce(tensor0, max, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:4 }",
"reduce(tensor0, median, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:1 }",
"reduce(tensor0, min, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:105 }",
"reduce(tensor0, prod, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:16 }",
"reduce(tensor0, sum, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:4 }",
"reduce(tensor0, avg)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:2, {y:1}:6 }",
"reduce(tensor0, avg, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:2, {y:1}:2 }",
"reduce(tensor0, count, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:3, {y:1}:35 }",
"reduce(tensor0, prod, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:4, {y:1}:12 }",
"reduce(tensor0, sum, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:3, {y:1}:7 }",
"reduce(tensor0, max, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {y:0}:1, {y:1}:5 }",
"reduce(tensor0, min, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}: 5 }", "sum(tensor0)", "5.0");
tester.assertEvaluates("{ {}:-5 }", "sum(tensor0)", "-5.0");
tester.assertEvaluates("{ {}:12.5 }", "sum(tensor0)", "{ {d1:0}:5.5, {d1:1}:7.0 }");
tester.assertEvaluates("{ {}: 0 }", "sum(tensor0)", "{ {d1:0}:5.0, {d1:1}:7.0, {d1:2}:-12.0}");
tester.assertEvaluates("{ {}: 8.0 }", "avg(tensor0)", "{ {d1:0}:5.0, {d1:1}:7.0, {d1:2}:12.0}");
tester.assertEvaluates("{ {}: 5.0 }", "median(tensor0)", "{ {d1:0}:5.0, {d1:1}:7.0, {d1:2}:-12.0}");
tester.assertEvaluates("{ {y:0}:4, {y:1}:12.0 }",
"sum(tensor0, x)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {x:0}:6, {x:1}:10.0 }",
"sum(tensor0, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:16 }",
"sum(tensor0, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}: -1 }", "reduce(tensor0, max)", "tensor(x[2]):[-2,-1]");
tester.assertEvaluates("{ {x:0,y:0}:15, {x:1,y:0}:35 }", "join(tensor0, tensor1, f(x,y) (x*y))", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:6, {x:1,y:0}:14 }", "join(tensor0, tensor1, f(x,y) (x+x))", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0}:2, {x:1}:-3 }", "join(tensor0, tensor1, f(x,y) (y-x))", "{ {x:0}:3, {x:1}:7 }", "{ {x:0}:5, {x:1}:4 }");
tester.assertEvaluates("{ }", "tensor0 * tensor0", "{}");
tester.assertEvaluates("{{x:0,y:0,z:0}:0.0}", "( tensor0 * tensor1 ) * ( tensor2 * tensor1 )",
"{{x:0}:1}", "{}", "{{y:0,z:0}:1}");
tester.assertEvaluates("tensor(x{}):{}",
"tensor0 * tensor1", "{ {x:0}:3 }", "tensor(x{}):{ {x:1}:5 }");
tester.assertEvaluates("tensor<double>(x{}):{}",
"tensor0 * tensor1", "{ {x:0}:3 }", "tensor<float>(x{}):{ {x:1}:5 }");
tester.assertEvaluates("{ {x:0}:15 }",
"tensor0 * tensor1", "{ {x:0}:3 }", "{ {x:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:15 }",
"tensor0 * tensor1", "{ {x:0}:3 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:15, {x:1,y:0}:35 }",
"tensor0 * tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:8, {x:1,y:0}:12 }",
"tensor0 + tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:-2, {x:1,y:0}:2 }",
"tensor0 - tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:5, {x:1,y:0}:4 }",
"tensor0 / tensor1", "{ {x:0}:15, {x:1}:12 }", "{ {y:0}:3 }");
tester.assertEvaluates("{ {x:0,y:0}:5, {x:1,y:0}:7 }",
"max(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:3, {x:1,y:0}:5 }",
"min(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:243, {x:1,y:0}:16807 }",
"pow(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:243, {x:1,y:0}:16807 }",
"tensor0 ^ tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:3, {x:1,y:0}:2 }",
"fmod(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:3, {x:1,y:0}:2 }",
"tensor0 % tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:96, {x:1,y:0}:224 }",
"ldexp(tensor0, tensor1)", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5.1 }");
tester.assertEvaluates("{ {x:0,y:0,z:0}:7, {x:0,y:0,z:1}:13, {x:1,y:0,z:0}:21, {x:1,y:0,z:1}:39, {x:0,y:1,z:0}:55, {x:0,y:1,z:1}:0, {x:1,y:1,z:0}:0, {x:1,y:1,z:1}:0 }",
"tensor0 * tensor1", "{ {x:0,y:0}:1, {x:1,y:0}:3, {x:0,y:1}:5, {x:1,y:1}:0 }", "{ {y:0,z:0}:7, {y:1,z:0}:11, {y:0,z:1}:13, {y:1,z:1}:0 }");
tester.assertEvaluates("{ {x:0,y:1,z:0}:35, {x:0,y:1,z:1}:65 }",
"tensor0 * tensor1", "tensor(x{},y{}):{ {x:0,y:0}:1, {x:1,y:0}:3, {x:0,y:1}:5 }", "tensor(y{},z{}):{ {y:1,z:0}:7, {y:2,z:0}:11, {y:1,z:1}:13 })");
tester.assertEvaluates("{{x:0,y:0}:0.0}","tensor1 * tensor2 * tensor3", "{ {x:0}:1 }", "{ {x:1,y:0}:1, {x:0,y:0}:1 }", "{ {x:0,y:0}:1 }");
tester.assertEvaluates("{ {d1:0}:50, {d1:1}:500, {d1:2}:5000 }",
"5 * tensor0", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:13, {d1:1}:103, {d1:2}:1003 }",
"tensor0 + 3","{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {d1:0}:1, {d1:1}:10, {d1:2 }:100 }",
"tensor0 / 10", "{ {d1:0}:10, {d1:1}:100, {d1:2}:1000 }");
tester.assertEvaluates("{ {h:0}:1.5, {h:1}:1.5 }", "0.5 + tensor0", "{ {h:0}:1.0,{h:1}:1.0 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:0 }",
"atan2(tensor0, tensor1)", "{ {x:0}:0, {x:1}:0 }", "{ {y:0}:1 }");
tester.assertEvaluates("{ {x:0,y:0}:2, {x:1,y:0}:7 }",
"hamming(tensor0, tensor1)", "{ {x:0}:97, {x:1}:-1 }", "{ {y:0}:1 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:1 }",
"tensor0 > tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:1, {x:1,y:0}:0 }",
"tensor0 < tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:1 }",
"tensor0 >= tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:1, {x:1,y:0}:0 }",
"tensor0 <= tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:1 }",
"tensor0 == tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:7 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:1 }",
"tensor0 ~= tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:7 }");
tester.assertEvaluates("{ {x:0,y:0}:1, {x:1,y:0}:0 }",
"tensor0 != tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:7 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:0 }",
"tensor0 in [1,2,3]", "{ {x:0}:3, {x:1}:7 }");
tester.assertEvaluates("{ {x:0}:0.1 }", "join(tensor0, 0.1, f(x,y) (x*y))", "{ {x:0}:1 }");
tester.assertEvaluates("{ {x:0}:15, {x:1}:4 }", "merge(tensor0, tensor1, f(x,y) (x*y))", "{ {x:0}:3 }", "{ {x:0}:5, {x:1}:4 }");
tester.assertEvaluates("{ }", "merge(tensor0, tensor1, f(x,y) (x*y))", "{}");
tester.assertEvaluates("{ {x:0,y:0}:1, {x:1,y:0}:0 }",
"tensor0 != tensor1", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:7 }");
tester.assertEvaluates("{ {newX:0,y:0}:3 }", "rename(tensor0, x, newX)", "{ {x:0,y:0}:3.0 }");
tester.assertEvaluates("{ {x:0,y:0}:3, {x:1,y:0}:5 }", "rename(tensor0, (x, y), (y, x))", "{ {x:0,y:0}:3.0, {x:0,y:1}:5.0 }");
tester.assertEvaluates("{ {x:0,y:0}:0, {x:1,y:0}:0, {x:0,y:1}:1, {x:1,y:1}:0, {x:0,y:2}:0, {x:1,y:2}:1 }", "tensor(x[2],y[3])(x+1==y)");
tester.assertEvaluates("{ {y:0,x:0}:0, {y:1,x:0}:0, {y:0,x:1}:1, {y:1,x:1}:0, {y:0,x:2}:0, {y:1,x:2}:1 }", "tensor(y[2],x[3])(y+1==x)");
tester.assertEvaluates("{ {x:0,y:0,z:0}:1 }", "tensor(x[1],y[1],z[1])((x==y)*(y==z))");
tester.assertEvaluates("{ {x:0}:0, {x:1}:1, {x:2}:2 }", "range(x[3])");
tester.assertEvaluates("{ {x:0,y:0,z:0}:1, {x:0,y:0,z:1}:0, {x:0,y:1,z:0}:0, {x:0,y:1,z:1}:0, {x:1,y:0,z:0}:0, {x:1,y:0,z:1}:0, {x:1,y:1,z:0}:0, {x:1,y:1,z:1}:1, }", "diag(x[2],y[2],z[2])");
tester.assertEvaluates("6", "reduce(random(x[2],y[3]), count)");
tester.assertEvaluates("tensor(x[2]):[0.0, 2.0]",
"tensor(x[2]):{{x:0}:tensor(y[2]):{{y:0}:((0+0)+a)," +
"{y:1}:((0+1)+a)}{y:0}," +
"{x:1}:tensor(y[2]):{{y:0}:((1+0)+a)," +
"{y:1}:((1+1)+a)}{y:1}" +
"}");
tester.assertEvaluates("3.0", "tensor0{x:1}", "{ {x:0}:1, {x:1}:3 }");
tester.assertEvaluates("1.2", "tensor0{key:foo,x:0}", true, "{ {key:foo,x:0}:1.2, {key:bar,x:0}:3 }");
tester.assertEvaluates("3.0", "tensor0{bar}", true, "{ {x:foo}:1, {x:bar}:3 }");
tester.assertEvaluates("3.3", "tensor0[2]", "tensor(values[4]):[1.1, 2.2, 3.3, 4.4]]");
tester.assertEvaluates("tensor(x[5]):[0, 1, 2, 3, 4]",
"concat(tensor0, tensor1, x)",
"tensor(x[2]):[0, 1]",
"tensor(x[3]):[2, 3, 4])");
tester.assertEvaluates("{ {x:0}:0.25, {x:1}:0.75 }", "l1_normalize(tensor0, x)", "{ {x:0}:1, {x:1}:3 }");
tester.assertEvaluates("{ {x:0}:0.31622776601683794, {x:1}:0.9486832980505138 }", "l2_normalize(tensor0, x)", "{ {x:0}:1, {x:1}:3 }");
tester.assertEvaluates("{ {y:0}:81.0 }", "matmul(tensor0, tensor1, x)", "{ {x:0}:15, {x:1}:12 }", "{ {y:0}:3 }");
tester.assertEvaluates("{ {x:0}:0.5, {x:1}:0.5 }", "softmax(tensor0, x)", "{ {x:0}:1, {x:1}:1 }", "{ {y:0}:1 }");
tester.assertEvaluates("{ {x:0,y:0}:81.0, {x:1,y:0}:88.0 }", "xw_plus_b(tensor0, tensor1, tensor2, x)", "{ {x:0}:15, {x:1}:12 }", "{ {y:0}:3 }", "{ {x:0}:0, {x:1}:7 }");
tester.assertEvaluates("{ {x:0}:1, {x:1}:0, {x:2}:0, {x:3}:1 }", "argmax(tensor0, x)", "{ {x:0}:15, {x:1}:12, {x:2}:7, {x:3}:15 }");
tester.assertEvaluates("{ {x:0}:0, {x:1}:0, {x:2}:1, {x:3}:0 }", "argmin(tensor0, x)", "{ {x:0}:15, {x:1}:12, {x:2}:7, {x:3}:15 }");
tester.assertEvaluates("tensor(y{}):{{y:6}:0}}", "matmul(tensor0, diag(x[5],y[7]), x)", "tensor(x{},y{}):{{x:4,y:6}:1})");
tester.assertEvaluates("tensor(y{}):{{y:6}:10}} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.