comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
Why not throw `InvalidApplicationException` instead? | public HttpResponse handle(HttpRequest request) {
log.log(Level.FINE, () -> request.getMethod() + " " + request.getUri().toString());
try {
return switch (request.getMethod()) {
case POST -> handlePOST(request);
case GET -> handleGET(request);
case PUT -> handlePUT(request);
case DELETE -> handleDELETE(request);
default -> createErrorResponse(request.getMethod());
};
} catch (NotFoundException | com.yahoo.vespa.config.server.NotFoundException e) {
return HttpErrorResponse.notFoundError(getMessage(e, request));
} catch (ActivationConflictException e) {
return HttpErrorResponse.conflictWhenActivating(getMessage(e, request));
} catch (InvalidApplicationException e) {
return HttpErrorResponse.invalidApplicationPackage(getMessage(e, request));
} catch (IllegalArgumentException | UnsupportedOperationException e) {
return HttpErrorResponse.badRequest(getMessage(e, request));
} catch (NodeAllocationException e) {
return e.retryable() ? HttpErrorResponse.nodeAllocationFailure(getMessage(e, request))
: HttpErrorResponse.invalidApplicationPackage(getMessage(e, request));
} catch (InternalServerException e) {
return HttpErrorResponse.internalServerError(getMessage(e, request));
} catch (UnknownVespaVersionException e) {
return HttpErrorResponse.unknownVespaVersion(getMessage(e, request));
} catch (RequestTimeoutException e) {
return HttpErrorResponse.requestTimeout(getMessage(e, request));
} catch (ApplicationLockException e) {
return HttpErrorResponse.applicationLockFailure(getMessage(e, request));
} catch (ParentHostUnavailableException e) {
return HttpErrorResponse.parentHostNotReady(getMessage(e, request));
} catch (CertificateNotReadyException e) {
return HttpErrorResponse.certificateNotReady(getMessage(e, request));
} catch (ConfigNotConvergedException e) {
return HttpErrorResponse.configNotConverged(getMessage(e, request));
} catch (LoadBalancerServiceException e) {
return HttpErrorResponse.loadBalancerNotReady(getMessage(e, request));
} catch (ReindexingStatusException e) {
return HttpErrorResponse.reindexingStatusUnavailable(getMessage(e, request));
} catch (Exception e) {
log.log(Level.WARNING, "Unexpected exception handling a config server request", e);
return HttpErrorResponse.internalServerError(getMessage(e, request));
}
} | return e.retryable() ? HttpErrorResponse.nodeAllocationFailure(getMessage(e, request)) | public HttpResponse handle(HttpRequest request) {
log.log(Level.FINE, () -> request.getMethod() + " " + request.getUri().toString());
try {
return switch (request.getMethod()) {
case POST -> handlePOST(request);
case GET -> handleGET(request);
case PUT -> handlePUT(request);
case DELETE -> handleDELETE(request);
default -> createErrorResponse(request.getMethod());
};
} catch (NotFoundException | com.yahoo.vespa.config.server.NotFoundException e) {
return HttpErrorResponse.notFoundError(getMessage(e, request));
} catch (ActivationConflictException e) {
return HttpErrorResponse.conflictWhenActivating(getMessage(e, request));
} catch (InvalidApplicationException e) {
return HttpErrorResponse.invalidApplicationPackage(getMessage(e, request));
} catch (IllegalArgumentException | UnsupportedOperationException e) {
return HttpErrorResponse.badRequest(getMessage(e, request));
} catch (NodeAllocationException e) {
return e.retryable() ? HttpErrorResponse.nodeAllocationFailure(getMessage(e, request))
: HttpErrorResponse.invalidApplicationPackage(getMessage(e, request));
} catch (InternalServerException e) {
return HttpErrorResponse.internalServerError(getMessage(e, request));
} catch (UnknownVespaVersionException e) {
return HttpErrorResponse.unknownVespaVersion(getMessage(e, request));
} catch (RequestTimeoutException e) {
return HttpErrorResponse.requestTimeout(getMessage(e, request));
} catch (ApplicationLockException e) {
return HttpErrorResponse.applicationLockFailure(getMessage(e, request));
} catch (ParentHostUnavailableException e) {
return HttpErrorResponse.parentHostNotReady(getMessage(e, request));
} catch (CertificateNotReadyException e) {
return HttpErrorResponse.certificateNotReady(getMessage(e, request));
} catch (ConfigNotConvergedException e) {
return HttpErrorResponse.configNotConverged(getMessage(e, request));
} catch (LoadBalancerServiceException e) {
return HttpErrorResponse.loadBalancerNotReady(getMessage(e, request));
} catch (ReindexingStatusException e) {
return HttpErrorResponse.reindexingStatusUnavailable(getMessage(e, request));
} catch (Exception e) {
log.log(Level.WARNING, "Unexpected exception handling a config server request", e);
return HttpErrorResponse.internalServerError(getMessage(e, request));
}
} | class HttpHandler extends ThreadedHttpRequestHandler {
public HttpHandler(HttpHandler.Context ctx) {
super(ctx);
}
@Override
protected static Duration getRequestTimeout(HttpRequest request, Duration defaultTimeout) {
if ( ! request.hasProperty("timeout")) {
return defaultTimeout;
}
try {
return Duration.ofMillis((long) (Double.parseDouble(request.getProperty("timeout")) * 1000));
} catch (Exception e) {
return defaultTimeout;
}
}
private String getMessage(Exception e, HttpRequest request) {
if (request.getBooleanProperty("debug")) {
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
e.printStackTrace(pw);
return sw.toString();
} else {
return Exceptions.toMessageString(e);
}
}
/**
* Default implementation of handler for GET requests. Returns an error response.
* Override this method to handle GET requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handleGET(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for POST requests. Returns an error response.
* Override this method to handle POST requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handlePOST(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for PUT requests. Returns an error response.
* Override this method to handle POST requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handlePUT(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for DELETE requests. Returns an error response.
* Override this method to handle DELETE requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handleDELETE(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Creates error response when method is not handled
*
* @return an error response with response code 405
*/
private HttpResponse createErrorResponse(com.yahoo.jdisc.http.HttpRequest.Method method) {
return HttpErrorResponse.methodNotAllowed("Method '" + method + "' is not supported");
}
} | class HttpHandler extends ThreadedHttpRequestHandler {
public HttpHandler(HttpHandler.Context ctx) {
super(ctx);
}
@Override
protected static Duration getRequestTimeout(HttpRequest request, Duration defaultTimeout) {
if ( ! request.hasProperty("timeout")) {
return defaultTimeout;
}
try {
return Duration.ofMillis((long) (Double.parseDouble(request.getProperty("timeout")) * 1000));
} catch (Exception e) {
return defaultTimeout;
}
}
private String getMessage(Exception e, HttpRequest request) {
if (request.getBooleanProperty("debug")) {
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
e.printStackTrace(pw);
return sw.toString();
} else {
return Exceptions.toMessageString(e);
}
}
/**
* Default implementation of handler for GET requests. Returns an error response.
* Override this method to handle GET requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handleGET(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for POST requests. Returns an error response.
* Override this method to handle POST requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handlePOST(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for PUT requests. Returns an error response.
* Override this method to handle POST requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handlePUT(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for DELETE requests. Returns an error response.
* Override this method to handle DELETE requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handleDELETE(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Creates error response when method is not handled
*
* @return an error response with response code 405
*/
private HttpResponse createErrorResponse(com.yahoo.jdisc.http.HttpRequest.Method method) {
return HttpErrorResponse.methodNotAllowed("Method '" + method + "' is not supported");
}
} |
Hmm ... supporting `dev/perf` here isn't trivial, and perhaps not that useful? | private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
if (controller.applications().getApplication(applicationId).isEmpty())
createApplication(tenantName, applicationName, request);
controller.applications().createInstance(applicationId.instance(instanceName), Tags.empty());
Slime slime = new Slime();
toSlime(applicationId.instance(instanceName), slime.setObject(), request);
return new SlimeJsonResponse(slime);
} | controller.applications().createInstance(applicationId.instance(instanceName), Tags.empty()); | private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
if (controller.applications().getApplication(applicationId).isEmpty())
createApplication(tenantName, applicationName, request);
controller.applications().createInstance(applicationId.instance(instanceName), Tags.empty());
Slime slime = new Slime();
toSlime(applicationId.instance(instanceName), slime.setObject(), request);
return new SlimeJsonResponse(slime);
} | class ApplicationApiHandler extends AuditLoggingRequestHandler {
private static final ObjectMapper jsonMapper = new ObjectMapper();
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
@Inject
public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx, controller.auditLogger());
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
try {
Path path = new Path(request.getUri());
return switch (request.getMethod()) {
case GET: yield handleGET(path, request);
case PUT: yield handlePUT(path, request);
case POST: yield handlePOST(path, request);
case PATCH: yield handlePATCH(path, request);
case DELETE: yield handleDELETE(path, request);
case OPTIONS: yield handleOPTIONS();
default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
};
}
catch (RestApiException.Forbidden e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (RestApiException.Unauthorized e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
return switch (e.code()) {
case NOT_FOUND: yield ErrorResponse.notFoundError(Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT: yield new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR: yield ErrorResponses.logThrowing(request, log, e);
default: yield new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
};
}
catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePUT(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests under /application/v4; first matching pattern wins, otherwise 404. */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
    // Both the application path and the instance path patch the application itself;
    // the instance segment is accepted but intentionally ignored.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}"))
        return patchApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}"))
        return patchApplication(path.get("tenant"), path.get("application"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes DELETE requests under /application/v4 to their handlers.
 * Patterns are tried in declaration order; the first match wins, and no match yields 404.
 */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
    // Tenant-level resources.
    if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
    // NOTE(review): the bare archive-access path and the /aws variant are aliases — presumably kept for backwards compatibility; confirm before removing either.
    if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
    // Application-level resources; deploy cancellation without an instance segment targets the "default" instance.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
    // Instance-level resources.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
    // DELETE on the pause resource resumes the job.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
    // Deployment (zone) level resources.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    // Legacy path order with the instance segment last.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS with an Allow header advertising every verb this handler implements. */
private HttpResponse handleOPTIONS() {
    var response = new EmptyResponse();
    response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return response;
}
/** Lists all tenants, each with the applications it owns, as a JSON array. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantsOut = slime.setArray();
    // Fetch all applications once, then partition them per tenant below.
    List<Application> allApplications = controller.applications().asList();
    for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) {
        List<Application> ownedByTenant = allApplications.stream()
                .filter(application -> application.id().tenant().equals(tenant.name()))
                .collect(toList());
        toSlime(tenantsOut.addObject(), tenant, ownedByTenant, request);
    }
    return new SlimeJsonResponse(slime);
}
/** Serves the API root: a recursive tenant listing when requested, otherwise just the resource links. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request)) return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Lists all tenants (optionally including deleted ones) in the compact tenant-list format. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
        tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject());
    return new SlimeJsonResponse(slime);
}
/** Serves a single tenant by name, or 404 if it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName), includeDeleted(request));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Renders a tenant, together with all its applications, as a JSON object. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, tenant, controller.applications().asList(tenant.name()), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Reports the managed-access flag, any pending operator access request, and the access
 * audit log for a cloud tenant. Returns 400 for non-cloud tenants.
 */
private HttpResponse accessRequests(String tenantName, HttpRequest request) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    var accessControlService = controller.serviceRegistry().accessControlService();
    var roleInformation = accessControlService.getAccessRoleInformation(tenant);
    var managedAccess = accessControlService.getManagedAccess(tenant);

    var slime = new Slime();
    var root = slime.setObject();
    root.setBool("managedAccess", managedAccess);
    roleInformation.getPendingRequest().ifPresent(pending -> {
        var pendingObject = root.setObject("pendingRequest");
        pendingObject.setString("requestTime", pending.getCreationTime());
        pendingObject.setString("reason", pending.getReason());
    });
    var logArray = root.setArray("auditLog");
    roleInformation.getAuditLog().forEach(entry -> {
        var entryObject = logArray.addObject();
        entryObject.setString("created", entry.getCreationTime());
        entryObject.setString("approver", entry.getApprover());
        entryObject.setString("reason", entry.getReason());
        entryObject.setString("status", entry.getAction());
    });
    return new SlimeJsonResponse(slime);
}
/**
 * Requests ssh access to a cloud tenant's resources. Operator-only; 403 otherwise,
 * 400 for non-cloud tenants.
 */
private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
    if (!isOperator(request)) {
        return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
    }
    // Parse the tenant name once instead of re-parsing it for each use.
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only request access for cloud tenants");
    controller.serviceRegistry().accessControlService().requestSshAccess(tenant);
    return new MessageResponse("OK");
}
/**
 * Approves or rejects a pending ssh access request for a cloud tenant.
 * Request body fields: optional "expiry" (epoch millis; defaults to 24h from now)
 * and "approve" (boolean). Returns 400 for non-cloud tenants.
 */
private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    var inspector = toSlime(request.getData()).get();
    // Default expiry is one day when the caller does not provide one.
    var expiry = inspector.field("expiry").valid() ?
            Instant.ofEpochMilli(inspector.field("expiry").asLong()) :
            Instant.now().plus(1, ChronoUnit.DAYS);
    var approve = inspector.field("approve").asBool();
    // The decision is attributed to the caller's Auth0 credentials.
    controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
    return new MessageResponse("OK");
}
/** Enables managed access for the given tenant. */
private HttpResponse addManagedAccess(String tenantName) {
    return setManagedAccess(tenantName, true);
}
/** Disables managed access for the given tenant. */
private HttpResponse removeManagedAccess(String tenantName) {
    return setManagedAccess(tenantName, false);
}
/**
 * Sets the managed-access flag for a cloud tenant and echoes the new value.
 * Returns 400 for non-cloud tenants.
 */
private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        // Fixed typo in user-facing message: "privel" -> "privileges".
        return ErrorResponse.badRequest("Can only set access privileges for cloud tenants");
    controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
    var slime = new Slime();
    slime.setObject().setBool("managedAccess", managedAccess);
    return new SlimeJsonResponse(slime);
}
/** Serves the registered tenant info for a cloud tenant, or 404 for missing/non-cloud tenants. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    return controller.tenants().get(TenantName.from(tenantName))
            .filter(tenant -> tenant.type() == Tenant.Type.cloud)
            .map(CloudTenant.class::cast)
            .map(tenant -> tenantInfo(tenant.info(), request))
            .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/** Applies the handler to the named tenant when it exists and is a cloud tenant; 404 otherwise. */
private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
    return controller.tenants().get(TenantName.from(tenantName))
            .filter(tenant -> tenant.type() == Tenant.Type.cloud)
            .map(CloudTenant.class::cast)
            .map(handler)
            .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/** Renders tenant info as flat JSON; an empty object when no info has been registered. */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if (!info.isEmpty()) {
        root.setString("name", info.name());
        root.setString("email", info.email());
        root.setString("website", info.website());
        root.setString("contactName", info.contact().name());
        root.setString("contactEmail", info.contact().email());
        toSlime(info.address(), root);
        toSlime(info.billingContact(), root);
        toSlime(info.contacts(), root);
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Renders the "profile" view of a cloud tenant's info: contact name/email, company
 * name, website and address. Produces an empty object when no info is registered.
 */
private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
    var slime = new Slime();
    var root = slime.setObject();
    var info = cloudTenant.info();
    if (!info.isEmpty()) {
        var contact = root.setObject("contact");
        contact.setString("name", info.contact().name());
        contact.setString("email", info.contact().email());
        var tenant = root.setObject("tenant");
        // In the profile view the tenant-info "name" is presented as the company name.
        tenant.setString("company", info.name());
        tenant.setString("website", info.website());
        toSlime(info.address(), root);
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Applies the handler to the named cloud tenant together with the parsed request body;
 * 404 when the tenant does not exist or is not a cloud tenant.
 */
private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
    return controller.tenants().get(tenantName)
            // Filter on type before casting, like the Function-taking overload, so a
            // non-cloud tenant yields 404 instead of a ClassCastException (500).
            .filter(tenant -> tenant.type() == Tenant.Type.cloud)
            .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get()))
            .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/**
 * Updates the "profile" part of a cloud tenant's info from the request body.
 * Fields absent from the body keep their current values; the merged result is
 * validated before being stored under the tenant lock.
 */
private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    var mergedContact = TenantContact.empty()
            .withName(getString(inspector.field("contact").field("name"), info.contact().name()))
            .withEmail(getString(inspector.field("contact").field("email"), info.contact().email()));
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address());
    var mergedInfo = info
            .withName(getString(inspector.field("tenant").field("name"), info.name()))
            .withWebsite(getString(inspector.field("tenant").field("website"), info.website()))
            .withContact(mergedContact)
            .withAddress(mergedAddress);
    // Reject inconsistent merges (blank contact, malformed email/website) before storing.
    validateMergedTenantInfo(mergedInfo);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/** Renders the billing contact (name/email/phone plus address) of a cloud tenant; empty object when no info exists. */
private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) {
    var slime = new Slime();
    var root = slime.setObject();
    var info = cloudTenant.info();
    if (!info.isEmpty()) {
        var billing = info.billingContact();
        var contactObject = root.setObject("contact");
        contactObject.setString("name", billing.contact().name());
        contactObject.setString("email", billing.contact().email());
        contactObject.setString("phone", billing.contact().phone());
        toSlime(billing.address(), root);
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Updates the billing contact of a cloud tenant from the request body.
 * Absent fields keep their current values; the merged info is stored under the tenant lock.
 */
private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    // Removed unused/redundant locals: merge directly from the current billing contact.
    var mergedContact = updateTenantInfoContact(inspector.field("contact"), info.billingContact().contact());
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address());
    var mergedBilling = info.billingContact()
            .withContact(mergedContact)
            .withAddress(mergedAddress);
    var mergedInfo = info.withBilling(mergedBilling);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/** Renders the contact list of a cloud tenant as JSON. */
private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
    var slime = new Slime();
    toSlime(cloudTenant.info().contacts(), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Replaces the contact list of a cloud tenant with the one in the request body, under the tenant lock. */
private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    var mergedInfo = info.withContacts(updateTenantInfoContacts(inspector.field("contacts"), info.contacts()));
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withInfo(mergedInfo)));
    return new MessageResponse("Tenant info updated");
}
/**
 * Validates merged tenant info before it is stored.
 *
 * @throws IllegalArgumentException when the contact name or email is blank, the email
 *         lacks an '@', or a non-blank website is not a parseable URL
 */
private void validateMergedTenantInfo(TenantInfo mergedInfo) {
    if (mergedInfo.contact().name().isBlank()) {
        throw new IllegalArgumentException("'contactName' cannot be empty");
    }
    if (mergedInfo.contact().email().isBlank()) {
        throw new IllegalArgumentException("'contactEmail' cannot be empty");
    }
    // Minimal email sanity check only — full RFC validation is intentionally not attempted.
    if (! mergedInfo.contact().email().contains("@")) {
        throw new IllegalArgumentException("'contactEmail' needs to be an email address");
    }
    if (! mergedInfo.website().isBlank()) {
        try {
            // URL construction is used purely for validation; the instance is discarded.
            new URL(mergedInfo.website());
        } catch (MalformedURLException e) {
            throw new IllegalArgumentException("'website' needs to be a valid address");
        }
    }
}
/** Writes a non-empty address as an "address" object under the given cursor; no-op for empty addresses. */
private void toSlime(TenantAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;
    Cursor out = parentCursor.setObject("address");
    out.setString("addressLines", address.address());
    out.setString("postalCodeOrZip", address.code());
    out.setString("city", address.city());
    out.setString("stateRegionProvince", address.region());
    out.setString("country", address.country());
}
/** Writes a non-empty billing contact as a "billingContact" object under the given cursor; no-op when empty. */
private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;
    Cursor out = parentCursor.setObject("billingContact");
    out.setString("name", billingContact.contact().name());
    out.setString("email", billingContact.contact().email());
    out.setString("phone", billingContact.contact().phone());
    toSlime(billingContact.address(), out);
}
/** Writes all tenant contacts as a "contacts" array; only email contacts are currently serializable. */
private void toSlime(TenantContacts contacts, Cursor parentCursor) {
    Cursor contactsCursor = parentCursor.setArray("contacts");
    contacts.all().forEach(contact -> {
        Cursor contactCursor = contactsCursor.addObject();
        Cursor audiencesArray = contactCursor.setArray("audiences");
        contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
        // Arrow switch, matching the switch-expression style used elsewhere in this file.
        switch (contact.type()) {
            case EMAIL -> {
                var email = (TenantContacts.EmailContact) contact;
                contactCursor.setString("email", email.email());
            }
            default -> throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
        }
    });
}
/** Parses a wire-format audience string; throws IllegalArgumentException for unknown values. */
private static TenantContacts.Audience fromAudience(String value) {
    return switch (value) {
        case "tenant" -> TenantContacts.Audience.TENANT;
        case "notifications" -> TenantContacts.Audience.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
    };
}
/** Serializes an audience to its wire-format string; exhaustive over the enum, so no default. */
private static String toAudience(TenantContacts.Audience audience) {
    return switch (audience) {
        case TENANT -> "tenant";
        case NOTIFICATIONS -> "notifications";
    };
}
/** Updates the info of a cloud tenant from the request, or 404 for missing/non-cloud tenants. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty() || tenant.get().type() != Tenant.Type.cloud)
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return updateTenantInfo((CloudTenant) tenant.get(), request);
}
/** Returns the trimmed string value of the field when present, otherwise the given default. */
private String getString(Inspector field, String defaultValue) {  // fixed parameter typo: "defaultVale"
    return field.valid() ? field.asString().trim() : defaultValue;
}
/**
 * Merges the request body into the tenant's existing info: fields absent from the body
 * keep their current values. The merged result is validated, then stored under the
 * tenant lock.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    // Note: this flat view uses "contactName"/"contactEmail", unlike the nested
    // "contact" object used by the profile view.
    TenantContact mergedContact = TenantContact.empty()
            .withName(getString(insp.field("contactName"), oldInfo.contact().name()))
            .withEmail(getString(insp.field("contactEmail"), oldInfo.contact().email()));
    TenantInfo mergedInfo = TenantInfo.empty()
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            .withWebsite(getString(insp.field("website"), oldInfo.website()))
            .withContact(mergedContact)
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()))
            .withContacts(updateTenantInfoContacts(insp.field("contacts"), oldInfo.contacts()));
    // Reject inconsistent merges (blank contact, malformed email/website) before storing.
    validateMergedTenantInfo(mergedInfo);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/**
 * Merges an address from the request body into the existing address; absent fields keep
 * their current values. The merged address must be all-or-none: either every field is
 * blank, or every field is set.
 *
 * @throws IllegalArgumentException when only some of the address fields are set
 */
private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
    if (!insp.valid()) return oldAddress;
    TenantAddress address = TenantAddress.empty()
            .withCountry(getString(insp.field("country"), oldAddress.country()))
            .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
            .withCity(getString(insp.field("city"), oldAddress.city()))
            .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
            .withAddress(getString(insp.field("addressLines"), oldAddress.address()));

    List<String> fields = List.of(address.address(),
            address.code(),
            address.country(),
            address.city(),
            address.region());

    // Valid iff all blank (no address) or none blank (complete address).
    if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank))
        return address;

    throw new IllegalArgumentException("All address fields must be set");
}
/**
 * Merges a contact from the request body into the existing contact; absent fields keep
 * their current values.
 *
 * @throws IllegalArgumentException when a non-blank merged email lacks an '@'
 */
private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) {
    if (!insp.valid()) return oldContact;
    String email = getString(insp.field("email"), oldContact.email());
    if (!email.isBlank() && !email.contains("@")) {
        throw new IllegalArgumentException("'email' needs to be an email address");
    }
    return TenantContact.empty()
            .withName(getString(insp.field("name"), oldContact.name()))
            .withEmail(email)  // reuse the value validated above instead of recomputing it
            .withPhone(getString(insp.field("phone"), oldContact.phone()));
}
/** Merges a billing contact (contact fields plus address) from the request body into the existing one. */
private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) {
    if (!insp.valid()) return oldContact;
    TenantContact mergedContact = updateTenantInfoContact(insp, oldContact.contact());
    TenantAddress mergedAddress = updateTenantInfoAddress(insp.field("address"), oldContact.address());
    return TenantBilling.empty().withContact(mergedContact).withAddress(mergedAddress);
}
/** Parses the full contact list from the request body, replacing the old list; each entry must have a valid email. */
private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) {
    if (!insp.valid()) return oldContacts;
    List<TenantContacts.EmailContact> parsedContacts = SlimeUtils.entriesStream(insp).map(contactInspector -> {
        String email = contactInspector.field("email").asString().trim();
        List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(contactInspector.field("audiences"))
                .map(audience -> fromAudience(audience.asString()))
                .toList();
        if (!email.contains("@")) {
            throw new IllegalArgumentException("'email' needs to be an email address");
        }
        return new TenantContacts.EmailContact(audiences, email);
    }).toList();
    return new TenantContacts(parsedContacts);
}
/**
 * Lists notifications, optionally restricted to one tenant, filtered by the request
 * properties "application", "instance", "zone", "job", "type" and "level". A filter
 * property matches only when the notification source has that field with an equal value.
 *
 * @param includeTenantFieldInResponse whether each entry should carry its tenant name
 *        (used when listing across tenants)
 */
private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
    boolean productionOnly = showOnlyProductionInstances(request);
    boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
    Slime slime = new Slime();
    Cursor notificationsArray = slime.setObject().setArray("notifications");

    // One tenant when given, otherwise every tenant that has notifications.
    tenant.map(t -> Stream.of(TenantName.from(t)))
            .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
            .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
            .filter(notification ->
                    propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
                    propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
                    propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
                    propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
                    propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
                    propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
            .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
    return new SlimeJsonResponse(slime);
}
/**
 * True when the request property is absent (no filtering), or when it is present,
 * the candidate value is present, and the mapped property equals the candidate.
 */
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
    String propertyValue = request.getProperty(property);
    if (propertyValue == null) return true;  // filter not given — match everything
    return value.isPresent() && mapper.apply(propertyValue).equals(value.get());
}
/**
 * Serializes one notification to the given cursor. Source fields (application, instance,
 * zone, cluster, job, run) are written only when present on the notification source.
 */
private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
    cursor.setLong("at", notification.at().toEpochMilli());
    cursor.setString("level", notificationLevelAsString(notification.level()));
    cursor.setString("type", notificationTypeAsString(notification.type()));
    if (!excludeMessages) {
        Cursor messagesArray = cursor.setArray("messages");
        notification.messages().forEach(messagesArray::addString);
    }
    // Tenant is included only for cross-tenant listings; single-tenant listings imply it.
    if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
    notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
    notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
    notification.source().zoneId().ifPresent(zoneId -> {
        cursor.setString("environment", zoneId.environment().value());
        cursor.setString("region", zoneId.region().value());
    });
    notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
    notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
    notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
/** Maps a notification type to its wire-format string; submission and applicationPackage share one label. */
private static String notificationTypeAsString(Notification.Type type) {
    return switch (type) {
        case submission, applicationPackage -> "applicationPackage";
        case testPackage -> "testPackage";
        case deployment -> "deployment";
        case feedBlock -> "feedBlock";
        case reindex -> "reindex";
    };
}
/** Maps a notification level to its wire-format string. */
private static String notificationLevelAsString(Notification.Level level) {
    return switch (level) {
        case info -> "info";
        case warning -> "warning";
        case error -> "error";
    };
}
/**
 * Lists applications of a tenant — all of them, or just the named one — with their
 * instances and self URLs.
 *
 * @throws NotExistsException when the named application does not exist
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    // Called for its exception: 404s when the tenant itself does not exist.
    getTenantOrThrow(tenantName);
    List<Application> applications = applicationName.isEmpty() ?
            controller.applications().asList(tenant) :
            controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
                    .map(List::of)
                    .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));

    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (Application application : applications) {
        Cursor applicationObject = applicationArray.addObject();
        applicationObject.setString("tenant", application.id().tenant().value());
        applicationObject.setString("application", application.id().application().value());
        applicationObject.setString("url", withPath("/application/v4" +
                "/tenant/" + application.id().tenant().value() +
                "/application/" + application.id().application().value(),
                request.getUri()).toString());
        Cursor instanceArray = applicationObject.setArray("instances");
        // Optionally restrict the instance list to production instances.
        for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                : application.instances().keySet()) {
            Cursor instanceObject = instanceArray.addObject();
            instanceObject.setString("instance", instance.value());
            instanceObject.setString("url", withPath("/application/v4" +
                    "/tenant/" + application.id().tenant().value() +
                    "/application/" + application.id().application().value() +
                    "/instance/" + instance.value(),
                    request.getUri()).toString());
        }
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Serves the application package of the last run of the given dev/perf job as a zip.
 *
 * @throws NotExistsException when no run of the job exists for the application
 */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    ZoneId zone = type.zone();
    // Replaced unchecked Optional.get() with orElseThrow, so a missing run yields a
    // descriptive 404 instead of a NoSuchElementException (500).
    RevisionId revision = controller.jobController().last(id, type)
            .orElseThrow(() -> new NotExistsException("no run of " + type.jobName() + " found for " + id))
            .versions().targetRevision();
    byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
    return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
}
/** Serves the stored package diff for a dev deployment run, or throws NotExistsException when absent. */
private HttpResponse devApplicationPackageDiff(RunId runId) {
    DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone());
    var diff = controller.applications().applicationStore().getDevDiff(deploymentId, runId.number());
    if (diff.isEmpty())
        throw new NotExistsException("No application package diff found for " + runId);
    return new ByteArrayResponse(diff.get());
}
/**
 * Serves an application (or tester) package as a zip. The "build" property selects the
 * revision: a positive build number, "latestDeployed" for the last production-deployed
 * revision, or absent for the last submitted revision. The "tests" boolean property
 * selects the tester package instead of the application package.
 *
 * @throws NotExistsException        when no matching package has been submitted/deployed
 * @throws IllegalArgumentException  when "build" is not a valid positive number
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    final long build;
    String requestedBuild = request.getProperty("build");
    if (requestedBuild != null) {
        if (requestedBuild.equals("latestDeployed")) {
            build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision()
                    .map(RevisionId::number)
                    .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication));
        } else {
            try {
                // Use the already-read property value instead of fetching it again.
                build = Validation.requireAtLeast(Long.parseLong(requestedBuild), "build number", 1L);
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
            }
        }
    } else {
        build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
                .map(version -> version.id().number())
                .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
    }
    RevisionId revision = RevisionId.forProduction(build);
    boolean tests = request.getBooleanProperty("tests");
    byte[] applicationPackage = tests ?
            controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
            controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
    String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
    return new ZipResponse(filename, applicationPackage);
}
/**
 * Serves the stored package diff for the given build of an application.
 *
 * @throws IllegalArgumentException when the build number is not a valid number (400
 *         instead of the 500 a bare NumberFormatException would produce)
 * @throws NotExistsException when no diff is stored for that build
 */
private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
    long build;
    try {
        build = Long.parseLong(number);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("invalid value for build number: '" + number + "'", e);
    }
    return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), build)
            .map(ByteArrayResponse::new)
            .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
}
/** Serves a single application as JSON. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Serves the Vespa version an application should compile against, optionally allowing a
 * specific major version via the allowMajorParam request parameter.
 */
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
    OptionalInt allowMajor = OptionalInt.empty();
    if (allowMajorParam != null) {
        try {
            allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
        }
    }
    Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
    Slime slime = new Slime();
    slime.setObject().setString("compileVersion", compileVersion.toFullString());
    return new SlimeJsonResponse(slime);
}
/** Serves a single instance, including its deployment status, as JSON. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, getInstance(tenantName, applicationName, instanceName),
            controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
    return new SlimeJsonResponse(slime);
}
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
Principal user = request.getJDiscRequest().getUserPrincipal();
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Slime root = new Slime();
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withDeveloperKey(developerKey, user);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
    /**
     * Validates that a tenant secret store is usable from a deployment, by asking the config
     * server in the given zone to read the named parameter through the store.
     *
     * Query parameters: "aws-region", "parameter-name", "application-id" (must belong to the
     * given tenant) and "zone". Returns the config server's result wrapped as
     * { "target": &lt;deployment&gt;, "result": &lt;response&gt; }.
     */
    private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
        var awsRegion = request.getProperty("aws-region");
        var parameterName = request.getProperty("parameter-name");
        var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
        // The application used for validation must belong to the tenant owning the secret store.
        if (!applicationId.tenant().equals(TenantName.from(tenantName)))
            return ErrorResponse.badRequest("Invalid application id");
        var zoneId = requireZone(ZoneId.from(request.getProperty("zone")));
        var deploymentId = new DeploymentId(applicationId, zoneId);
        var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
        // Look the secret store up by name among the tenant's configured stores.
        var tenantSecretStore = tenant.tenantSecretStores()
                .stream()
                .filter(secretStore -> secretStore.getName().equals(secretStoreName))
                .findFirst();
        if (tenantSecretStore.isEmpty())
            return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
        var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
        try {
            // Re-parse the config server's JSON and embed it under "result".
            var responseRoot = new Slime();
            var responseCursor = responseRoot.setObject();
            responseCursor.setString("target", deploymentId.toString());
            var responseResultCursor = responseCursor.setObject("result");
            var responseSlime = SlimeUtils.jsonToSlime(response);
            SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
            return new SlimeJsonResponse(responseRoot);
        } catch (JsonParseException e) {
            // The config server returned something that is not valid JSON; log and return an error.
            return ErrorResponses.logThrowing(request, log, e);
        }
    }
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
Slime root = new Slime();
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withoutDeveloperKey(developerKey);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
private void toSlime(Cursor keysArray, Map<PublicKey, ? extends Principal> keys) {
keys.forEach((key, principal) -> {
Cursor keyObject = keysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", principal.getName());
});
}
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withoutDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
    /**
     * Adds a secret store to a cloud tenant: creates the backing tenant policy, registers the
     * store with the tenant secret service, then persists it on the tenant.
     *
     * Request body: { "awsId": ..., "externalId": ..., "role": ... }. Returns the tenant's
     * resulting list of secret stores.
     */
    private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        var data = toSlime(request.getData()).get();
        var awsId = mandatory("awsId", data).asString();
        var externalId = mandatory("externalId", data).asString();
        var role = mandatory("role", data).asString();
        var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var tenantSecretStore = new TenantSecretStore(name, awsId, role);
        if (!tenantSecretStore.isValid()) {
            return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
        }
        if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
            return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
        }
        // Provision external resources first; only persist the store once both calls succeed.
        controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
        controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
        controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
            controller.tenants().store(lockedTenant);
        });
        // Re-read the tenant so the response reflects the stored state.
        tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var slime = new Slime();
        toSlime(slime.setObject(), tenant.tenantSecretStores());
        return new SlimeJsonResponse(slime);
    }
    /**
     * Deletes a named secret store from a cloud tenant: removes it from the tenant secret
     * service, deletes the backing tenant policy, then unpersists it from the tenant.
     * Returns the tenant's remaining secret stores.
     */
    private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
        var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var optionalSecretStore = tenant.tenantSecretStores().stream()
                .filter(secretStore -> secretStore.getName().equals(name))
                .findFirst();
        if (optionalSecretStore.isEmpty())
            return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
        var tenantSecretStore = optionalSecretStore.get();
        // Tear down external resources first, then remove the persisted store.
        controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
        controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
        controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
            controller.tenants().store(lockedTenant);
        });
        // Re-read the tenant so the response reflects the stored state.
        tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var slime = new Slime();
        toSlime(slime.setObject(), tenant.tenantSecretStores());
        return new SlimeJsonResponse(slime);
    }
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var role = mandatory("role", data).asString();
if (role.isBlank()) {
return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
}
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
var access = lockedTenant.get().archiveAccess();
lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role));
controller.tenants().store(lockedTenant);
});
return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
private HttpResponse removeAwsArchiveAccess(String tenantName) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
var access = lockedTenant.get().archiveAccess();
lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole());
controller.tenants().store(lockedTenant);
});
return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
}
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var member = mandatory("member", data).asString();
if (member.isBlank()) {
return ErrorResponse.badRequest("GCP archive access role can't be whitespace only");
}
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
var access = lockedTenant.get().archiveAccess();
lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
controller.tenants().store(lockedTenant);
});
return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}
private HttpResponse removeGcpArchiveAccess(String tenantName) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
var access = lockedTenant.get().archiveAccess();
lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember());
controller.tenants().store(lockedTenant);
});
return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}
    /**
     * Applies a partial update to an application. Recognized fields in the request body:
     * "majorVersion" (0 clears the pinned major version) and "pemDeployKey" (added to the
     * application's deploy keys). Returns a message listing the changes made, or
     * "No applicable changes." when no recognized field was present.
     */
    private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
        Inspector requestObject = toSlime(request.getData()).get();
        StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            Inspector majorVersionField = requestObject.field("majorVersion");
            if (majorVersionField.valid()) {
                // A major version of 0 means "unpin": store an empty major version.
                Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
                application = application.withMajorVersion(majorVersion);
                messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
            }
            Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
            if (pemDeployKeyField.valid()) {
                String pemDeployKey = pemDeployKeyField.asString();
                PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
                application = application.withDeployKey(deployKey);
                messageBuilder.add("Added deploy key " + pemDeployKey);
            }
            controller.applications().store(application);
        });
        return new MessageResponse(messageBuilder.toString());
    }
private Application getApplication(String tenantName, String applicationName) {
TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
return controller.applications().getApplication(applicationId)
.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
return controller.applications().getInstance(applicationId)
.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
    /**
     * Lists the nodes allocated to a deployment as JSON, with state, versions and resources
     * as reported by the node repository in the given zone.
     */
    private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));
        Slime slime = new Slime();
        Cursor nodesArray = slime.setObject().setArray("nodes");
        for (Node node : nodes) {
            Cursor nodeObject = nodesArray.addObject();
            nodeObject.setString("hostname", node.hostname().value());
            nodeObject.setString("state", valueOf(node.state()));
            node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
            nodeObject.setString("orchestration", valueOf(node.serviceState()));
            nodeObject.setString("version", node.currentVersion().toString());
            node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
            toSlime(node.resources(), nodeObject);
            nodeObject.setString("clusterId", node.clusterId());
            nodeObject.setString("clusterType", valueOf(node.clusterType()));
            nodeObject.setBool("down", node.down());
            // Report a node as retired both when retirement has happened and when it is only wanted.
            nodeObject.setBool("retired", node.retired() || node.wantToRetire());
            // A restart/reboot is pending while the wanted generation is ahead of the current one.
            nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
            nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
            nodeObject.setString("group", node.group());
            nodeObject.setLong("index", node.index());
        }
        return new SlimeJsonResponse(slime);
    }
    /**
     * Renders autoscaling information for each cluster of a deployment: configured min/max
     * limits, current/target/suggested resources, utilization and recent scaling events.
     */
    private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
        Slime slime = new Slime();
        Cursor clustersObject = slime.setObject().setObject("clusters");
        for (Cluster cluster : application.clusters().values()) {
            Cursor clusterObject = clustersObject.setObject(cluster.id().value());
            clusterObject.setString("type", cluster.type().name());
            toSlime(cluster.min(), clusterObject.setObject("min"));
            toSlime(cluster.max(), clusterObject.setObject("max"));
            toSlime(cluster.current(), clusterObject.setObject("current"));
            // Only include the target when it differs from current resources (comparing numbers only).
            if (cluster.target().isPresent()
                && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
                toSlime(cluster.target().get(), clusterObject.setObject("target"));
            cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
            utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
            scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
            clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
            clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
            clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
            clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
            clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
        }
        return new SlimeJsonResponse(slime);
    }
private static String valueOf(Node.State state) {
return switch (state) {
case failed: yield "failed";
case parked: yield "parked";
case dirty: yield "dirty";
case ready: yield "ready";
case active: yield "active";
case inactive: yield "inactive";
case reserved: yield "reserved";
case provisioned: yield "provisioned";
case breakfixed: yield "breakfixed";
case deprovisioned: yield "deprovisioned";
default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
};
}
static String valueOf(Node.ServiceState state) {
switch (state) {
case expectedUp: return "expectedUp";
case allowedDown: return "allowedDown";
case permanentlyDown: return "permanentlyDown";
case unorchestrated: return "unorchestrated";
case unknown: break;
}
return "unknown";
}
private static String valueOf(Node.ClusterType type) {
return switch (type) {
case admin: yield "admin";
case content: yield "content";
case container: yield "container";
case combined: yield "combined";
case unknown: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
};
}
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
return switch (diskSpeed) {
case fast : yield "fast";
case slow : yield "slow";
case any : yield "any";
};
}
private static String valueOf(NodeResources.StorageType storageType) {
return switch (storageType) {
case remote : yield "remote";
case local : yield "local";
case any : yield "any";
};
}
    /**
     * Streams log entries for a deployment, piping the config server's log stream directly
     * into the response body. Query parameters are forwarded to the config server as-is.
     */
    private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
        ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        DeploymentId deployment = new DeploymentId(application, zone);
        InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
        return new HttpResponse(200) {
            @Override
            public void render(OutputStream outputStream) throws IOException {
                // try-with-resources ensures the upstream log stream is closed, also on failure.
                try (logStream) {
                    logStream.transferTo(outputStream);
                }
            }
            @Override
            public long maxPendingBytes() {
                return 1 << 26; // allow up to 64 MiB of buffered, not-yet-delivered log data
            }
        };
    }
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
}
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
Instant now = controller.clock().instant();
SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName());
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment);
return buildResponseFromProtonMetrics(protonMetrics);
}
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
var from = Optional.ofNullable(request.getProperty("from"))
.map(Long::valueOf)
.map(Instant::ofEpochSecond)
.orElse(Instant.EPOCH);
var until = Optional.ofNullable(request.getProperty("until"))
.map(Long::valueOf)
.map(Instant::ofEpochSecond)
.orElse(Instant.now(controller.clock()));
var application = ApplicationId.from(tenantName, applicationName, instanceName);
var zone = requireZone(environment, region);
var deployment = new DeploymentId(application, zone);
var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
var slime = new Slime();
var root = slime.setObject();
for (var entry : events.entrySet()) {
var serviceRoot = root.setArray(entry.getKey().clusterId().value());
scalingEventsToSlime(entry.getValue(), serviceRoot);
}
return new SlimeJsonResponse(slime);
}
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
try {
var jsonObject = jsonMapper.createObjectNode();
var jsonArray = jsonMapper.createArrayNode();
for (ProtonMetrics metrics : protonMetrics) {
jsonArray.add(metrics.toJson());
}
jsonObject.set("metrics", jsonArray);
return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
} catch (JsonProcessingException e) {
log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
return new JsonResponse(500, "");
}
}
    /**
     * Force-triggers (or re-triggers) the given job for the given instance.
     *
     * Request body flags: "skipTests", "skipRevision", "skipUpgrade" and "reTrigger".
     * Returns a message naming the job(s) triggered, and which upgrades were suppressed.
     */
    private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
        Inspector requestObject = toSlime(request.getData()).get();
        boolean requireTests = ! requestObject.field("skipTests").asBool();
        boolean reTrigger = requestObject.field("reTrigger").asBool();
        boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
        boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
        // reTrigger yields a single job; forceTrigger may yield several, joined by ", ".
        String triggered = reTrigger
                           ? controller.applications().deploymentTrigger()
                                       .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
                           : controller.applications().deploymentTrigger()
                                       .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                                       .stream().map(job -> job.type().jobName()).collect(joining(", "));
        // Builds e.g. ", without revision and platform upgrade", depending on which upgrades were skipped.
        String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                    (upgradeRevision ? "" : "revision") +
                                    ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                    (upgradePlatform ? "" : "platform") +
                                    ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
        return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                       : "Triggered " + triggered + " for " + id + suppressedUpgrades);
    }
private HttpResponse pause(ApplicationId id, JobType type) {
Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
controller.applications().deploymentTrigger().pauseJob(id, type, until);
return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
private HttpResponse resume(ApplicationId id, JobType type) {
controller.applications().deploymentTrigger().resumeJob(id, type);
return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
    /**
     * Renders an application as JSON: ids, job links, latest version, in-flight and
     * outstanding changes, instances, deploy keys, metrics, activity and issue references.
     * Honors the request's "production" and recursion parameters when rendering instances.
     */
    private void toSlime(Cursor object, Application application, HttpRequest request) {
        object.setString("tenant", application.id().tenant().value());
        object.setString("application", application.id().application().value());
        object.setString("deployments", withPath("/application/v4" +
                                                 "/tenant/" + application.id().tenant().value() +
                                                 "/application/" + application.id().application().value() +
                                                 "/job/",
                                                 request.getUri()).toString());
        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
        application.projectId().ifPresent(id -> object.setLong("projectId", id));
        // Deploying and outstanding change are rendered from the first instance only.
        application.instances().values().stream().findFirst().ifPresent(instance -> {
            if ( ! instance.change().isEmpty())
                toSlime(object.setObject("deploying"), instance.change(), application);
            if ( ! status.outstandingChange(instance.name()).isEmpty())
                toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
        });
        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
        Cursor instancesArray = object.setArray("instances");
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
        application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
    }
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
object.setString("instance", instance.name().value());
if (deploymentSpec.instance(instance.name()).isPresent()) {
Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change(), status.application());
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());
Cursor changeBlockers = object.setArray("changeBlockers");
deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
}));
}
addRotationId(object, instance);
List<Deployment> deployments = deploymentSpec.instance(instance.name())
.map(spec -> sortedDeployments(instance.deployments().values(), spec))
.orElse(List.copyOf(instance.deployments().values()));
Cursor deploymentsArray = object.setArray("deployments");
for (Deployment deployment : deployments) {
Cursor deploymentObject = deploymentsArray.addObject();
if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
else {
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/instance/" + instance.name().value() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value(),
request.getUri()).toString());
}
}
}
private void addRotationId(Cursor object, Instance instance) {
instance.rotations().stream()
.map(AssignedRotation::rotationId)
.findFirst()
.ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
Application application = status.application();
object.setString("tenant", instance.id().tenant().value());
object.setString("application", instance.id().application().value());
object.setString("instance", instance.id().instance().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + instance.id().tenant().value() +
"/application/" + instance.id().application().value() +
"/instance/" + instance.id().instance().value() + "/job/",
request.getUri()).toString());
application.revisions().last().ifPresent(version -> {
version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
version.commit().ifPresent(commit -> object.setString("commit", commit));
});
application.projectId().ifPresent(id -> object.setLong("projectId", id));
if (application.deploymentSpec().instance(instance.name()).isPresent()) {
Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change(), application);
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
Cursor changeBlockers = object.setArray("changeBlockers");
application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
}));
}
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
addRotationId(object, instance);
List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
.map(spec -> sortedDeployments(instance.deployments().values(), spec))
.orElse(List.copyOf(instance.deployments().values()));
Cursor instancesArray = object.setArray("instances");
for (Deployment deployment : deployments) {
Cursor deploymentObject = instancesArray.addObject();
if (deployment.zone().environment() == Environment.prod) {
if (instance.rotations().size() == 1) {
toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
deploymentObject);
}
if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
}
}
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
else {
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("instance", instance.id().instance().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value(),
request.getUri()).toString());
}
}
Stream.concat(status.jobSteps().keySet().stream()
.filter(job -> job.application().instance().equals(instance.name()))
.filter(job -> job.type().isProduction() && job.type().isDeployment()),
controller.jobController().active(instance.id()).stream()
.map(run -> run.id().job())
.filter(job -> job.type().environment().isManuallyDeployed()))
.map(job -> job.type().zone())
.filter(zone -> ! instance.deployments().containsKey(zone))
.forEach(zone -> {
Cursor deploymentObject = instancesArray.addObject();
deploymentObject.setString("environment", zone.environment().value());
deploymentObject.setString("region", zone.region().value());
});
application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
Instance instance = controller.applications().getInstance(id)
.orElseThrow(() -> new NotExistsException(id + " not found"));
DeploymentId deploymentId = new DeploymentId(instance.id(),
requireZone(environment, region));
Deployment deployment = instance.deployments().get(deploymentId.zoneId());
if (deployment == null)
throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
Slime slime = new Slime();
toSlime(slime.setObject(), deploymentId, deployment, request);
return new SlimeJsonResponse(slime);
}
private void toSlime(Cursor object, Change change, Application application) {
change.platform().ifPresent(version -> object.setString("version", version.toString()));
change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision)));
}
    /** Serializes the given endpoint to the given cursor. Field order is part of the response format. */
    private void toSlime(Endpoint endpoint, Cursor object) {
        object.setString("cluster", endpoint.cluster().value());
        object.setBool("tls", endpoint.tls());
        object.setString("url", endpoint.url().toString());
        object.setString("scope", endpointScopeString(endpoint.scope()));
        object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
        object.setBool("legacy", endpoint.legacy());
    }
    /**
     * Serializes a single deployment to the given response cursor: identity, endpoints, versions,
     * rollout status, quota/cost, activity and metrics. Statement order defines the JSON field order,
     * which is part of the API response format.
     */
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        response.setString("tenant", deploymentId.applicationId().tenant().value());
        response.setString("application", deploymentId.applicationId().application().value());
        response.setString("instance", deploymentId.applicationId().instance().value());
        response.setString("environment", deploymentId.zoneId().environment().value());
        response.setString("region", deploymentId.zoneId().region().value());
        var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

        boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
        var endpointArray = response.setArray("endpoints");
        // Zone-scoped endpoints first, then globally declared endpoints targeting this deployment.
        EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
                                               .scope(Endpoint.Scope.zone);
        if (!legacyEndpoints) {
            // Unless legacy endpoints are explicitly requested, only expose direct, non-legacy ones.
            zoneEndpoints = zoneEndpoints.not().legacy().direct();
        }
        for (var endpoint : zoneEndpoints) {
            toSlime(endpoint, endpointArray.addObject());
        }
        EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
                                                   .targets(deploymentId);
        if (!legacyEndpoints) {
            declaredEndpoints = declaredEndpoints.not().legacy().direct();
        }
        for (var endpoint : declaredEndpoints) {
            toSlime(endpoint, endpointArray.addObject());
        }

        response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
        response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", application.revisions().get(deployment.revision()).stringId());
        response.setLong("build", deployment.revision().number());
        Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
        response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
        controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
                  .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));

        application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));

        var instance = application.instances().get(deploymentId.applicationId().instance());
        if (instance != null) {
            // Rotation status only applies to prod deployments of instances with assigned rotations.
            if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);

            if (!deployment.zone().environment().isManuallyDeployed()) {
                // Pipeline-managed deployment: derive status from the corresponding job step.
                DeploymentStatus status = controller.jobController().deploymentStatus(application);
                JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
                Optional.ofNullable(status.jobSteps().get(jobId))
                        .ifPresent(stepStatus -> {
                            JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
                            if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
                                response.setString("status", "complete");
                            else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                                response.setString("status", "pending");
                            else
                                response.setString("status", "running");
                        });
            } else {
                // Manually deployed: status is simply whether the last deployment run has ended.
                var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
                deploymentRun.ifPresent(run -> {
                    response.setString("status", run.hasEnded() ? "complete" : "running");
                });
            }
        }

        response.setDouble("quota", deployment.quota().rate());
        deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));

        controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)
                  .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));

        Cursor activity = response.setObject("activity");
        deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                                  instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                                  instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
        metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
    }
private void toSlime(RotationState state, Cursor object) {
Cursor bcpStatus = object.setObject("bcpStatus");
bcpStatus.setString("rotationStatus", rotationStateString(state));
}
    /** Serializes the endpoint (rotation) status of the given deployment. Field order is part of the response format. */
    private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
        var array = object.setArray("endpointStatus");
        for (var rotation : rotations) {
            var statusObject = array.addObject();
            var targets = status.of(rotation.rotationId());
            statusObject.setString("endpointId", rotation.endpointId().id());
            statusObject.setString("rotationId", rotation.rotationId().asString());
            statusObject.setString("clusterId", rotation.clusterId().value());
            // Per-deployment state for the "status" field; 'targets' above is only used for its update timestamp.
            statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
            statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
        }
    }
private URI monitoringSystemUri(DeploymentId deploymentId) {
return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
    /**
     * Sets the global routing status of a deployment in or out of service.
     * The change is attributed to an operator or tenant agent depending on the caller's role.
     */
    private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
        Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
        ZoneId zone = requireZone(environment, region);
        Deployment deployment = instance.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(instance + " has no deployment in " + zone);
        }
        DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
        // Record who made the change, so operator overrides can be distinguished from tenant ones.
        RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
        RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
        controller.routing().of(deploymentId).setRoutingStatus(status, agent);
        return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                               instance.id().toShortString(), zone, inService ? "in" : "out of"));
    }
    /**
     * Returns the global routing override status of a deployment, keyed by the primary rotation's
     * upstream name. The array stays empty when the application has no rotation-backed endpoint.
     */
    private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        Slime slime = new Slime();
        Cursor array = slime.setObject().setArray("globalrotationoverride");
        Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
                                                       .requiresRotation()
                                                       .primary();
        if (primaryEndpoint.isPresent()) {
            DeploymentRoutingContext context = controller.routing().of(deploymentId);
            RoutingStatus status = context.routingStatus();
            // Legacy response shape: the upstream name and its status object are sibling array elements.
            array.addString(primaryEndpoint.get().upstreamName(deploymentId));
            Cursor statusObject = array.addObject();
            statusObject.setString("status", status.value().name());
            statusObject.setString("reason", "");
            statusObject.setString("agent", status.agent().name());
            statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
        }
        return new SlimeJsonResponse(slime);
    }
    /**
     * Returns the BCP rotation status of a deployment, for the rotation identified by the
     * given endpoint id (or the instance's single rotation when no id is given).
     */
    private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Instance instance = controller.applications().requireInstance(applicationId);
        ZoneId zone = requireZone(environment, region);
        RotationId rotation = findRotationId(instance, endpointId);
        Deployment deployment = instance.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(instance + " has no deployment in " + zone);
        }

        Slime slime = new Slime();
        Cursor response = slime.setObject();
        toSlime(instance.rotationStatus().of(rotation, deployment), response);
        return new SlimeJsonResponse(slime);
    }
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
Slime slime = new Slime();
Cursor root = slime.setObject();
if ( ! instance.change().isEmpty()) {
instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString()));
root.setBool("pinned", instance.change().isPinned());
}
return new SlimeJsonResponse(slime);
}
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
requireZone(environment, region));
boolean suspended = controller.applications().isSuspended(deploymentId);
Slime slime = new Slime();
Cursor response = slime.setObject();
response.setBool("suspended", suspended);
return new SlimeJsonResponse(slime);
}
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId,
serviceName,
DomainName.of(host),
HttpURL.Path.parse("/status").append(restPath),
Query.empty().add(request.getJDiscRequest().parameters()));
}
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
return controller.serviceRegistry().configServer().getServiceNodes(deploymentId);
}
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Query query = Query.empty().add(request.getJDiscRequest().parameters());
query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString());
return controller.serviceRegistry().configServer().getServiceNodePage(
deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
}
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
}
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
getTenantOrThrow(tenantName);
TenantName tenant = TenantName.from(tenantName);
Inspector requestObject = toSlime(request.getData()).get();
controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}
private HttpResponse createTenant(String tenantName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
Inspector requestObject = toSlime(request.getData()).get();
controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
if (controller.system().isPublic()) {
User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
.info()
.withContact(TenantContact.from(user.name(), user.email()));
controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(info);
controller.tenants().store(lockedTenant);
});
}
return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
Application application = controller.applications().createApplication(id, credentials);
Slime slime = new Slime();
toSlime(id, slime.setObject(), request);
return new SlimeJsonResponse(slime);
}
    /**
     * Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9".
     * An empty version means the current system version. Non-active versions are rejected
     * unless the caller is an operator. Optionally pins the instance to the version.
     */
    private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
        String versionString = readToString(request.getData());
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            Version version = Version.fromString(versionString);
            VersionStatus versionStatus = controller.readVersionStatus();
            // The empty version is shorthand for "current system version".
            if (version.equals(Version.emptyVersion))
                version = controller.systemVersion(versionStatus);
            if ( ! versionStatus.isActive(version) && ! isOperator(request))
                throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                                   "Version is not active in this system. " +
                                                   "Active versions: " + versionStatus.versions()
                                                                                      .stream()
                                                                                      .map(VespaVersion::versionNumber)
                                                                                      .map(Version::toString)
                                                                                      .collect(joining(", ")));
            Change change = Change.of(version);
            if (pin)
                change = change.withPin();
            controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }
    /**
     * Trigger deployment to the last known application package for the given application,
     * or to a specific build when the request body contains a "build" field.
     */
    private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Inspector buildField = toSlime(request.getData()).get().field("build");
        // -1 is a sentinel meaning "latest known revision".
        long build = buildField.valid() ? buildField.asLong() : -1;

        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            RevisionId revision = build == -1 ? application.get().revisions().last().get().id()
                                              : getRevision(application.get(), build);
            Change change = Change.of(revision);
            controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }
private RevisionId getRevision(Application application, long build) {
return application.revisions().withPackage().stream()
.map(ApplicationVersion::id)
.filter(version -> version.number() == build)
.findFirst()
.filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(),
application.id().application(),
build))
.orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
}
private HttpResponse cancelBuild(String tenantName, String applicationName, String build){
TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
controller.applications().lockApplicationOrThrow(id, application -> {
controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
for (Instance instance : application.get().instances().values())
if (instance.change().revision().equals(Optional.of(revision)))
controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION);
});
return new MessageResponse("Marked build '" + build + "' as non-deployable");
}
    /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
    private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            Change change = application.get().require(id.instance()).change();
            if (change.isEmpty()) {
                response.append("No deployment in progress for ").append(id).append(" at this time");
                return;
            }

            // An unknown choice makes valueOf throw IllegalArgumentException, which maps to a 400 response.
            // NOTE(review): toUpperCase() uses the default locale — confirm choice values are always ASCII.
            ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
            controller.applications().deploymentTrigger().cancelChange(id, cancel);
            response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
        });

        return new MessageResponse(response.toString());
    }
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
.flatMap(clusters -> Stream.of(clusters.split(",")))
.filter(cluster -> ! cluster.isBlank())
.toList();
List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
.flatMap(types -> Stream.of(types.split(",")))
.filter(type -> ! type.isBlank())
.toList();
Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
boolean indexedOnly = request.getBooleanProperty("indexedOnly");
controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed);
return new MessageResponse("Requested reindexing of " + id + " in " + zone +
(clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
(documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) +
(indexedOnly ? ", for indexed types" : "") +
(speed != null ? ", with speed " + speed : ""));
}
    /**
     * Gets reindexing status of an application in a zone: per cluster, the document types with
     * pending reindexing (and the generation they require) and those ready (with their status).
     * Clusters and types are sorted by name for a stable response.
     */
    private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);

        Slime slime = new Slime();
        Cursor root = slime.setObject();

        root.setBool("enabled", reindexing.enabled());

        Cursor clustersArray = root.setArray("clusters");
        reindexing.clusters().entrySet().stream().sorted(comparingByKey())
                  .forEach(cluster -> {
                      Cursor clusterObject = clustersArray.addObject();
                      clusterObject.setString("name", cluster.getKey());

                      Cursor pendingArray = clusterObject.setArray("pending");
                      cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                             .forEach(pending -> {
                                 Cursor pendingObject = pendingArray.addObject();
                                 pendingObject.setString("type", pending.getKey());
                                 pendingObject.setLong("requiredGeneration", pending.getValue());
                             });

                      Cursor readyArray = clusterObject.setArray("ready");
                      cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                             .forEach(ready -> {
                                 Cursor readyObject = readyArray.addObject();
                                 readyObject.setString("type", ready.getKey());
                                 setStatus(readyObject, ready.getValue());
                             });
                  });
        return new SlimeJsonResponse(slime);
    }
    /** Writes the optional fields of a reindexing status to the given cursor; absent fields are omitted. */
    void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
        status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
        status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
        status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
        status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
        status.message().ifPresent(message -> statusObject.setString("message", message));
        status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
        status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
    }
private static String toString(ApplicationReindexing.State state) {
return switch (state) {
case PENDING: yield "pending";
case RUNNING: yield "running";
case FAILED: yield "failed";
case SUCCESSFUL: yield "successful";
};
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
controller.applications().enableReindexing(id, zone);
return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
controller.applications().disableReindexing(id, zone);
return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}
    /**
     * Schedule restart of deployment, or specific host in a deployment.
     * Optional request properties 'hostname', 'clusterType' and 'clusterId' narrow the restart.
     */
    private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        RestartFilter restartFilter = new RestartFilter()
                .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of))
                .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
                .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));

        controller.applications().restart(deploymentId, restartFilter);
        return new MessageResponse("Requested restart of " + deploymentId);
    }
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
requireZone(environment, region));
controller.applications().setSuspension(deploymentId, suspend);
return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId);
}
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
Map<String, byte[]> dataParts = parseDataParts(request);
if ( ! dataParts.containsKey("applicationZip"))
throw new IllegalArgumentException("Missing required form part 'applicationZip'");
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
Optional.of(id.instance()),
Optional.of(type.zone()),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
.map(json -> SlimeUtils.jsonToSlime(json).get())
.flatMap(options -> optional("vespaVersion", options))
.map(Version::fromString);
ensureApplicationExists(TenantAndApplicationId.from(id), request);
boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions"))
.map(json -> SlimeUtils.jsonToSlime(json).get())
.flatMap(options -> optional("dryRun", options))
.map(Boolean::valueOf)
.orElse(false);
controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
RunId runId = controller.jobController().last(id, type).get().id();
Slime slime = new Slime();
Cursor rootObject = slime.setObject();
rootObject.setString("message", "Deployment started in " + runId +
". This may take about 15 minutes the first time.");
rootObject.setLong("run", runId.number());
return new SlimeJsonResponse(slime);
}
    /**
     * Deploys a system application (with an application package) to the given zone.
     * Rejected while a system upgrade is in progress, and before a system version is determined.
     * Mixes ErrorResponse returns and thrown IllegalArgumentExceptions; both end up as 400s.
     */
    private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);

        // Get deployOptions
        Map<String, byte[]> dataParts = parseDataParts(request);
        if ( ! dataParts.containsKey("deployOptions"))
            return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
        Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

        // This API is only for system applications carrying their own package.
        Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
        if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) {
            return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
        }

        // The version of the system application is pinned to the system version; callers may not choose it.
        String vespaVersion = deployOptions.field("vespaVersion").asString();
        if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) {
            return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
        }

        // NOTE(review): these two checks throw IllegalArgumentException (→ 400); consider whether
        // InvalidApplicationException would be a better fit — confirm intended error category.
        VersionStatus versionStatus = controller.readVersionStatus();
        if (versionStatus.isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        ActivateResult result = controller.applications()
                                          .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
        return new SlimeJsonResponse(toSlime(result));
    }
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
boolean forget = request.getBooleanProperty("forget");
if (forget && !isOperator(request))
return ErrorResponse.forbidden("Only operators can forget a tenant");
controller.tenants().delete(TenantName.from(tenantName),
Optional.of(accessControlRequests.credentials(TenantName.from(tenantName),
toSlime(request.getData()).get(),
request.getJDiscRequest())),
forget);
return new MessageResponse("Deleted tenant " + tenantName);
}
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
controller.applications().deleteApplication(id, credentials);
return new MessageResponse("Deleted application " + id);
}
    /**
     * Deletes the given instance; when it was the application's last instance,
     * the application itself is deleted as well.
     */
    private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        controller.applications().deleteInstance(id.instance(instanceName));
        // NOTE(review): check-then-act — a concurrently created instance could race this emptiness
        // check; confirm whether deleteApplication tolerates that.
        if (controller.applications().requireApplication(id).instances().isEmpty()) {
            Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
            controller.applications().deleteApplication(id, credentials);
        }
        return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
    }
    /** Deactivates the given deployment, and aborts any still-running deployment job for it. */
    private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                           requireZone(environment, region));
        // Remove the deployment from the application instance
        controller.applications().deactivate(id.applicationId(), id.zoneId());

        // NOTE(review): getUserPrincipal() may be null for unauthenticated requests — confirm this
        // path is only reachable with an authenticated principal.
        controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
                  .filter(run -> ! run.hasEnded())
                  .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
        return new MessageResponse("Deactivated " + id);
    }
    /** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
    private HttpResponse testConfig(ApplicationId id, JobType type) {
        Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
        // Fall back to the default instance's production deployments when the given instance is not in the spec.
        ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
                                       ? id : TenantAndApplicationId.from(id).defaultInstance();
        HashSet<DeploymentId> deployments = controller.applications()
                                                      .getInstance(prodInstanceId).stream()
                                                      .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                      .map(zone -> new DeploymentId(prodInstanceId, zone))
                                                      .collect(Collectors.toCollection(HashSet::new));

        // Non-production jobs test their own zone in addition to the production deployments.
        ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
        if ( ! type.isProduction())
            deployments.add(new DeploymentId(toTest, type.zone()));

        Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone());
        if (deployment == null)
            throw new NotExistsException(toTest + " is not deployed in " + type.zone());

        return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                      type,
                                                                      false,
                                                                      deployment.version(),
                                                                      deployment.revision(),
                                                                      deployment.at(),
                                                                      controller.routing().readTestRunnerEndpointsOf(deployments),
                                                                      controller.applications().reachableContentClustersByZone(deployments)));
    }
/**
 * Requests a service dump on the given node by writing a "serviceDump" report to the node repository.
 * The request body must contain "configId" and a non-empty "artifacts" array; "expiresAt" and
 * "dumpOptions" are optional. With ?wait=true the call blocks until the dump completes or fails.
 *
 * @throws IllegalArgumentException on missing/invalid payload, or if a dump is already in progress
 *                                  (unless ?force=true)
 */
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
                                        String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);
    Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
    if (report != null) {
        Cursor cursor = report.get();
        boolean force = request.getBooleanProperty("force");
        // A report with neither completedAt nor failedAt set is still running; refuse unless forced
        if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
            throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
        }
    }
    Slime requestPayload;
    try {
        requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
    } catch (Exception e) {
        throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
    }
    Cursor requestPayloadCursor = requestPayload.get();
    String configId = requestPayloadCursor.field("configId").asString();
    long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
    if (configId.isEmpty()) {
        throw new IllegalArgumentException("Missing configId");
    }
    Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
    int artifactEntries = artifactsCursor.entries();
    if (artifactEntries == 0) {
        throw new IllegalArgumentException("Missing or empty 'artifacts'");
    }
    // Build the dump request report stored on the node
    Slime dumpRequest = new Slime();
    Cursor dumpRequestCursor = dumpRequest.setObject();
    dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
    dumpRequestCursor.setString("configId", configId);
    Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
    for (int i = 0; i < artifactEntries; i++) {
        dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
    }
    if (expiresAt > 0) {
        dumpRequestCursor.setLong("expiresAt", expiresAt);
    }
    Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
    if (dumpOptionsCursor.children() > 0) {
        SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
    }
    // Fix: SlimeUtils.toJsonBytes produces UTF-8; decode explicitly instead of with the platform default charset
    var reportsUpdate = Map.of("serviceDump",
                               new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest)),
                                          java.nio.charset.StandardCharsets.UTF_8));
    nodeRepository.updateReports(zone, hostname, reportsUpdate);
    boolean wait = request.getBooleanProperty("wait");
    if (!wait) return new MessageResponse("Request created");
    return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
}
/** Returns the current "serviceDump" report for the given node, or 404 if none exists. */
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment,
                                    String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);
    Optional<Slime> report = getReport(nodeRepository, zone, tenant, application, instance, hostname);
    if (report.isEmpty())
        throw new NotExistsException("No service dump for node " + hostname);
    return new SlimeJsonResponse(report.get());
}
/**
 * Polls the node's "serviceDump" report until it has either completed or failed, then returns it.
 * NOTE(review): there is no explicit timeout in this loop; presumably the handler's overall
 * request timeout bounds the wait — confirm.
 */
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                              String application, String instance, String hostname) {
    int pollInterval = 2; // seconds between polls
    Slime report;
    while (true) {
        // The report is expected to exist here, since a dump request was just written
        report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get();
        Cursor cursor = report.get();
        // Either timestamp being set means the dump is done (successfully or not)
        if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
            break;
        }
        final Slime copyForLambda = report; // effectively-final reference for the logging lambda
        log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda))));
        log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
        controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
    }
    return new SlimeJsonResponse(report);
}
/**
 * Returns the parsed "serviceDump" report for the given node, or empty if the node has none.
 * Throws NotExistsException if the node is unknown, and IllegalArgumentException if it is not
 * owned by the given application.
 */
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                  String application, String instance, String hostname) {
    Node node;
    try {
        node = nodeRepository.getNode(zone, hostname);
    } catch (IllegalArgumentException e) {
        throw new NotExistsException(hostname);
    }
    ApplicationId expectedOwner = ApplicationId.from(tenant, application, instance);
    ApplicationId actualOwner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
    if ( ! expectedOwner.equals(actualOwner))
        throw new IllegalArgumentException("Node is not owned by " + expectedOwner.toFullString());
    return Optional.ofNullable(node.reports().get("serviceDump"))
                   .map(SlimeUtils::jsonToSlimeOrThrow);
}
/** Parses a source revision from the given object; all of "repository", "branch" and "commit" are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        throw new NotExistsException(new TenantId(tenantName));
    return tenant.get();
}
/** Serializes the given tenant, with type-specific details and its applications/instances, to the given object. */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            // Contact info is optional and only serialized when present
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            toSlime(object.setObject("integrations").setObject("aws"),
                    controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
                    cloudTenant.tenantSecretStores());
            // Quota is best-effort: a failure is logged but must not fail the whole tenant response
            try {
                var usedQuota = applications.stream()
                        .map(Application::quotaUsage)
                        .reduce(QuotaUsage.none, QuotaUsage::add);
                toSlime(object.setObject("quota"), usedQuota);
            } catch (Exception e) {
                log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e)));
            }
            cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role));
            toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess"));
            break;
        }
        case deleted: break;
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    Cursor applicationArray = object.setArray("applications");
    for (Application application : applications) {
        DeploymentStatus status = null; // computed lazily, only when recursing over applications
        Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                              : application.instances().values();
        // An application without matching instances is still listed, unless only active instances were asked for
        if (instances.isEmpty() && !showOnlyActiveInstances(request))
            toSlime(application.id(), applicationArray.addObject(), request);
        for (Instance instance : instances) {
            if (showOnlyActiveInstances(request) && instance.deployments().isEmpty())
                continue;
            if (recurseOverApplications(request)) {
                if (status == null) status = controller.jobController().deploymentStatus(application);
                toSlime(applicationArray.addObject(), instance, status, request);
            } else {
                toSlime(instance.id(), applicationArray.addObject(), request);
            }
        }
    }
    tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Serializes archive access; only the roles/members that are present are written. */
private void toSlime(ArchiveAccess archiveAccess, Cursor object) {
    archiveAccess.awsRole().ifPresent(awsRole -> object.setString("awsRole", awsRole));
    archiveAccess.gcpMember().ifPresent(gcpMember -> object.setString("gcpMember", gcpMember));
}
/** Serializes quota usage; "budgetUsed" is the usage rate. */
private void toSlime(Cursor object, QuotaUsage usage) {
    object.setDouble("budgetUsed", usage.rate());
}
/** Serializes cluster resources, including the cost computed for this system. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    object.setDouble("cost", ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()));
}
/** Serializes cluster utilization: actual, ideal, current and peak values per resource dimension. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor out) {
    // CPU
    out.setDouble("cpu", utilization.cpu());
    out.setDouble("idealCpu", utilization.idealCpu());
    out.setDouble("currentCpu", utilization.currentCpu());
    out.setDouble("peakCpu", utilization.peakCpu());
    // Memory
    out.setDouble("memory", utilization.memory());
    out.setDouble("idealMemory", utilization.idealMemory());
    out.setDouble("currentMemory", utilization.currentMemory());
    out.setDouble("peakMemory", utilization.peakMemory());
    // Disk
    out.setDouble("disk", utilization.disk());
    out.setDouble("idealDisk", utilization.idealDisk());
    out.setDouble("currentDisk", utilization.currentDisk());
    out.setDouble("peakDisk", utilization.peakDisk());
}
/** Serializes the given scaling events, each with from/to resources and timestamps. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    scalingEvents.forEach(event -> {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
        // "completion" is only present for events which have finished
        event.completion().ifPresent(completion -> eventObject.setLong("completion", completion.toEpochMilli()));
    });
}
/** Serializes node resources to the given object. */
private void toSlime(NodeResources resources, Cursor target) {
    target.setDouble("vcpu", resources.vcpu());
    target.setDouble("memoryGb", resources.memoryGb());
    target.setDouble("diskGb", resources.diskGb());
    target.setDouble("bandwidthGbps", resources.bandwidthGbps());
    target.setString("diskSpeed", valueOf(resources.diskSpeed()));
    target.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a compact tenant entry (name, type metadata and URL) for the tenant list response. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz -> {
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
        }
        case cloud, deleted -> { } // no extra metadata
        default -> throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Serializes tenant metadata: creation/deletion times, last dev deployment, last prod submission, and last logins. */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
    // Most recent start of a dev deployment; falls back to the last run of any dev job
    // when no dev deployment currently exists
    Optional<Instant> lastDev = applications.stream()
            .flatMap(application -> application.instances().values().stream())
            .flatMap(instance -> instance.deployments().values().stream()
                    .filter(deployment -> deployment.zone().environment() == Environment.dev)
                    .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment)))
            .max(Comparator.naturalOrder())
            .or(() -> applications.stream()
                    .flatMap(application -> application.instances().values().stream())
                    .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream()
                            .filter(job -> job.environment() == Environment.dev)
                            .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                    .map(Run::start)
                    .max(Comparator.naturalOrder()));
    // Most recent build time among the latest revisions of the tenant's applications
    Optional<Instant> lastSubmission = applications.stream()
            .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream())
            .max(Comparator.naturalOrder());
    object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
    if (tenant.type() == Tenant.Type.deleted)
        object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli());
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
    // Last login, per user level, when known
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
            .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
            .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
            .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the host and port kept, and the path and query replaced by the given values. */
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    }
    catch (URISyntaxException impossible) {
        // All components come from an already-valid URI, so this cannot occur
        throw new RuntimeException("Will not happen", impossible);
    }
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path (query removed). */
private URI withPath(String newPath, URI uri) {
    return withPathAndQuery(newPath, null, uri);
}
/** Returns the application/v4 API path for the given deployment. */
private String toPath(DeploymentId id) {
    return path("/application", "v4",
                "tenant", id.applicationId().tenant(),
                "application", id.applicationId().application(),
                "instance", id.applicationId().instance(),
                "environment", id.zoneId().environment(),
                "region", id.zoneId().region());
}
/**
 * Parses the given string as a long, returning the given default when the string is null.
 *
 * @throws IllegalArgumentException if the string is non-null but not a valid integer
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Preserve the cause; the original threw without it
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/**
 * Reads up to 1 MB of JSON from the given stream and parses it into a Slime tree.
 *
 * @throws RuntimeException wrapping the IOException if reading the stream fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // The original threw a bare RuntimeException, losing both message and cause
        throw new RuntimeException("Failed to read request content", e);
    }
}
/** Returns the user principal of the request, or throws IllegalArgumentException if there is none. */
private static Principal requireUserPrincipal(HttpRequest request) {
    return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal())
                   .orElseThrow(() -> new IllegalArgumentException("Expected a user principal"));
}
/** Returns the given field of the object, or throws IllegalArgumentException if it is missing. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the string value of the given field, or empty if the field is missing. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}
/** Joins the given path elements with '/' (no leading or trailing separator added). */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}
/** Serializes a tenant-and-application reference, including its API URL. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String apiPath = "/application/v4/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/** Serializes an application instance reference, including its API URL. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String apiPath = "/application/v4/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value() +
                     "/instance/" + id.instance().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/** Serializes a deployment activation result: revision, package size, prepare log and config change actions. */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    // Log messages from the prepare phase, when present
    Cursor logArray = object.setArray("prepareMessages");
    if (result.prepareResponse().log != null) {
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }
    // Config change actions: services which must be restarted ...
    Cursor changeObject = object.setObject("configChangeActions");
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }
    // ... and document types which must be re-fed
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}
/** Serializes the given service infos into the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(service -> {
        Cursor entry = array.addObject();
        entry.setString("serviceName", service.serviceName);
        entry.setString("serviceType", service.serviceType);
        entry.setString("configId", service.configId);
        entry.setString("hostName", service.hostName);
    });
}
/** Adds each of the given strings to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Serializes the tenant's secret stores under the "secretStores" array. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor secretStore = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(secretStore.addObject(), store);
}
/** Serializes the tenant's AWS integration: its container role and secret-store accounts. */
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    Cursor accounts = object.setArray("accounts");
    for (TenantSecretStore secretStore : tenantSecretStores)
        toSlime(accounts.addObject(), secretStore);
}
/** Serializes a single tenant secret store: its name, AWS account id and role. */
private void toSlime(Cursor object, TenantSecretStore secretStore) {
    object.setString("name", secretStore.getName());
    object.setString("awsId", secretStore.getAwsId());
    object.setString("role", secretStore.getRole());
}
/**
 * Reads the entire stream as a single string, or returns null if the stream is empty.
 * The stream is consumed and closed by this call.
 * NOTE(review): the Scanner uses the platform default charset; consider UTF-8 — verify callers.
 */
private String readToString(InputStream stream) {
    // try-with-resources: the original never closed the Scanner (resource leak);
    // closing it also closes the underlying, read-once stream.
    try (Scanner scanner = new Scanner(stream).useDelimiter("\\A")) {
        return scanner.hasNext() ? scanner.next() : null;
    }
}
/** Returns whether the request asks for recursion at tenant level or deeper. */
private static boolean recurseOverTenants(HttpRequest request) {
    return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion at application level or deeper. */
private static boolean recurseOverApplications(HttpRequest request) {
    return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion over deployments ("all", "true" or "deployment"). */
private static boolean recurseOverDeployments(HttpRequest request) {
    // Guava's ImmutableSet tolerates contains(null) (the property may be absent); java.util.Set.of would throw
    return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
/** Returns whether only production instances should be listed (query parameter production=true). */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    return "true".equals(request.getProperty("production"));
}
/** Returns whether only instances with deployments should be listed (query parameter activeInstances=true). */
private static boolean showOnlyActiveInstances(HttpRequest request) {
    return "true".equals(request.getProperty("activeInstances"));
}
/** Returns whether deleted tenants should be included (query parameter includeDeleted=true). */
private static boolean includeDeleted(HttpRequest request) {
    return "true".equals(request.getProperty("includeDeleted"));
}
/** Maps the tenant type to its API string representation. */
private static String tenantType(Tenant tenant) {
    return switch (tenant.type()) {
        case athenz -> "ATHENS";
        case cloud -> "CLOUD";
        case deleted -> "DELETED";
    };
}
/** Extracts the application id from the {tenant}/{application}/{instance} path segments. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Extracts the job type from the {jobtype} path segment. */
private JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry());
}
/** Extracts the run id (application, job type and run number) from the request path. */
private RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), Long.parseLong(path.get("number")));
}
/**
 * Handles submission of an application package and its test package for deployment.
 * The multipart request carries submit options (JSON), the application zip, and the test zip.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    long projectId = submitOptions.field("projectId").asLong();
    projectId = projectId == 0 ? 1 : projectId; // 0 means unset; default to 1
    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    // A source revision requires all three of repository, branch and commit
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
            ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
            : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    Optional<String> description = optional("description", submitOptions);
    int risk = (int) submitOptions.field("risk").asLong();
    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP);
    Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);
    // Verify the package's Athenz identity configuration before accepting the submission
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     Optional.empty(),
                                                                     Optional.empty(),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
    ensureApplicationExists(id, request);
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
}
/** Submits a deployment-removal package, which removes all production deployments of the application. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    // The submit response itself is discarded; only the side effect (the new submission) matters here
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                 TenantAndApplicationId.from(tenant, application),
                                                 new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
                                                                Optional.empty(), Optional.empty(), Optional.empty(), 0),
                                                 0);
    return new MessageResponse("All deployments removed");
}
/** Parses the given environment and region into a zone, and requires it to exist in this system. */
private ZoneId requireZone(String environment, String region) {
    return requireZone(ZoneId.from(environment, region));
}
/** Returns the given zone if it exists in this system (or is the prod "controller" pseudo-zone); throws otherwise. */
private ZoneId requireZone(ZoneId zone) {
    boolean isControllerZone = zone.environment() == Environment.prod && zone.region().value().equals("controller");
    if ( ! isControllerZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart data parts of the request. When an X-Content-Hash header is present, the
 * SHA-256 digest of the raw content must match its base64-decoded value, or the request is rejected.
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("X-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);
    // Digest the content while the parser consumes the stream, then verify against the declared hash
    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}
/**
 * Resolves which rotation of the instance a request refers to: the one matching the given endpoint id
 * if present, otherwise the single rotation of the instance. Throws if none (or ambiguously many) match.
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    var rotations = instance.rotations();
    if (rotations.isEmpty())
        throw new NotExistsException("global rotation does not exist for " + instance);
    if (endpointId.isPresent()) {
        for (AssignedRotation rotation : rotations)
            if (rotation.endpointId().id().equals(endpointId.get()))
                return rotation.rotationId();
        throw new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance);
    }
    if (rotations.size() > 1)
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    return rotations.get(0).rotationId();
}
/** Maps a rotation state to its API string representation. */
private static String rotationStateString(RotationState state) {
    return switch (state) {
        case in -> "IN";
        case out -> "OUT";
        case unknown -> "UNKNOWN";
    };
}
/** Maps an endpoint scope to its API string representation. */
private static String endpointScopeString(Endpoint.Scope scope) {
    return switch (scope) {
        case weighted -> "weighted";
        case application -> "application";
        case global -> "global";
        case zone -> "zone";
    };
}
/** Maps a routing method to its API string representation. */
private static String routingMethodString(RoutingMethod method) {
    return switch (method) {
        case exclusive -> "exclusive";
        case sharedLayer4 -> "sharedLayer4";
    };
}
/** Returns the request context attribute with the given name and type, or throws if absent or of the wrong type. */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    if (cls.isInstance(value))
        return cls.cast(value);
    throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
    SecurityContext securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    for (Role role : securityContext.roles())
        if (role.definition() == RoleDefinition.hostedOperator)
            return true;
    return false;
}
/**
 * Ensures the given application exists: in public systems (or with Okta credentials) it is created
 * on demand; otherwise a missing application is an error, since it must be created in the Console first.
 */
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
    if (controller.applications().getApplication(id).isEmpty()) {
        if (controller.system().isPublic() || hasOktaContext(request)) {
            log.fine("Application does not exist in public, creating: " + id);
            var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest());
            controller.applications().createApplication(id, credentials);
        } else {
            log.fine("Application does not exist in hosted, failing: " + id);
            throw new IllegalArgumentException("Application does not exist. Create application in Console first.");
        }
    }
}
/** Returns whether the request carries valid Okta credentials in its context. */
private boolean hasOktaContext(HttpRequest request) {
    try {
        OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context());
        return true;
    }
    catch (IllegalArgumentException ignored) {
        return false; // context has no (or incomplete) Okta tokens
    }
}
/** Returns the deployments sorted by the order their production zones are declared in the instance spec. */
private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) {
    List<ZoneId> declaredZones = spec.zones().stream()
                                     .filter(declared -> declared.region().isPresent())
                                     .map(declared -> ZoneId.from(declared.environment(), declared.region().get()))
                                     .toList();
    Comparator<Deployment> byDeclarationOrder = comparingInt(deployment -> declaredZones.indexOf(deployment.zone()));
    return deployments.stream()
                      .sorted(byDeclarationOrder)
                      .toList(); // unmodifiable, like the original's unmodifiableList wrapper
}
}
class ApplicationApiHandler extends AuditLoggingRequestHandler {
// Shared JSON mapper for this handler
private static final ObjectMapper jsonMapper = new ObjectMapper();
// Central controller facade used by all handler methods
private final Controller controller;
// Resolves caller credentials from incoming requests
private final AccessControlRequests accessControlRequests;
// Serializes test-config responses for this system
private final TestConfigSerializer testConfigSerializer;
/**
 * Creates the handler.
 *
 * @param parentCtx             the jDisc threaded-handler context
 * @param controller            the controller this handler delegates to
 * @param accessControlRequests resolves credentials from incoming requests
 */
@Inject
public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
                             Controller controller,
                             AccessControlRequests accessControlRequests) {
    super(parentCtx, controller.auditLogger());
    this.controller = controller;
    this.accessControlRequests = accessControlRequests;
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
/** Requests handled here (e.g. deployments) can be slow, so allow a generous timeout. */
@Override
public Duration getTimeout() {
    return Duration.ofMinutes(20);
}
/**
 * Dispatches the request to the handler for its HTTP method, and maps thrown exceptions
 * to the appropriate HTTP error responses.
 */
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
    try {
        Path path = new Path(request.getUri());
        // Dispatch on HTTP method; each handler routes further on the path
        return switch (request.getMethod()) {
            case GET: yield handleGET(path, request);
            case PUT: yield handlePUT(path, request);
            case POST: yield handlePOST(path, request);
            case PATCH: yield handlePATCH(path, request);
            case DELETE: yield handleDELETE(path, request);
            case OPTIONS: yield handleOPTIONS();
            default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
        };
    }
    catch (RestApiException.Forbidden e) {
        return ErrorResponse.forbidden(Exceptions.toMessageString(e));
    }
    catch (RestApiException.Unauthorized e) {
        return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
    }
    catch (NotExistsException e) {
        return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
    }
    catch (IllegalArgumentException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
    catch (ConfigServerException e) {
        // Config server errors are mapped by their error code
        return switch (e.code()) {
            case NOT_FOUND: yield ErrorResponse.notFoundError(Exceptions.toMessageString(e));
            case ACTIVATION_CONFLICT: yield new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
            case INTERNAL_SERVER_ERROR: yield ErrorResponses.logThrowing(request, log, e);
            default: yield new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
        };
    }
    catch (RuntimeException e) {
        // Anything else is unexpected: log it and return 500
        return ErrorResponses.logThrowing(request, log, e);
    }
}
/**
 * Routes GET requests to the matching handler. The first pattern that matches wins,
 * so ordering matters for overlapping patterns. Returns 404 when nothing matches.
 *
 * Fix: removed an exact duplicate of the
 * ".../environment/{environment}/region/{region}/instance/{instance}" deployment route,
 * which could never match (the identical line above it always returned first).
 */
private HttpResponse handleGET(Path path, HttpRequest request) {
    if (path.matches("/application/v4/")) return root(request);
    if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
    if (path.matches("/application/v4/tenant")) return tenants(request);
    if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
    if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
    if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
    if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    // Legacy paths with ".../instance/{instance}" last. A second, identical deployment route was removed here as unreachable.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes PUT requests to the matching handler. First matching pattern wins;
 * returns 404 when nothing matches.
 */
private HttpResponse handlePUT(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
    if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
    if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
    // "/archive-access" and "/archive-access/aws" share a handler — presumably the former is a legacy alias; confirm before removing.
    if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes POST requests to the matching handler. First matching pattern wins;
 * returns 404 when nothing matches.
 */
private HttpResponse handlePOST(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
    // Application-level deploy routes implicitly target the "default" instance.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
    // Legacy path order with ".../instance/{instance}" last.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes PATCH requests. Both patterns delegate to the same application-level
 * handler — the instance segment is accepted but not passed on.
 */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes DELETE requests to the matching handler. First matching pattern wins;
 * returns 404 when nothing matches.
 */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
    // "/archive-access" and "/archive-access/aws" share a handler — presumably the former is a legacy alias; confirm before removing.
    if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
    // Application-level cancel routes implicitly target the "default" instance; plain "/deploying" cancels "all".
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    // Legacy path order with ".../instance/{instance}" last.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS with an empty body and an Allow header listing the supported methods. */
private HttpResponse handleOPTIONS() {
    var allowed = new EmptyResponse();
    allowed.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return allowed;
}
/** Lists every tenant together with the applications it owns, as a JSON array. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantsArray = slime.setArray();
    List<Application> allApplications = controller.applications().asList();
    for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) {
        List<Application> ownedByTenant = allApplications.stream()
                                                         .filter(application -> application.id().tenant().equals(tenant.name()))
                                                         .collect(toList());
        toSlime(tenantsArray.addObject(), tenant, ownedByTenant, request);
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the API root: a full tenant listing when recursion is requested, otherwise a resource index. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Lists all tenants (optionally including deleted ones) as a JSON array of summaries. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantsArray = slime.setArray();
    for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) {
        tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the named tenant as JSON, or 404 if it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName), includeDeleted(request));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Serializes the given tenant and all its applications to a JSON response. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime response = new Slime();
    toSlime(response.setObject(), tenant, controller.applications().asList(tenant.name()), request);
    return new SlimeJsonResponse(response);
}
/**
 * Returns the operator access request status for a cloud tenant: whether access is
 * managed, any pending request, and the audit log of past decisions.
 *
 * @return 400 for non-cloud tenants
 */
private HttpResponse accessRequests(String tenantName, HttpRequest request) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    var accessControlService = controller.serviceRegistry().accessControlService();
    var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
    var managedAccess = accessControlService.getManagedAccess(tenant);
    var slime = new Slime();
    var cursor = slime.setObject();
    cursor.setBool("managedAccess", managedAccess);
    // Include the pending membership request, if there is one.
    accessRoleInformation.getPendingRequest()
                         .ifPresent(membershipRequest -> {
                             var requestCursor = cursor.setObject("pendingRequest");
                             requestCursor.setString("requestTime", membershipRequest.getCreationTime());
                             requestCursor.setString("reason", membershipRequest.getReason());
                         });
    // One entry per past decision: when, by whom, why, and the action taken.
    var auditLogCursor = cursor.setArray("auditLog");
    accessRoleInformation.getAuditLog()
                         .forEach(auditLogEntry -> {
                             var entryCursor = auditLogCursor.addObject();
                             entryCursor.setString("created", auditLogEntry.getCreationTime());
                             entryCursor.setString("approver", auditLogEntry.getApprover());
                             entryCursor.setString("reason", auditLogEntry.getReason());
                             entryCursor.setString("status", auditLogEntry.getAction());
                         });
    return new SlimeJsonResponse(slime);
}
/**
 * Requests ssh access to the given tenant. Restricted to operators,
 * and only supported for cloud tenants.
 */
private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
    if ( ! isOperator(request))
        return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only request access for cloud tenants");
    controller.serviceRegistry().accessControlService().requestSshAccess(tenant);
    return new MessageResponse("OK");
}
/**
 * Approves or rejects a pending ssh access request for a cloud tenant.
 * The request body carries "approve" (boolean) and an optional "expiry"
 * (epoch millis); when "expiry" is absent the decision expires one day from now.
 *
 * @return 400 for non-cloud tenants
 */
private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    var inspector = toSlime(request.getData()).get();
    // Default expiry is 24 hours when the field is not supplied.
    var expiry = inspector.field("expiry").valid() ?
        Instant.ofEpochMilli(inspector.field("expiry").asLong()) :
        Instant.now().plus(1, ChronoUnit.DAYS);
    var approve = inspector.field("approve").asBool();
    controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
    return new MessageResponse("OK");
}
/** Enables managed operator access for the given tenant (cloud tenants only). */
private HttpResponse addManagedAccess(String tenantName) {
    return setManagedAccess(tenantName, true);
}
/** Disables managed operator access for the given tenant (cloud tenants only). */
private HttpResponse removeManagedAccess(String tenantName) {
    return setManagedAccess(tenantName, false);
}
/**
 * Enables or disables managed operator access for a cloud tenant.
 *
 * Fix: corrected the typo "privel" → "privilege" in the error message.
 *
 * @param tenantName    name of the tenant to update
 * @param managedAccess whether operator access should be managed
 * @return a JSON response echoing the new setting, or 400 for non-cloud tenants
 */
private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only set access privilege for cloud tenants");
    controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
    var slime = new Slime();
    slime.setObject().setBool("managedAccess", managedAccess);
    return new SlimeJsonResponse(slime);
}
    /** Returns the tenant's info as JSON, or 404 if the tenant is missing or not a cloud tenant. */
    private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                         .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }
    /** Applies the handler to the named tenant if it exists and is a cloud tenant; otherwise returns 404. */
    private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                         .map(tenant -> handler.apply((CloudTenant) tenant))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }
    /**
     * Serializes the given tenant info to a JSON object; an empty info yields an empty object.
     * NOTE(review): the request parameter is unused here but kept for signature compatibility.
     */
    private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
        Slime slime = new Slime();
        Cursor infoCursor = slime.setObject();
        if (!info.isEmpty()) {
            infoCursor.setString("name", info.name());
            infoCursor.setString("email", info.email());
            infoCursor.setString("website", info.website());
            infoCursor.setString("contactName", info.contact().name());
            infoCursor.setString("contactEmail", info.contact().email());
            toSlime(info.address(), infoCursor);
            toSlime(info.billingContact(), infoCursor);
            toSlime(info.contacts(), infoCursor);
        }
        return new SlimeJsonResponse(slime);
    }
    /**
     * Returns the "profile" view of tenant info: contact (name, email), tenant (company, website)
     * and address. An empty info yields an empty object.
     */
    private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
        var slime = new Slime();
        var root = slime.setObject();
        var info = cloudTenant.info();
        if (!info.isEmpty()) {
            var contact = root.setObject("contact");
            contact.setString("name", info.contact().name());
            contact.setString("email", info.contact().email());
            var tenant = root.setObject("tenant");
            tenant.setString("company", info.name());
            tenant.setString("website", info.website());
            toSlime(info.address(), root);
        }
        return new SlimeJsonResponse(slime);
    }
private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
return controller.tenants().get(tenantName)
.map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get()))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
    /**
     * Merges the request body into the tenant's profile info (contact, tenant name, website, address),
     * validates the merged result and stores it under the tenant lock.
     * Fields absent from the request keep their existing values.
     */
    private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
        var info = cloudTenant.info();
        var mergedContact = TenantContact.empty()
                .withName(getString(inspector.field("contact").field("name"), info.contact().name()))
                .withEmail(getString(inspector.field("contact").field("email"), info.contact().email()));
        var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address());
        var mergedInfo = info
                .withName(getString(inspector.field("tenant").field("name"), info.name()))
                .withWebsite(getString(inspector.field("tenant").field("website"), info.website()))
                .withContact(mergedContact)
                .withAddress(mergedAddress);
        validateMergedTenantInfo(mergedInfo);
        controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(mergedInfo);
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("Tenant info updated");
    }
    /**
     * Returns the billing view of tenant info: billing contact (name, email, phone) and its address.
     * An empty info yields an empty object.
     */
    private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) {
        var slime = new Slime();
        var root = slime.setObject();
        var info = cloudTenant.info();
        if (!info.isEmpty()) {
            var billingContact = info.billingContact();
            var contact = root.setObject("contact");
            contact.setString("name", billingContact.contact().name());
            contact.setString("email", billingContact.contact().email());
            contact.setString("phone", billingContact.contact().phone());
            toSlime(billingContact.address(), root);
        }
        return new SlimeJsonResponse(slime);
    }
private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) {
var info = cloudTenant.info();
var contact = info.billingContact().contact();
var address = info.billingContact().address();
var mergedContact = updateTenantInfoContact(inspector.field("contact"), contact);
var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address());
var mergedBilling = info.billingContact()
.withContact(mergedContact)
.withAddress(mergedAddress);
var mergedInfo = info.withBilling(mergedBilling);
controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
var slime = new Slime();
var root = slime.setObject();
toSlime(cloudTenant.info().contacts(), root);
return new SlimeJsonResponse(slime);
}
    /** Replaces the tenant's contact list with the merged result of the request body, under the tenant lock. */
    private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
        var mergedInfo = cloudTenant.info()
                .withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.info().contacts()));
        controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(mergedInfo);
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("Tenant info updated");
    }
    /**
     * Validates merged tenant info: contact name and email are required, the email must contain '@',
     * and a non-blank website must be a parseable URL.
     * NOTE(review): java.net.URL construction is deprecated in recent JDKs — consider URI — but
     * switching would change which strings are accepted, so it is left as-is here.
     *
     * @throws IllegalArgumentException if any check fails
     */
    private void validateMergedTenantInfo(TenantInfo mergedInfo) {
        if (mergedInfo.contact().name().isBlank()) {
            throw new IllegalArgumentException("'contactName' cannot be empty");
        }
        if (mergedInfo.contact().email().isBlank()) {
            throw new IllegalArgumentException("'contactEmail' cannot be empty");
        }
        if (! mergedInfo.contact().email().contains("@")) {
            throw new IllegalArgumentException("'contactEmail' needs to be an email address");
        }
        if (! mergedInfo.website().isBlank()) {
            try {
                new URL(mergedInfo.website());
            } catch (MalformedURLException e) {
                throw new IllegalArgumentException("'website' needs to be a valid address");
            }
        }
    }
    /** Writes the address, if non-empty, as an "address" object under the given cursor. */
    private void toSlime(TenantAddress address, Cursor parentCursor) {
        if (address.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("address");
        addressCursor.setString("addressLines", address.address());
        addressCursor.setString("postalCodeOrZip", address.code());
        addressCursor.setString("city", address.city());
        addressCursor.setString("stateRegionProvince", address.region());
        addressCursor.setString("country", address.country());
    }
    /** Writes the billing contact, if non-empty, as a "billingContact" object under the given cursor. */
    private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
        if (billingContact.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("billingContact");
        addressCursor.setString("name", billingContact.contact().name());
        addressCursor.setString("email", billingContact.contact().email());
        addressCursor.setString("phone", billingContact.contact().phone());
        toSlime(billingContact.address(), addressCursor);
    }
private void toSlime(TenantContacts contacts, Cursor parentCursor) {
Cursor contactsCursor = parentCursor.setArray("contacts");
contacts.all().forEach(contact -> {
Cursor contactCursor = contactsCursor.addObject();
Cursor audiencesArray = contactCursor.setArray("audiences");
contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
switch (contact.type()) {
case EMAIL:
var email = (TenantContacts.EmailContact) contact;
contactCursor.setString("email", email.email());
return;
default:
throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
}
});
}
private static TenantContacts.Audience fromAudience(String value) {
return switch (value) {
case "tenant": yield TenantContacts.Audience.TENANT;
case "notifications": yield TenantContacts.Audience.NOTIFICATIONS;
default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
};
}
private static String toAudience(TenantContacts.Audience audience) {
return switch (audience) {
case TENANT: yield "tenant";
case NOTIFICATIONS: yield "notifications";
};
}
    /** Updates tenant info from the request body for a cloud tenant, or returns 404. */
    private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                         .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }
private String getString(Inspector field, String defaultVale) {
return field.valid() ? field.asString().trim() : defaultVale;
}
    /**
     * Merges the flat (legacy) tenant-info request body into the existing info, validates the
     * merged result and stores it under the tenant lock. Fields absent from the request keep
     * their existing values.
     */
    private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
        TenantInfo oldInfo = tenant.info();
        Inspector insp = toSlime(request.getData()).get();
        TenantContact mergedContact = TenantContact.empty()
                .withName(getString(insp.field("contactName"), oldInfo.contact().name()))
                .withEmail(getString(insp.field("contactEmail"), oldInfo.contact().email()));
        TenantInfo mergedInfo = TenantInfo.empty()
                .withName(getString(insp.field("name"), oldInfo.name()))
                .withEmail(getString(insp.field("email"), oldInfo.email()))
                .withWebsite(getString(insp.field("website"), oldInfo.website()))
                .withContact(mergedContact)
                .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
                .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()))
                .withContacts(updateTenantInfoContacts(insp.field("contacts"), oldInfo.contacts()));
        validateMergedTenantInfo(mergedInfo);
        controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(mergedInfo);
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("Tenant info updated");
    }
    /**
     * Merges the given JSON object into an existing address. Fields absent from the JSON keep
     * their old values. The merged address must be either fully blank or fully populated;
     * a partially filled address is rejected.
     *
     * @throws IllegalArgumentException if only some address fields are set
     */
    private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
        if (!insp.valid()) return oldAddress;
        TenantAddress address = TenantAddress.empty()
                .withCountry(getString(insp.field("country"), oldAddress.country()))
                .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
                .withCity(getString(insp.field("city"), oldAddress.city()))
                .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
                .withAddress(getString(insp.field("addressLines"), oldAddress.address()));
        List<String> fields = List.of(address.address(),
                address.code(),
                address.country(),
                address.city(),
                address.region());
        // Accept all-blank (no address) or all-set; anything in between is incomplete.
        if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank))
            return address;
        throw new IllegalArgumentException("All address fields must be set");
    }
private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) {
if (!insp.valid()) return oldContact;
String email = getString(insp.field("email"), oldContact.email());
if (!email.isBlank() && !email.contains("@")) {
throw new IllegalArgumentException("'email' needs to be an email address");
}
return TenantContact.empty()
.withName(getString(insp.field("name"), oldContact.name()))
.withEmail(getString(insp.field("email"), oldContact.email()))
.withPhone(getString(insp.field("phone"), oldContact.phone()));
}
    /** Merges the given JSON object into an existing billing contact (contact fields plus nested address). */
    private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) {
        if (!insp.valid()) return oldContact;
        return TenantBilling.empty()
                .withContact(updateTenantInfoContact(insp, oldContact.contact()))
                .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
    }
    /**
     * Parses a JSON array of email contacts into a new TenantContacts, replacing the old list
     * entirely when the field is present. Each entry must have an "email" containing '@' and
     * a list of known "audiences".
     *
     * @throws IllegalArgumentException on an invalid email or unknown audience
     */
    private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) {
        if (!insp.valid()) return oldContacts;
        List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> {
            String email = inspector.field("email").asString().trim();
            List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences"))
                    .map(audience -> fromAudience(audience.asString()))
                    .toList();
            if (!email.contains("@")) {
                throw new IllegalArgumentException("'email' needs to be an email address");
            }
            return new TenantContacts.EmailContact(audiences, email);
        }).toList();
        return new TenantContacts(contacts);
    }
    /**
     * Lists notifications, either for one tenant or for all tenants that have any, filtered by the
     * optional request properties application, instance, zone, job, type and level. The
     * "excludeMessages" property drops the messages array from each entry.
     */
    private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
        boolean productionOnly = showOnlyProductionInstances(request);
        boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
        Slime slime = new Slime();
        Cursor notificationsArray = slime.setObject().setArray("notifications");
        tenant.map(t -> Stream.of(TenantName.from(t)))
              .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
              .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
              .filter(notification ->
                      propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
                      propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
                      propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
                      propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
                      propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
                      propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
              .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
        return new SlimeJsonResponse(slime);
    }
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
return Optional.ofNullable(request.getProperty(property))
.map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get()))
.orElse(true);
}
    /**
     * Serializes one notification to the given cursor: timestamp, level, type, optional messages,
     * and whichever source coordinates (tenant, application, instance, zone, cluster, job, run)
     * are present.
     */
    private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
        cursor.setLong("at", notification.at().toEpochMilli());
        cursor.setString("level", notificationLevelAsString(notification.level()));
        cursor.setString("type", notificationTypeAsString(notification.type()));
        if (!excludeMessages) {
            Cursor messagesArray = cursor.setArray("messages");
            notification.messages().forEach(messagesArray::addString);
        }
        if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
        notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
        notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
        notification.source().zoneId().ifPresent(zoneId -> {
            cursor.setString("environment", zoneId.environment().value());
            cursor.setString("region", zoneId.region().value());
        });
        notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
        notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
        notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
    }
private static String notificationTypeAsString(Notification.Type type) {
return switch (type) {
case submission, applicationPackage: yield "applicationPackage";
case testPackage: yield "testPackage";
case deployment: yield "deployment";
case feedBlock: yield "feedBlock";
case reindex: yield "reindex";
};
}
private static String notificationLevelAsString(Notification.Level level) {
return switch (level) {
case info: yield "info";
case warning: yield "warning";
case error: yield "error";
};
}
    /**
     * Lists applications for a tenant — all of them, or just the named one — as a JSON array,
     * each with its instances (optionally production instances only) and API urls.
     *
     * @throws NotExistsException if a named application does not exist
     */
    private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        getTenantOrThrow(tenantName);
        List<Application> applications = applicationName.isEmpty() ?
                controller.applications().asList(tenant) :
                controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
                          .map(List::of)
                          .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
        Slime slime = new Slime();
        Cursor applicationArray = slime.setArray();
        for (Application application : applications) {
            Cursor applicationObject = applicationArray.addObject();
            applicationObject.setString("tenant", application.id().tenant().value());
            applicationObject.setString("application", application.id().application().value());
            applicationObject.setString("url", withPath("/application/v4" +
                    "/tenant/" + application.id().tenant().value() +
                    "/application/" + application.id().application().value(),
                    request.getUri()).toString());
            Cursor instanceArray = applicationObject.setArray("instances");
            for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                    : application.instances().keySet()) {
                Cursor instanceObject = instanceArray.addObject();
                instanceObject.setString("instance", instance.value());
                instanceObject.setString("url", withPath("/application/v4" +
                        "/tenant/" + application.id().tenant().value() +
                        "/application/" + application.id().application().value() +
                        "/instance/" + instance.value(),
                        request.getUri()).toString());
            }
        }
        return new SlimeJsonResponse(slime);
    }
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
ZoneId zone = type.zone();
RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision();
byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
}
    /**
     * Returns the stored application-package diff for the given dev run.
     *
     * @throws NotExistsException if no diff is stored for the run
     */
    private HttpResponse devApplicationPackageDiff(RunId runId) {
        DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone());
        return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number())
                         .map(ByteArrayResponse::new)
                         .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId));
    }
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
final long build;
String requestedBuild = request.getProperty("build");
if (requestedBuild != null) {
if (requestedBuild.equals("latestDeployed")) {
build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision()
.map(RevisionId::number)
.orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication));
} else {
try {
build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
}
}
} else {
build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
.map(version -> version.id().number())
.orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
}
RevisionId revision = RevisionId.forProduction(build);
boolean tests = request.getBooleanProperty("tests");
byte[] applicationPackage = tests ?
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
return new ZipResponse(filename, applicationPackage);
}
    /**
     * Returns the stored application-package diff for the given build number.
     * A non-numeric build surfaces as NumberFormatException, which (being an
     * IllegalArgumentException) the handler maps to a bad-request response.
     *
     * @throws NotExistsException if no diff is stored for the build
     */
    private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
        TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
        return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number))
                         .map(ByteArrayResponse::new)
                         .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
    }
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
return new SlimeJsonResponse(slime);
}
    /**
     * Returns the Vespa version the application should compile against, optionally constrained
     * to the major version given by allowMajorParam.
     *
     * @throws IllegalArgumentException if allowMajorParam is not an integer
     */
    private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
        Slime slime = new Slime();
        OptionalInt allowMajor = OptionalInt.empty();
        if (allowMajorParam != null) {
            try {
                allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
            }
        }
        Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
        slime.setObject().setString("compileVersion", compileVersion.toFullString());
        return new SlimeJsonResponse(slime);
    }
    /** Returns the named instance, with its application's deployment status, as JSON. */
    private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName),
                controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
        return new SlimeJsonResponse(slime);
    }
    /**
     * Registers the PEM-encoded public key in the request body as a developer key for the
     * requesting user, and returns the tenant's full key list.
     *
     * @throws IllegalArgumentException if the tenant is not a cloud tenant
     */
    private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        Principal user = request.getJDiscRequest().getUserPrincipal();
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withDeveloperKey(developerKey, user);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }
    /**
     * Asks the config server of the given deployment to validate a configured tenant secret store
     * against an AWS region and parameter name, and wraps the result in a JSON response.
     * Returns 400 if the application id does not belong to the tenant, 404 if the store is not configured.
     */
    private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
        var awsRegion = request.getProperty("aws-region");
        var parameterName = request.getProperty("parameter-name");
        var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
        if (!applicationId.tenant().equals(TenantName.from(tenantName)))
            return ErrorResponse.badRequest("Invalid application id");
        var zoneId = requireZone(ZoneId.from(request.getProperty("zone")));
        var deploymentId = new DeploymentId(applicationId, zoneId);
        var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
        var tenantSecretStore = tenant.tenantSecretStores()
                .stream()
                .filter(secretStore -> secretStore.getName().equals(secretStoreName))
                .findFirst();
        if (tenantSecretStore.isEmpty())
            return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
        var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
        try {
            // Re-parse the config server's JSON so it can be embedded under "result".
            var responseRoot = new Slime();
            var responseCursor = responseRoot.setObject();
            responseCursor.setString("target", deploymentId.toString());
            var responseResultCursor = responseCursor.setObject("result");
            var responseSlime = SlimeUtils.jsonToSlime(response);
            SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
            return new SlimeJsonResponse(responseRoot);
        } catch (JsonParseException e) {
            return ErrorResponses.logThrowing(request, log, e);
        }
    }
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
Slime root = new Slime();
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withoutDeveloperKey(developerKey);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
    /** Writes each (public key, owner) pair as an object with "key" (PEM) and "user" fields. */
    private void toSlime(Cursor keysArray, Map<PublicKey, ? extends Principal> keys) {
        keys.forEach((key, principal) -> {
            Cursor keyObject = keysArray.addObject();
            keyObject.setString("key", KeyUtils.toPem(key));
            keyObject.setString("user", principal.getName());
        });
    }
    /** Adds the PEM-encoded deploy key in the request body to the application and returns the full key list. */
    private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }
    /** Removes the PEM-encoded deploy key in the request body from the application and returns the remaining keys. */
    private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withoutDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }
    /**
     * Configures a new tenant secret store (awsId, externalId, role from the request body) for a
     * cloud tenant: creates the tenant policy, registers the store, persists it on the tenant,
     * and returns the updated store list. Returns 400 on an invalid or duplicate store.
     *
     * @throws IllegalArgumentException if the tenant is not a cloud tenant
     */
    private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        var data = toSlime(request.getData()).get();
        var awsId = mandatory("awsId", data).asString();
        var externalId = mandatory("externalId", data).asString();
        var role = mandatory("role", data).asString();
        var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var tenantSecretStore = new TenantSecretStore(name, awsId, role);
        if (!tenantSecretStore.isValid()) {
            return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
        }
        if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
            return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
        }
        controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
        controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
        controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
            controller.tenants().store(lockedTenant);
        });
        // Re-read the tenant to reflect the stored state in the response.
        tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var slime = new Slime();
        toSlime(slime.setObject(), tenant.tenantSecretStores());
        return new SlimeJsonResponse(slime);
    }
    /**
     * Deletes the named tenant secret store: removes it from the secret service, deletes the
     * tenant policy, persists the removal, and returns the remaining store list.
     * Returns 404 if the store is not configured.
     * NOTE(review): the request parameter is unused but kept for handler-signature uniformity.
     */
    private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
        var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var optionalSecretStore = tenant.tenantSecretStores().stream()
                .filter(secretStore -> secretStore.getName().equals(name))
                .findFirst();
        if (optionalSecretStore.isEmpty())
            return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
        var tenantSecretStore = optionalSecretStore.get();
        controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
        controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
        controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
            controller.tenants().store(lockedTenant);
        });
        // Re-read the tenant to reflect the stored state in the response.
        tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var slime = new Slime();
        toSlime(slime.setObject(), tenant.tenantSecretStores());
        return new SlimeJsonResponse(slime);
    }
    /**
     * Sets the AWS archive-access role (from the request body's "role") on a cloud tenant.
     * Returns 400 for a blank role.
     *
     * @throws IllegalArgumentException if the tenant is not a cloud tenant
     */
    private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        var data = toSlime(request.getData()).get();
        var role = mandatory("role", data).asString();
        if (role.isBlank()) {
            return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
        }
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
            var access = lockedTenant.get().archiveAccess();
            lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role));
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
    }
    /**
     * Removes the AWS archive-access role from a cloud tenant.
     *
     * @throws IllegalArgumentException if the tenant is not a cloud tenant
     */
    private HttpResponse removeAwsArchiveAccess(String tenantName) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
            var access = lockedTenant.get().archiveAccess();
            lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole());
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
    }
    /**
     * Sets the GCP archive-access member (from the request body's "member") on a cloud tenant.
     * Returns 400 for a blank member.
     *
     * @throws IllegalArgumentException if the tenant is not a cloud tenant
     */
    private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        var data = toSlime(request.getData()).get();
        var member = mandatory("member", data).asString();
        if (member.isBlank()) {
            return ErrorResponse.badRequest("GCP archive access role can't be whitespace only");
        }
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
            var access = lockedTenant.get().archiveAccess();
            lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
    }
    /**
     * Removes the GCP archive-access member from a cloud tenant.
     *
     * @throws IllegalArgumentException if the tenant is not a cloud tenant
     */
    private HttpResponse removeGcpArchiveAccess(String tenantName) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
            var access = lockedTenant.get().archiveAccess();
            lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember());
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
    }
/**
 * Applies PATCH-style updates to an application: optional "majorVersion"
 * (0 clears the pinned major) and optional "pemDeployKey" (added to the key set).
 * Returns a message listing what changed, or "No applicable changes."
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector json = toSlime(request.getData()).get();
    StringJoiner changes = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorField = json.field("majorVersion");
        if (majorField.valid()) {
            // 0 is the sentinel for "unpin the major version".
            Integer major = majorField.asLong() == 0 ? null : (int) majorField.asLong();
            application = application.withMajorVersion(major);
            changes.add("Set major version to " + (major == null ? "empty" : major));
        }
        Inspector keyField = json.field("pemDeployKey");
        if (keyField.valid()) {
            String pem = keyField.asString();
            application = application.withDeployKey(KeyUtils.fromPemEncodedPublicKey(pem));
            changes.add("Added deploy key " + pem);
        }
        controller.applications().store(application);
    });
    return new MessageResponse(changes.toString());
}
/** Looks up an application, throwing NotExistsException (404) when absent. */
private Application getApplication(String tenantName, String applicationName) {
    var id = TenantAndApplicationId.from(tenantName, applicationName);
    var application = controller.applications().getApplication(id);
    return application.orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Looks up an application instance, throwing NotExistsException (404) when absent. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    var id = ApplicationId.from(tenantName, applicationName, instanceName);
    var instance = controller.applications().getInstance(id);
    return instance.orElseThrow(() -> new NotExistsException(id + " not found"));
}
/**
 * Lists the nodes allocated to a deployment, with state, orchestration status,
 * version, resources and cluster membership for each node.
 */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zoneId = requireZone(environment, region);
    List<Node> allocatedNodes = controller.serviceRegistry().configServer().nodeRepository()
                                          .list(zoneId, NodeFilter.all().applications(applicationId));
    Slime slime = new Slime();
    Cursor nodeArray = slime.setObject().setArray("nodes");
    for (Node n : allocatedNodes) {
        Cursor entry = nodeArray.addObject();
        entry.setString("hostname", n.hostname().value());
        entry.setString("state", valueOf(n.state()));
        n.reservedTo().ifPresent(tenant -> entry.setString("reservedTo", tenant.value()));
        entry.setString("orchestration", valueOf(n.serviceState()));
        entry.setString("version", n.currentVersion().toString());
        n.flavor().ifPresent(flavor -> entry.setString("flavor", flavor));
        toSlime(n.resources(), entry);
        entry.setString("clusterId", n.clusterId());
        entry.setString("clusterType", valueOf(n.clusterType()));
        entry.setBool("down", n.down());
        // A node is reported retired both when actually retired and when retirement is requested.
        entry.setBool("retired", n.retired() || n.wantToRetire());
        entry.setBool("restarting", n.wantedRestartGeneration() > n.restartGeneration());
        entry.setBool("rebooting", n.wantedRebootGeneration() > n.rebootGeneration());
        entry.setString("group", n.group());
        entry.setLong("index", n.index());
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Serializes the cluster resources of a deployment: configured min/max, current
 * allocation, any pending autoscaling target, suggestions, utilization and
 * scaling history, per cluster.
 */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
Slime slime = new Slime();
Cursor clustersObject = slime.setObject().setObject("clusters");
for (Cluster cluster : application.clusters().values()) {
Cursor clusterObject = clustersObject.setObject(cluster.id().value());
clusterObject.setString("type", cluster.type().name());
toSlime(cluster.min(), clusterObject.setObject("min"));
toSlime(cluster.max(), clusterObject.setObject("max"));
toSlime(cluster.current(), clusterObject.setObject("current"));
// Only report a target when one exists and it actually differs from the current allocation.
if (cluster.target().isPresent()
&& ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
toSlime(cluster.target().get(), clusterObject.setObject("target"));
cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
}
return new SlimeJsonResponse(slime);
}
/** Maps a node state to its API string; throws on states not exposed by this API. */
private static String valueOf(Node.State state) {
    return switch (state) {
        case failed -> "failed";
        case parked -> "parked";
        case dirty -> "dirty";
        case ready -> "ready";
        case active -> "active";
        case inactive -> "inactive";
        case reserved -> "reserved";
        case provisioned -> "provisioned";
        case breakfixed -> "breakfixed";
        case deprovisioned -> "deprovisioned";
        default -> throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    };
}
/** Maps an orchestration state to its API string; anything unrecognized reports as "unknown". */
static String valueOf(Node.ServiceState state) {
    return switch (state) {
        case expectedUp -> "expectedUp";
        case allowedDown -> "allowedDown";
        case permanentlyDown -> "permanentlyDown";
        case unorchestrated -> "unorchestrated";
        default -> "unknown"; // includes ServiceState.unknown
    };
}
/** Maps a cluster type to its API string; the 'unknown' type is not serializable here. */
private static String valueOf(Node.ClusterType type) {
    return switch (type) {
        case admin -> "admin";
        case content -> "content";
        case container -> "container";
        case combined -> "combined";
        case unknown -> throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    };
}
/** Maps a disk speed to its API string. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    return switch (diskSpeed) {
        case fast -> "fast";
        case slow -> "slow";
        case any -> "any";
    };
}
/** Maps a storage type to its API string. */
private static String valueOf(NodeResources.StorageType storageType) {
    return switch (storageType) {
        case remote -> "remote";
        case local -> "local";
        case any -> "any";
    };
}
/**
 * Streams logs for a deployment straight from the config server to the client.
 * Query parameters are passed through to the config server's log API.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    InputStream logs = controller.serviceRegistry().configServer().getLogs(deploymentId, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // Close the upstream log stream once it has been copied to the client.
            try (logs) {
                logs.transferTo(outputStream);
            }
        }
        @Override
        public long maxPendingBytes() {
            return 1 << 26; // Allow up to 64 MiB of buffered response data.
        }
    };
}
/** Returns the current support access state for a deployment. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    var deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                      requireZone(environment, region));
    var currentState = controller.supportAccess().forDeployment(deployment);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(currentState, controller.clock().instant()));
}
/** Grants Vespa support staff access to a deployment for seven days, recording who allowed it. */
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    var deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                      requireZone(environment, region));
    Principal grantedBy = requireUserPrincipal(request);
    Instant now = controller.clock().instant();
    Instant expiry = now.plus(7, ChronoUnit.DAYS);
    SupportAccess allowed = controller.supportAccess().allow(deployment, expiry, grantedBy.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}
/**
 * Revokes support access to a deployment and re-triggers the deployment job so the
 * change takes effect, recording who disallowed it.
 */
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    // Reuse the validated principal instead of re-reading it from the request, which
    // bypassed the null check in requireUserPrincipal (and matches allowSupportAccess).
    controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}
/** Returns Proton (content node) metrics for a deployment as JSON. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    var deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                      requireZone(environment, region));
    return buildResponseFromProtonMetrics(controller.serviceRegistry().configServer().getProtonMetrics(deployment));
}
/**
 * Returns scaling events for a deployment, grouped per cluster, limited to the
 * [from, until] window given as epoch seconds (defaulting to [EPOCH, now]).
 */
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    Instant from = Optional.ofNullable(request.getProperty("from"))
                           .map(Long::valueOf)
                           .map(Instant::ofEpochSecond)
                           .orElse(Instant.EPOCH);
    Instant until = Optional.ofNullable(request.getProperty("until"))
                            .map(Long::valueOf)
                            .map(Instant::ofEpochSecond)
                            .orElse(Instant.now(controller.clock()));
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    var eventsByCluster = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    eventsByCluster.forEach((cluster, events) -> scalingEventsToSlime(events, root.setArray(cluster.clusterId().value())));
    return new SlimeJsonResponse(slime);
}
/**
 * Wraps a list of Proton metrics in a {"metrics": [...]} JSON document.
 * Returns an empty 500 response if JSON serialization fails.
 */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var root = jsonMapper.createObjectNode();
        var metricsArray = jsonMapper.createArrayNode();
        for (ProtonMetrics entry : protonMetrics)
            metricsArray.add(entry.toJson());
        root.set("metrics", metricsArray);
        String body = jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root);
        return new JsonResponse(200, body);
    } catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Forces or re-triggers a deployment job for the given application and job type.
 * Request body flags: "skipTests", "reTrigger", "skipRevision", "skipUpgrade".
 * Returns a message naming the job(s) triggered, and which upgrades were suppressed.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
boolean requireTests = ! requestObject.field("skipTests").asBool();
boolean reTrigger = requestObject.field("reTrigger").asBool();
boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
// reTrigger repeats the last run of this job; forceTrigger may start several jobs.
String triggered = reTrigger
? controller.applications().deploymentTrigger()
.reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
: controller.applications().deploymentTrigger()
.forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
.stream().map(job -> job.type().jobName()).collect(joining(", "));
// Builds e.g. ", without revision upgrade", ", without platform upgrade",
// or ", without revision and platform upgrade"; empty when nothing is suppressed.
String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
(upgradeRevision ? "" : "revision") +
( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
(upgradePlatform ? "" : "platform") +
( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
: "Triggered " + triggered + " for " + id + suppressedUpgrades);
}
/** Pauses a deployment job for the maximum allowed pause period. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant pausedUntil = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, pausedUntil);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes a previously paused deployment job. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/**
 * Serializes an application overview: identity, pending/outstanding changes,
 * all (or only production) instances, deploy keys, metrics and activity.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
object.setString("tenant", application.id().tenant().value());
object.setString("application", application.id().application().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/job/",
request.getUri()).toString());
DeploymentStatus status = controller.jobController().deploymentStatus(application);
application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
application.projectId().ifPresent(id -> object.setLong("projectId", id));
// Change information is reported from the first instance only.
application.instances().values().stream().findFirst().ifPresent(instance -> {
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change(), application);
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
});
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
Cursor instancesArray = object.setArray("instances");
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes one instance of an application as part of the application-level view:
 * pending/outstanding changes, change blockers, rotation id, and its deployments
 * (recursively when requested, otherwise as links).
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // Removed an unused local (status.instanceJobs(...) result was never read).
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), status.application());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    addRotationId(object, instance);
    // Deployments are listed in deployment-spec order when the instance has a declared spec.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
            .map(spec -> sortedDeployments(instance.deployments().values(), spec))
            .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Adds the id of the instance's first assigned rotation, if it has any. */
private void addRotationId(Cursor object, Instance instance) {
    if ( ! instance.rotations().isEmpty())
        object.setString("rotationId", instance.rotations().get(0).rotationId().asString());
}
/**
 * Serializes a single instance as a top-level resource: identity, source info,
 * pending/outstanding changes, change blockers, rotation status, deployments
 * (plus zones with jobs but no deployment yet), deploy keys, metrics and activity.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.revisions().last().ifPresent(version -> {
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // Removed an unused local (status.instanceJobs(...) result was never read).
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    addRotationId(object, instance);
    // Deployments are listed in deployment-spec order when the instance has a declared spec.
    List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
            .map(spec -> sortedDeployments(instance.deployments().values(), spec))
            .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Also list zones which have production/manual jobs for this instance, but no deployment yet.
    Stream.concat(status.jobSteps().keySet().stream()
                        .filter(job -> job.application().instance().equals(instance.name()))
                        .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                  controller.jobController().active(instance.id()).stream()
                            .map(run -> run.id().job())
                            .filter(job -> job.type().environment().isManuallyDeployed()))
          .map(job -> job.type().zone())
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Serializes a single deployment, or 404s if the instance or the deployment does not exist. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
    Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());

    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes a change: the platform version and/or the application revision it carries. */
private void toSlime(Cursor object, Change change, Application application) {
    change.platform().ifPresent(platformVersion -> object.setString("version", platformVersion.toString()));
    change.revision().ifPresent(revision ->
            JobControllerApiHandlerHelper.toSlime(object.setObject("revision"),
                                                  application.revisions().get(revision)));
}
/** Serializes one endpoint: cluster, TLS, URL, scope, routing method and legacy flag. */
private void toSlime(Endpoint endpoint, Cursor object) {
object.setString("cluster", endpoint.cluster().value());
object.setBool("tls", endpoint.tls());
object.setString("url", endpoint.url().toString());
object.setString("scope", endpointScopeString(endpoint.scope()));
object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
object.setBool("legacy", endpoint.legacy());
}
/**
 * Serializes the full details of one deployment: identity, endpoints, versions,
 * deploy/expiry times, rotation status, job status, quota/cost, archive URI,
 * activity and deployment metrics.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
response.setString("tenant", deploymentId.applicationId().tenant().value());
response.setString("application", deploymentId.applicationId().application().value());
response.setString("instance", deploymentId.applicationId().instance().value());
response.setString("environment", deploymentId.zoneId().environment().value());
response.setString("region", deploymentId.zoneId().region().value());
var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
var endpointArray = response.setArray("endpoints");
// Zone-scoped endpoints first, then declared (global/application) endpoints targeting this deployment.
EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
.scope(Endpoint.Scope.zone);
if (!legacyEndpoints) {
zoneEndpoints = zoneEndpoints.not().legacy().direct();
}
for (var endpoint : zoneEndpoints) {
toSlime(endpoint, endpointArray.addObject());
}
EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
.targets(deploymentId);
if (!legacyEndpoints) {
declaredEndpoints = declaredEndpoints.not().legacy().direct();
}
for (var endpoint : declaredEndpoints) {
toSlime(endpoint, endpointArray.addObject());
}
response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
response.setString("version", deployment.version().toFullString());
response.setString("revision", application.revisions().get(deployment.revision()).stringId());
response.setLong("build", deployment.revision().number());
// Expiry is only reported for zones with a deployment time-to-live configured.
Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));
application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
var instance = application.instances().get(deploymentId.applicationId().instance());
if (instance != null) {
if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
// Job status: pipeline-driven deployments derive it from the deployment status,
// manually deployed zones from the last run of the deployment job.
if (!deployment.zone().environment().isManuallyDeployed()) {
DeploymentStatus status = controller.jobController().deploymentStatus(application);
JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
Optional.ofNullable(status.jobSteps().get(jobId))
.ifPresent(stepStatus -> {
JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
response.setString("status", "complete");
else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
response.setString("status", "pending");
else
response.setString("status", "running");
});
} else {
var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
deploymentRun.ifPresent(run -> {
response.setString("status", run.hasEnded() ? "complete" : "running");
});
}
}
response.setDouble("quota", deployment.quota().rate());
deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)
.ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Writes the BCP status object carrying the rotation state. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Serializes the endpoint status of each assigned rotation for the given deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor statusArray = object.setArray("endpointStatus");
    for (AssignedRotation assigned : rotations) {
        Cursor entry = statusArray.addObject();
        var rotationTargets = status.of(assigned.rotationId());
        entry.setString("endpointId", assigned.endpointId().id());
        entry.setString("rotationId", assigned.rotationId().asString());
        entry.setString("clusterId", assigned.clusterId().value());
        entry.setString("status", rotationStateString(status.of(assigned.rotationId(), deployment)));
        entry.setLong("lastUpdated", rotationTargets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring-system (Yamas) URI for a deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/** Sets the global routing status (in or out of service) for one deployment of an instance. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    if (instance.deployments().get(zone) == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    // Operator-initiated changes are recorded with a distinct agent so they can be told apart from tenant changes
    RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(new DeploymentId(instance.id(), zone)).setRoutingStatus(status, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                           instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Returns the current global routing status for the primary rotation endpoint of a deployment, if it has one. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
              .requiresRotation()
              .primary()
              .ifPresent(endpoint -> {
                  RoutingStatus status = controller.routing().of(deploymentId).routingStatus();
                  array.addString(endpoint.upstreamName(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.value().name());
                  statusObject.setString("reason", "");
                  statusObject.setString("agent", status.agent().name());
                  statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation status of a single rotation (selected by endpointId) for a deployment in the given zone. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the change (platform and/or application revision) currently rolling out for the instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.revision().ifPresent(revision -> root.setString("application", revision.toString()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Reports whether orchestration has suspended the given deployment. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Proxies the /status page of a service on a specific node of the given deployment. */
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Query forwardedQuery = Query.empty().add(request.getJDiscRequest().parameters());
    return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId,
                                                                          serviceName,
                                                                          DomainName.of(host),
                                                                          HttpURL.Path.parse("/status").append(restPath),
                                                                          forwardedQuery);
}
/** Returns the service nodes of the given deployment, as seen by the config server. */
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.serviceRegistry().configServer().getServiceNodes(new DeploymentId(applicationId, requireZone(environment, region)));
}
/** Proxies a /state/v1 request to a service on a node, forwarding the original request parameters and URL. */
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    // Pass the original request URL (minus its query) along, so the target can build correct links back.
    Query query = Query.empty().add(request.getJDiscRequest().parameters())
                       .set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString());
    return controller.serviceRegistry().configServer().getServiceNodePage(
            deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
}
/** Returns application package content at the given path for a deployment. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(applicationId, requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
}
/** Updates an existing tenant from the request body and returns its resulting state. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 if the tenant does not exist
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new tenant; in public systems the creating user is also stored as the initial tenant contact. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    if (controller.system().isPublic()) {
        User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
        TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
                                    .info()
                                    .withContact(TenantContact.from(user.name(), user.email()));
        controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class,
                                         lockedTenant -> controller.tenants().store(lockedTenant.withInfo(info)));
    }
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new application under the given tenant and returns its serialized id. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().createApplication(id, credentials); // created application itself is not part of the response
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus); // empty version means "current system version"
        // Only operators may force a version which is not active in this system
        if ( ! versionStatus.isActive(version) && ! isOperator(request)) {
            String activeVersions = versionStatus.versions()
                                                 .stream()
                                                 .map(VespaVersion::versionNumber)
                                                 .map(Version::toString)
                                                 .collect(joining(", "));
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + activeVersions);
        }
        Change change = pin ? Change.of(version).withPin() : Change.of(version);
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Inspector buildField = toSlime(request.getData()).get().field("build");
    long build = buildField.valid() ? buildField.asLong() : -1; // -1 means "latest known package"
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        RevisionId revision = build == -1
                              ? application.get().revisions().last().get().id()
                              : getRevision(application.get(), build);
        Change change = Change.of(revision);
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Finds the revision with the given build number, requiring that its package is still stored. */
private RevisionId getRevision(Application application, long build) {
    Optional<RevisionId> match = application.revisions().withPackage().stream()
                                            .map(ApplicationVersion::id)
                                            .filter(revision -> revision.number() == build)
                                            .findFirst()
                                            // The revision entry may outlive its stored package; require the package too.
                                            .filter(revision -> controller.applications().applicationStore()
                                                                          .hasBuild(application.id().tenant(),
                                                                                    application.id().application(),
                                                                                    build));
    return match.orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
}
/** Marks the given build as non-deployable and cancels any instance change currently targeting it. */
private HttpResponse cancelBuild(String tenantName, String applicationName, String build) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
    controller.applications().lockApplicationOrThrow(id, application -> {
        controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
        for (Instance instance : application.get().instances().values()) {
            if (instance.change().revision().equals(Optional.of(revision)))
                controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION);
        }
    });
    return new MessageResponse("Marked build '" + build + "' as non-deployable");
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        controller.applications().deploymentTrigger().cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
        response.append("Changed deployment from '").append(change)
                .append("' to '").append(controller.applications().requireInstance(id).change())
                .append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // "clusterId" and "documentType" are optional comma-separated lists; blank entries are ignored.
    List<String> clusterNames = Stream.of(Optional.ofNullable(request.getProperty("clusterId")).orElse("").split(","))
                                      .filter(cluster -> ! cluster.isBlank())
                                      .toList();
    List<String> documentTypes = Stream.of(Optional.ofNullable(request.getProperty("documentType")).orElse("").split(","))
                                       .filter(type -> ! type.isBlank())
                                       .toList();
    Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
    boolean indexedOnly = request.getBooleanProperty("indexedOnly");
    controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed);
    StringBuilder message = new StringBuilder("Requested reindexing of " + id + " in " + zone);
    if ( ! clusterNames.isEmpty()) message.append(", on clusters ").append(String.join(", ", clusterNames));
    if ( ! documentTypes.isEmpty()) message.append(", for types ").append(String.join(", ", documentTypes));
    if (indexedOnly) message.append(", for indexed types");
    if (speed != null) message.append(", with speed ").append(speed);
    return new MessageResponse(message.toString());
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    Cursor clustersArray = root.setArray("clusters");
    reindexing.clusters().entrySet().stream().sorted(comparingByKey()).forEach(cluster -> {
        Cursor clusterObject = clustersArray.addObject();
        clusterObject.setString("name", cluster.getKey());
        // Document types whose reindexing is requested but whose status is not yet known
        Cursor pendingArray = clusterObject.setArray("pending");
        cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()).forEach(pending -> {
            Cursor pendingObject = pendingArray.addObject();
            pendingObject.setString("type", pending.getKey());
            pendingObject.setLong("requiredGeneration", pending.getValue());
        });
        // Document types with a known reindexing status
        Cursor readyArray = clusterObject.setArray("ready");
        cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()).forEach(ready -> {
            Cursor readyObject = readyArray.addObject();
            readyObject.setString("type", ready.getKey());
            setStatus(readyObject, ready.getValue());
        });
    });
    return new SlimeJsonResponse(slime);
}
/** Serializes one reindexing status entry into {@code statusObject}; absent fields are simply omitted. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(at -> statusObject.setLong("readyAtMillis", at.toEpochMilli()));
    status.startedAt().ifPresent(at -> statusObject.setLong("startedAtMillis", at.toEpochMilli()));
    status.endedAt().ifPresent(at -> statusObject.setLong("endedAtMillis", at.toEpochMilli()));
    status.state().ifPresent(state -> statusObject.setString("state", ApplicationApiHandler.toString(state)));
    status.message().ifPresent(message -> statusObject.setString("message", message));
    status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
    status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
}
/** Maps a reindexing state to its lower-case wire name. */
private static String toString(ApplicationReindexing.State state) {
    return switch (state) {
        case PENDING -> "pending";
        case RUNNING -> "running";
        case FAILED -> "failed";
        case SUCCESSFUL -> "successful";
    };
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    // Each filter property is optional; an absent property places no restriction on the restart.
    Optional<HostName> hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::of);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deploymentId, new RestartFilter().withHostName(hostName)
                                                                       .withClusterType(clusterType)
                                                                       .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    ZoneId zone = requireZone(environment, region);
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), zone);
    controller.applications().setSuspension(deploymentId, suspend);
    return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId);
}
/**
 * Starts a direct deployment of the submitted application package through the given job.
 * Direct deployments are restricted to manually deployed environments unless the caller is an operator.
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the same key constant for presence check and lookup (previously a literal vs. the constant).
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone()),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // Parse the optional "deployOptions" part once instead of once per option.
    Optional<Inspector> deployOptions = Optional.ofNullable(dataParts.get("deployOptions"))
                                                .map(json -> SlimeUtils.jsonToSlime(json).get());
    Optional<Version> version = deployOptions.flatMap(options -> optional("vespaVersion", options))
                                             .map(Version::fromString);
    boolean dryRun = deployOptions.flatMap(options -> optional("dryRun", options))
                                  .map(Boolean::valueOf)
                                  .orElse(false);

    ensureApplicationExists(TenantAndApplicationId.from(id), request);
    controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/** Deploys a system application with an application package to the given zone, always on the current system version. */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

    // Only system applications which carry an application package may be deployed through this API.
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage())
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");

    // The deployed version is always the system version; an explicit version is rejected.
    String vespaVersion = deployOptions.field("vespaVersion").asString();
    if ( ! vespaVersion.isEmpty() && ! vespaVersion.equals("null"))
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");

    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading())
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    VespaVersion systemVersion = versionStatus.systemVersion()
            .orElseThrow(() -> new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"));
    ActivateResult result = controller.applications()
                                      .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.versionNumber());
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes a tenant; "forget" permanently erases it and is restricted to operators. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    boolean forget = request.getBooleanProperty("forget");
    if (forget && ! isOperator(request))
        return ErrorResponse.forbidden("Only operators can forget a tenant");
    TenantName tenant = TenantName.from(tenantName);
    Credentials credentials = accessControlRequests.credentials(tenant,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(tenant, Optional.of(credentials), forget);
    return new MessageResponse("Deleted tenant " + tenantName);
}
/** Deletes the given application, using credentials from the request body. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(),
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}
/** Deletes an instance; if it was the last one, the enclosing application is deleted as well. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(),
                                                                    toSlime(request.getData()).get(),
                                                                    request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}
/** Deactivates a deployment and aborts any deployment job still running against it. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                       requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
              .filter(run -> ! run.hasEnded())
              .ifPresent(run -> controller.jobController().abort(run.id(),
                                                                 "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
    // Fall back to the default instance's production deployments when the given instance is not declared in the spec.
    ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
            ? id : TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
            .getInstance(prodInstanceId).stream()
            .flatMap(instance -> instance.productionDeployments().keySet().stream())
            .map(zone -> new DeploymentId(prodInstanceId, zone))
            .collect(Collectors.toCollection(HashSet::new));
    // Production tests target the production instance; other jobs test the given instance in the job's own zone.
    ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(toTest, type.zone()));
    Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone());
    if (deployment == null)
        throw new NotExistsException(toTest + " is not deployed in " + type.zone());
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  deployment.version(),
                                                                  deployment.revision(),
                                                                  deployment.at(),
                                                                  controller.routing().readTestRunnerEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/**
 * Requests a service dump from a node by writing a "serviceDump" report to the node repository.
 * With ?wait=true the call blocks until the dump has completed or failed; otherwise it returns
 * immediately after the request has been stored.
 */
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
                                        String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);
    Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
    if (report != null) {
        Cursor cursor = report.get();
        // Refuse to replace a dump that has neither failed nor completed, unless ?force=true.
        boolean force = request.getBooleanProperty("force");
        if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
            throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
        }
    }
    // Request body must be JSON with at least "configId" and a non-empty "artifacts" array.
    Slime requestPayload;
    try {
        requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
    } catch (Exception e) {
        throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
    }
    Cursor requestPayloadCursor = requestPayload.get();
    String configId = requestPayloadCursor.field("configId").asString();
    long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
    if (configId.isEmpty()) {
        throw new IllegalArgumentException("Missing configId");
    }
    Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
    int artifactEntries = artifactsCursor.entries();
    if (artifactEntries == 0) {
        throw new IllegalArgumentException("Missing or empty 'artifacts'");
    }
    // Build the dump-request report to store on the node.
    Slime dumpRequest = new Slime();
    Cursor dumpRequestCursor = dumpRequest.setObject();
    dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
    dumpRequestCursor.setString("configId", configId);
    Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
    for (int i = 0; i < artifactEntries; i++) {
        dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
    }
    if (expiresAt > 0) {
        dumpRequestCursor.setLong("expiresAt", expiresAt);
    }
    // Optional dump options are copied through to the report verbatim.
    Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
    if (dumpOptionsCursor.children() > 0) {
        SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
    }
    var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
    nodeRepository.updateReports(zone, hostname, reportsUpdate);
    boolean wait = request.getBooleanProperty("wait");
    if (!wait) return new MessageResponse("Request created");
    return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
}
/** Returns the stored service dump report for the given node, or 404 if none exists. */
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment,
                                    String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);
    return getReport(nodeRepository, zone, tenant, application, instance, hostname)
            .map(SlimeJsonResponse::new)
            .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname));
}
/**
 * Polls the service dump report for the given node until it has either completed or failed,
 * then returns it. Blocks the handler thread between polls.
 *
 * @throws IllegalStateException if the report disappears while waiting (previously an unchecked
 *                               Optional.get() raising a bare NoSuchElementException)
 */
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                              String application, String instance, String hostname) {
    int pollInterval = 2; // seconds
    while (true) {
        // Scoping the report to each iteration makes it effectively final, removing the old copy-for-lambda workaround.
        Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname)
                .orElseThrow(() -> new IllegalStateException("Service dump report disappeared for node " + hostname));
        Cursor cursor = report.get();
        if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0)
            return new SlimeJsonResponse(report);
        log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(report))));
        log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
        controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
    }
}
/** Reads the "serviceDump" report of a node, after verifying the node is owned by the given application. */
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                  String application, String instance, String hostname) {
    Node node;
    try {
        node = nodeRepository.getNode(zone, hostname);
    } catch (IllegalArgumentException e) {
        throw new NotExistsException(hostname);
    }
    ApplicationId requested = ApplicationId.from(tenant, application, instance);
    ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
    if ( ! requested.equals(owner))
        throw new IllegalArgumentException("Node is not owned by " + requested.toFullString());
    return Optional.ofNullable(node.reports().get("serviceDump"))
                   .map(SlimeUtils::jsonToSlimeOrThrow);
}
/** Parses a source revision from the given JSON object; all three fields are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    boolean complete = object.field("repository").valid()
                    && object.field("branch").valid()
                    && object.field("commit").valid();
    if ( ! complete)
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(object.field("repository").asString(),
                              object.field("branch").asString(),
                              object.field("commit").asString());
}
/** Returns the tenant with the given name, or throws NotExistsException if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    return tenant.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/**
 * Serializes a tenant and its applications into {@code object}: tenant-type specific fields first,
 * then one entry per (filtered) application instance, and finally tenant metadata.
 * Instance filtering and recursion depth are controlled by request parameters.
 */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            // Contact info is optional and only serialized when present on the tenant
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            toSlime(object.setObject("integrations").setObject("aws"),
                    controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
                    cloudTenant.tenantSecretStores());
            // Quota reporting is best-effort: a failure is logged but does not fail the whole response
            try {
                var usedQuota = applications.stream()
                        .map(Application::quotaUsage)
                        .reduce(QuotaUsage.none, QuotaUsage::add);
                toSlime(object.setObject("quota"), usedQuota);
            } catch (Exception e) {
                log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e)));
            }
            cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role));
            toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess"));
            break;
        }
        case deleted: break;
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    Cursor applicationArray = object.setArray("applications");
    for (Application application : applications) {
        DeploymentStatus status = null; // computed lazily, only when recursing over applications
        Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                              : application.instances().values();
        if (instances.isEmpty() && !showOnlyActiveInstances(request))
            toSlime(application.id(), applicationArray.addObject(), request);
        for (Instance instance : instances) {
            if (showOnlyActiveInstances(request) && instance.deployments().isEmpty())
                continue;
            if (recurseOverApplications(request)) {
                if (status == null) status = controller.jobController().deploymentStatus(application);
                toSlime(applicationArray.addObject(), instance, status, request);
            } else {
                toSlime(instance.id(), applicationArray.addObject(), request);
            }
        }
    }
    tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Writes the optional AWS role and GCP member granted archive access. */
private void toSlime(ArchiveAccess archiveAccess, Cursor object) {
    archiveAccess.awsRole().ifPresent(awsRole -> object.setString("awsRole", awsRole));
    archiveAccess.gcpMember().ifPresent(gcpMember -> object.setString("gcpMember", gcpMember));
}
/** Writes the quota usage rate as 'budgetUsed'. */
private void toSlime(Cursor object, QuotaUsage usage) {
    double budgetUsed = usage.rate();
    object.setDouble("budgetUsed", budgetUsed);
}
/** Serializes cluster resources, including the cost computed for this system. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    object.setDouble("cost", ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()));
}
/** Writes current, ideal and peak utilization for cpu, memory and disk. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor out) {
    out.setDouble("cpu", utilization.cpu());
    out.setDouble("idealCpu", utilization.idealCpu());
    out.setDouble("currentCpu", utilization.currentCpu());
    out.setDouble("peakCpu", utilization.peakCpu());
    out.setDouble("memory", utilization.memory());
    out.setDouble("idealMemory", utilization.idealMemory());
    out.setDouble("currentMemory", utilization.currentMemory());
    out.setDouble("peakMemory", utilization.peakMemory());
    out.setDouble("disk", utilization.disk());
    out.setDouble("idealDisk", utilization.idealDisk());
    out.setDouble("currentDisk", utilization.currentDisk());
    out.setDouble("peakDisk", utilization.peakDisk());
}
/** Serializes each scaling event with its from/to resources and its start/completion timestamps. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    for (Cluster.ScalingEvent event : scalingEvents) {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
        event.completion().ifPresent(completedAt -> eventObject.setLong("completion", completedAt.toEpochMilli()));
    }
}
/** Serializes node resources: compute, storage and bandwidth dimensions. */
private void toSlime(NodeResources resources, Cursor out) {
    out.setDouble("vcpu", resources.vcpu());
    out.setDouble("memoryGb", resources.memoryGb());
    out.setDouble("diskGb", resources.diskGb());
    out.setDouble("bandwidthGbps", resources.bandwidthGbps());
    out.setString("diskSpeed", valueOf(resources.diskSpeed()));
    out.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a brief tenant entry (name, type metadata and url) for the tenant list. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz -> {
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
        }
        case cloud, deleted -> { } // no extra metadata for these types
        default -> throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Adds creation, deletion, dev-deployment, submission and login timestamps for the tenant.
 * 'lastDeploymentToDevMillis' prefers the start of the latest deployment still present in a dev zone;
 * if none exist, it falls back to the start of the latest dev job run on record.
 */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
    Optional<Instant> lastDev = applications.stream()
            .flatMap(application -> application.instances().values().stream())
            .flatMap(instance -> instance.deployments().values().stream()
                    .filter(deployment -> deployment.zone().environment() == Environment.dev)
                    .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment)))
            .max(Comparator.naturalOrder())
            // Fallback: no current dev deployments — look at past dev job runs instead.
            .or(() -> applications.stream()
                    .flatMap(application -> application.instances().values().stream())
                    .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream()
                            .filter(job -> job.environment() == Environment.dev)
                            .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                    .map(Run::start)
                    .max(Comparator.naturalOrder()));
    Optional<Instant> lastSubmission = applications.stream()
            .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream())
            .max(Comparator.naturalOrder());
    object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
    if (tenant.type() == Tenant.Type.deleted)
        object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli());
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
          .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
          .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
          .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        // Rebuild from the original authority; fragment is always dropped.
        URI rewritten = new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
        return rewritten;
    } catch (URISyntaxException e) {
        throw new RuntimeException("Will not happen", e);
    }
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    return withPathAndQuery(newPath, null, uri); // null query means 'no query part'
}
/** Returns the application/v4 path for the given deployment. */
private String toPath(DeploymentId id) {
    ApplicationId application = id.applicationId();
    ZoneId zone = id.zoneId();
    return path("/application", "v4",
                "tenant", application.tenant(),
                "application", application.application(),
                "instance", application.instance(),
                "environment", zone.environment(),
                "region", zone.region());
}
/**
 * Parses the given value as a long, or returns the default when the value is null.
 *
 * @throws IllegalArgumentException if the value is non-null and not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Keep the cause: the original code discarded it, losing the parse failure details.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/** Reads up to 1 MB of JSON from the given stream and parses it to a Slime tree. */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Was 'throw new RuntimeException()', which silently dropped both cause and message.
        throw new RuntimeException(e);
    }
}
/** Returns the request's user principal, or throws if none is present. */
private static Principal requireUserPrincipal(HttpRequest request) {
    return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal())
                   .orElseThrow(() -> new IllegalArgumentException("Expected a user principal"));
}
/** Returns the given field, or throws if it is missing from the object. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the string value of the given field, or empty if the field is missing. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}
/** Joins the string representations of the given elements with '/'. Note: performs no URL encoding. */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}
/** Writes tenant, application and the canonical v4 url for the given id. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String urlPath = "/application/v4/tenant/" + id.tenant().value()
                     + "/application/" + id.application().value();
    object.setString("url", withPath(urlPath, request.getUri()).toString());
}
/** Writes tenant, application, instance and the canonical v4 url for the given id. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String urlPath = "/application/v4/tenant/" + id.tenant().value()
                     + "/application/" + id.application().value()
                     + "/instance/" + id.instance().value();
    object.setString("url", withPath(urlPath, request.getUri()).toString());
}
/** Serializes the result of an activation: revision, package size, prepare log and config change actions. */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    // Log messages from the prepare phase, if any.
    Cursor logArray = object.setArray("prepareMessages");
    if (result.prepareResponse().log != null) {
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }
    // Config change actions: services to restart, and document types to re-feed.
    Cursor changeObject = object.setObject("configChangeActions");
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}
/** Adds one object per service, with its name, type, config id and host. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    for (ServiceInfo service : serviceInfoList) {
        Cursor out = array.addObject();
        out.setString("serviceName", service.serviceName);
        out.setString("serviceType", service.serviceType);
        out.setString("configId", service.configId);
        out.setString("hostName", service.hostName);
    }
}
/** Adds each string to the given slime array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Serializes the tenant's secret stores under 'secretStores'. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor secretStoreArray = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(secretStoreArray.addObject(), store);
}
/** Writes the tenant's container role and one account entry per secret store. */
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    Cursor accounts = object.setArray("accounts");
    for (TenantSecretStore secretStore : tenantSecretStores)
        toSlime(accounts.addObject(), secretStore);
}
/** Writes the name, AWS id and role of a single secret store. */
private void toSlime(Cursor out, TenantSecretStore secretStore) {
    out.setString("name", secretStore.getName());
    out.setString("awsId", secretStore.getAwsId());
    out.setString("role", secretStore.getRole());
}
/**
 * Reads the entire stream as a single string, or returns null when the stream is empty.
 * Decodes as UTF-8 — the original relied on the platform default charset, which is wrong
 * for request bodies. The scanner (and the underlying stream, which is fully consumed)
 * is now closed instead of leaking.
 */
private String readToString(InputStream stream) {
    try (Scanner scanner = new Scanner(stream, "UTF-8").useDelimiter("\\A")) {
        return scanner.hasNext() ? scanner.next() : null;
    }
}
/** Returns whether the response should recurse into tenants (implied by application-level recursion). */
private static boolean recurseOverTenants(HttpRequest request) {
    if (recurseOverApplications(request)) return true;
    return "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the response should recurse into applications (implied by deployment-level recursion). */
private static boolean recurseOverApplications(HttpRequest request) {
    if (recurseOverDeployments(request)) return true;
    return "application".equals(request.getProperty("recursive"));
}
/** Returns whether the response should recurse into deployments. */
private static boolean recurseOverDeployments(HttpRequest request) {
    String recursive = request.getProperty("recursive");
    return ImmutableSet.of("all", "true", "deployment").contains(recursive);
}
/** Returns whether only production instances should be included in the response. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    String production = request.getProperty("production");
    return "true".equals(production);
}
/** Returns whether only instances with deployments should be included in the response. */
private static boolean showOnlyActiveInstances(HttpRequest request) {
    String activeInstances = request.getProperty("activeInstances");
    return "true".equals(activeInstances);
}
/** Returns whether deleted tenants should be included in the response. */
private static boolean includeDeleted(HttpRequest request) {
    String includeDeleted = request.getProperty("includeDeleted");
    return "true".equals(includeDeleted);
}
/** Returns the external (upper-case) name of the given tenant's type. */
private static String tenantType(Tenant tenant) {
    return switch (tenant.type()) {
        case athenz -> "ATHENS";
        case cloud -> "CLOUD";
        case deleted -> "DELETED";
    };
}
/** Builds an application id from the tenant, application and instance path elements. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Resolves the job type named by the 'jobtype' path element, using the zones known to this system. */
private JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry());
}
/** Builds a run id from the application, job type and run number in the path. */
private RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), Long.parseLong(path.get("number")));
}
/**
 * Handles submission of a new application revision: parses the multipart request, verifies the
 * application package and its identity configuration, creates the application if permitted, and
 * registers the submission with the job controller.
 *
 * @throws IllegalArgumentException on malformed submit options or an incomplete source URL
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    long projectId = submitOptions.field("projectId").asLong();
    projectId = projectId == 0 ? 1 : projectId; // 0 means unset; default to 1
    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    // A source revision is only recorded when all of repository, branch and commit are present.
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
                                              ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
                                              : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    Optional<String> description = optional("description", submitOptions);
    int risk = (int) submitOptions.field("risk").asLong();
    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP);
    Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     Optional.empty(),
                                                                     Optional.empty(),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
    ensureApplicationExists(id, request);
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
}
/** Removes all production deployments of the given application by submitting a deployment-removal package. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    Submission removal = new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
                                        Optional.empty(), Optional.empty(), Optional.empty(), 0);
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                 TenantAndApplicationId.from(tenant, application),
                                                 removal,
                                                 0);
    return new MessageResponse("All deployments removed");
}
/** Parses and validates the zone given by environment and region. */
private ZoneId requireZone(String environment, String region) {
    return requireZone(ZoneId.from(environment, region));
}
/** Returns the given zone if it exists in this system; the pseudo-zone prod.controller is always accepted. */
private ZoneId requireZone(ZoneId zone) {
    boolean isControllerZone = zone.environment() == Environment.prod
                               && zone.region().value().equals("controller");
    if ( ! isControllerZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart request into its named parts. When an X-Content-Hash header is present,
 * the SHA-256 digest of the request body must match its base64-decoded value.
 *
 * @throws IllegalArgumentException if the computed content hash does not match the header
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("X-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);
    // Digest the body while it is being parsed, then compare with the claimed hash.
    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}
/**
 * Returns the rotation id for the given instance, selected by the given endpoint id when present.
 *
 * @throws NotExistsException       if the instance has no rotations, or no rotation matches the endpoint id
 * @throws IllegalArgumentException if no endpoint id is given and the instance has multiple rotations
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    if (instance.rotations().isEmpty()) {
        throw new NotExistsException("global rotation does not exist for " + instance);
    }
    if (endpointId.isPresent()) {
        return instance.rotations().stream()
                .filter(r -> r.endpointId().id().equals(endpointId.get()))
                .map(AssignedRotation::rotationId)
                .findFirst()
                .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                          " does not exist for " + instance));
    } else if (instance.rotations().size() > 1) {
        // Ambiguous without an endpoint id.
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    }
    return instance.rotations().get(0).rotationId();
}
/** Returns the external (upper-case) name of the given rotation state. */
private static String rotationStateString(RotationState state) {
    return switch (state) {
        case in -> "IN";
        case out -> "OUT";
        case unknown -> "UNKNOWN";
    };
}
/** Returns the external name of the given endpoint scope. */
private static String endpointScopeString(Endpoint.Scope scope) {
    return switch (scope) {
        case weighted -> "weighted";
        case application -> "application";
        case global -> "global";
        case zone -> "zone";
    };
}
/** Returns the external name of the given routing method. */
private static String routingMethodString(RoutingMethod method) {
    return switch (method) {
        case exclusive -> "exclusive";
        case sharedLayer4 -> "sharedLayer4";
    };
}
/** Returns the request context attribute with the given name and type, or throws if absent or of another type. */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    if (cls.isInstance(value)) return cls.cast(value);
    throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
}
/** Returns whether given request is by an operator */
/** Returns whether given request is by an operator, i.e. carries the hostedOperator role. */
private static boolean isOperator(HttpRequest request) {
    var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    for (Role role : securityContext.roles())
        if (role.definition() == RoleDefinition.hostedOperator)
            return true;
    return false;
}
/**
 * Ensures an application with the given id exists. In public systems, or when the request carries
 * an Okta context, a missing application is created implicitly; otherwise this fails, since
 * applications must be created through the console first.
 *
 * @throws IllegalArgumentException if the application does not exist and cannot be created here
 */
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
    if (controller.applications().getApplication(id).isEmpty()) {
        if (controller.system().isPublic() || hasOktaContext(request)) {
            log.fine("Application does not exist in public, creating: " + id);
            var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest());
            controller.applications().createApplication(id, credentials);
        } else {
            log.fine("Application does not exist in hosted, failing: " + id);
            throw new IllegalArgumentException("Application does not exist. Create application in Console first.");
        }
    }
}
/**
 * Returns whether the request context carries Okta credentials.
 * Probes by attempting to parse the credentials, since the parser exposes no query method.
 */
private boolean hasOktaContext(HttpRequest request) {
    try {
        OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context());
        return true;
    } catch (IllegalArgumentException e) {
        return false; // missing or incomplete Okta context
    }
}
/** Returns an immutable list of the given deployments, ordered as their zones appear in the deployment spec. */
private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) {
    List<ZoneId> productionZones = spec.zones().stream()
                                       .filter(zone -> zone.region().isPresent())
                                       .map(zone -> ZoneId.from(zone.environment(), zone.region().get()))
                                       .toList();
    // Zones not in the spec sort first, since indexOf yields -1 for them.
    return deployments.stream()
                      .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone())))
                      .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
} |
This PR doesn't set this value. | public static PrepareParams fromHttpRequest(HttpRequest request, TenantName tenant, Duration barrierTimeout) {
return new Builder().ignoreValidationErrors(request.getBooleanProperty(IGNORE_VALIDATION_PARAM_NAME))
.dryRun(request.getBooleanProperty(DRY_RUN_PARAM_NAME))
.verbose(request.getBooleanProperty(VERBOSE_PARAM_NAME))
.timeoutBudget(SessionHandler.getTimeoutBudget(request, barrierTimeout))
.applicationId(createApplicationId(request, tenant))
.tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME)))
.vespaVersion(request.getProperty(VESPA_VERSION_PARAM_NAME))
.containerEndpoints(request.getProperty(CONTAINER_ENDPOINTS_PARAM_NAME))
.endpointCertificateMetadata(request.getProperty(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME))
.dockerImageRepository(request.getProperty(DOCKER_IMAGE_REPOSITORY))
.athenzDomain(request.getProperty(ATHENZ_DOMAIN))
.quota(request.getProperty(QUOTA_PARAM_NAME))
.tenantSecretStores(request.getProperty(TENANT_SECRET_STORES_PARAM_NAME))
.force(request.getBooleanProperty(FORCE_PARAM_NAME))
.waitForResourcesInPrepare(request.getBooleanProperty(WAIT_FOR_RESOURCES_IN_PREPARE))
.build();
} | .tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME))) | public static PrepareParams fromHttpRequest(HttpRequest request, TenantName tenant, Duration barrierTimeout) {
return new Builder().ignoreValidationErrors(request.getBooleanProperty(IGNORE_VALIDATION_PARAM_NAME))
.dryRun(request.getBooleanProperty(DRY_RUN_PARAM_NAME))
.verbose(request.getBooleanProperty(VERBOSE_PARAM_NAME))
.timeoutBudget(SessionHandler.getTimeoutBudget(request, barrierTimeout))
.applicationId(createApplicationId(request, tenant))
.tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME)))
.vespaVersion(request.getProperty(VESPA_VERSION_PARAM_NAME))
.containerEndpoints(request.getProperty(CONTAINER_ENDPOINTS_PARAM_NAME))
.endpointCertificateMetadata(request.getProperty(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME))
.dockerImageRepository(request.getProperty(DOCKER_IMAGE_REPOSITORY))
.athenzDomain(request.getProperty(ATHENZ_DOMAIN))
.quota(request.getProperty(QUOTA_PARAM_NAME))
.tenantSecretStores(request.getProperty(TENANT_SECRET_STORES_PARAM_NAME))
.force(request.getBooleanProperty(FORCE_PARAM_NAME))
.waitForResourcesInPrepare(request.getBooleanProperty(WAIT_FOR_RESOURCES_IN_PREPARE))
.build();
} | class Builder {
private boolean ignoreValidationErrors = false;
private boolean dryRun = false;
private boolean verbose = false;
private boolean isBootstrap = false;
private boolean force = false;
private boolean waitForResourcesInPrepare = false;
private ApplicationId applicationId = null;
private Tags tags = Tags.empty();
private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60));
private Optional<Version> vespaVersion = Optional.empty();
private List<ContainerEndpoint> containerEndpoints = null;
private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
private Optional<DockerImage> dockerImageRepository = Optional.empty();
private Optional<AthenzDomain> athenzDomain = Optional.empty();
private Optional<Quota> quota = Optional.empty();
private List<TenantSecretStore> tenantSecretStores = List.of();
private List<X509Certificate> operatorCertificates = List.of();
private Optional<CloudAccount> cloudAccount = Optional.empty();
public Builder() { }
public Builder applicationId(ApplicationId applicationId) {
this.applicationId = applicationId;
return this;
}
public Builder tags(Tags tags) {
this.tags = tags;
return this;
}
public Builder ignoreValidationErrors(boolean ignoreValidationErrors) {
this.ignoreValidationErrors = ignoreValidationErrors;
return this;
}
public Builder dryRun(boolean dryRun) {
this.dryRun = dryRun;
return this;
}
public Builder verbose(boolean verbose) {
this.verbose = verbose;
return this;
}
public Builder isBootstrap(boolean isBootstrap) {
this.isBootstrap = isBootstrap;
return this;
}
public Builder timeoutBudget(TimeoutBudget timeoutBudget) {
this.timeoutBudget = timeoutBudget;
return this;
}
public Builder vespaVersion(String vespaVersion) {
Optional<Version> version = Optional.empty();
if (vespaVersion != null && !vespaVersion.isEmpty()) {
version = Optional.of(Version.fromString(vespaVersion));
}
this.vespaVersion = version;
return this;
}
public Builder vespaVersion(Version vespaVersion) {
this.vespaVersion = Optional.ofNullable(vespaVersion);
return this;
}
public Builder containerEndpoints(String serialized) {
this.containerEndpoints = (serialized == null)
? List.of()
: ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
return this;
}
public Builder containerEndpointList(List<ContainerEndpoint> endpoints) {
this.containerEndpoints = endpoints;
return this;
}
public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) {
this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata);
return this;
}
public Builder endpointCertificateMetadata(String serialized) {
this.endpointCertificateMetadata = (serialized == null)
? Optional.empty()
: Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
return this;
}
public Builder dockerImageRepository(String dockerImageRepository) {
this.dockerImageRepository = (dockerImageRepository == null)
? Optional.empty()
: Optional.of(DockerImage.fromString(dockerImageRepository));
return this;
}
public Builder dockerImageRepository(DockerImage dockerImageRepository) {
this.dockerImageRepository = Optional.ofNullable(dockerImageRepository);
return this;
}
public Builder athenzDomain(String athenzDomain) {
this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from);
return this;
}
public Builder athenzDomain(AthenzDomain athenzDomain) {
this.athenzDomain = Optional.ofNullable(athenzDomain);
return this;
}
public Builder quota(Quota quota) {
this.quota = Optional.ofNullable(quota);
return this;
}
public Builder quota(String serialized) {
this.quota = (serialized == null)
? Optional.empty()
: Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
return this;
}
public Builder tenantSecretStores(String serialized) {
List<TenantSecretStore> secretStores = (serialized == null)
? List.of()
: TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get());
return tenantSecretStores(secretStores);
}
public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) {
this.tenantSecretStores = tenantSecretStores;
return this;
}
public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) {
this.waitForResourcesInPrepare = waitForResourcesInPrepare;
return this;
}
public Builder force(boolean force) {
this.force = force;
return this;
}
public Builder operatorCertificates(List<X509Certificate> operatorCertificates) {
this.operatorCertificates = List.copyOf(operatorCertificates);
return this;
}
public Builder cloudAccount(CloudAccount cloudAccount) {
this.cloudAccount = Optional.ofNullable(cloudAccount);
return this;
}
public PrepareParams build() {
return new PrepareParams(applicationId,
tags,
timeoutBudget,
ignoreValidationErrors,
dryRun,
verbose,
isBootstrap,
vespaVersion,
containerEndpoints,
endpointCertificateMetadata,
dockerImageRepository,
athenzDomain,
quota,
tenantSecretStores,
force,
waitForResourcesInPrepare,
operatorCertificates,
cloudAccount);
}
} | class Builder {
private boolean ignoreValidationErrors = false;
private boolean dryRun = false;
private boolean verbose = false;
private boolean isBootstrap = false;
private boolean force = false;
private boolean waitForResourcesInPrepare = false;
private ApplicationId applicationId = null;
private Tags tags = Tags.empty();
private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60));
private Optional<Version> vespaVersion = Optional.empty();
private List<ContainerEndpoint> containerEndpoints = null;
private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
private Optional<DockerImage> dockerImageRepository = Optional.empty();
private Optional<AthenzDomain> athenzDomain = Optional.empty();
private Optional<Quota> quota = Optional.empty();
private List<TenantSecretStore> tenantSecretStores = List.of();
private List<X509Certificate> operatorCertificates = List.of();
private Optional<CloudAccount> cloudAccount = Optional.empty();
public Builder() { }
public Builder applicationId(ApplicationId applicationId) {
this.applicationId = applicationId;
return this;
}
public Builder tags(Tags tags) {
this.tags = tags;
return this;
}
public Builder ignoreValidationErrors(boolean ignoreValidationErrors) {
this.ignoreValidationErrors = ignoreValidationErrors;
return this;
}
public Builder dryRun(boolean dryRun) {
this.dryRun = dryRun;
return this;
}
public Builder verbose(boolean verbose) {
this.verbose = verbose;
return this;
}
public Builder isBootstrap(boolean isBootstrap) {
this.isBootstrap = isBootstrap;
return this;
}
public Builder timeoutBudget(TimeoutBudget timeoutBudget) {
this.timeoutBudget = timeoutBudget;
return this;
}
public Builder vespaVersion(String vespaVersion) {
Optional<Version> version = Optional.empty();
if (vespaVersion != null && !vespaVersion.isEmpty()) {
version = Optional.of(Version.fromString(vespaVersion));
}
this.vespaVersion = version;
return this;
}
public Builder vespaVersion(Version vespaVersion) {
this.vespaVersion = Optional.ofNullable(vespaVersion);
return this;
}
public Builder containerEndpoints(String serialized) {
this.containerEndpoints = (serialized == null)
? List.of()
: ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
return this;
}
public Builder containerEndpointList(List<ContainerEndpoint> endpoints) {
this.containerEndpoints = endpoints;
return this;
}
public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) {
this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata);
return this;
}
public Builder endpointCertificateMetadata(String serialized) {
this.endpointCertificateMetadata = (serialized == null)
? Optional.empty()
: Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
return this;
}
public Builder dockerImageRepository(String dockerImageRepository) {
this.dockerImageRepository = (dockerImageRepository == null)
? Optional.empty()
: Optional.of(DockerImage.fromString(dockerImageRepository));
return this;
}
public Builder dockerImageRepository(DockerImage dockerImageRepository) {
this.dockerImageRepository = Optional.ofNullable(dockerImageRepository);
return this;
}
public Builder athenzDomain(String athenzDomain) {
this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from);
return this;
}
public Builder athenzDomain(AthenzDomain athenzDomain) {
this.athenzDomain = Optional.ofNullable(athenzDomain);
return this;
}
public Builder quota(Quota quota) {
this.quota = Optional.ofNullable(quota);
return this;
}
public Builder quota(String serialized) {
this.quota = (serialized == null)
? Optional.empty()
: Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
return this;
}
public Builder tenantSecretStores(String serialized) {
List<TenantSecretStore> secretStores = (serialized == null)
? List.of()
: TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get());
return tenantSecretStores(secretStores);
}
public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) {
this.tenantSecretStores = tenantSecretStores;
return this;
}
public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) {
this.waitForResourcesInPrepare = waitForResourcesInPrepare;
return this;
}
public Builder force(boolean force) {
this.force = force;
return this;
}
public Builder operatorCertificates(List<X509Certificate> operatorCertificates) {
this.operatorCertificates = List.copyOf(operatorCertificates);
return this;
}
public Builder cloudAccount(CloudAccount cloudAccount) {
this.cloudAccount = Optional.ofNullable(cloudAccount);
return this;
}
public PrepareParams build() {
return new PrepareParams(applicationId,
tags,
timeoutBudget,
ignoreValidationErrors,
dryRun,
verbose,
isBootstrap,
vespaVersion,
containerEndpoints,
endpointCertificateMetadata,
dockerImageRepository,
athenzDomain,
quota,
tenantSecretStores,
force,
waitForResourcesInPrepare,
operatorCertificates,
cloudAccount);
}
} |
I don't know what this is for, hopefully it is just wasted work if there are variants? | private void preProcessAndPopulateCache() {
FileWrapper servicesXml = files.wrapper().wrap(Paths.get(servicesFile));
if (servicesXml.exists())
try {
new XmlPreProcessor(files.wrapper().wrap(Paths.get("./")),
new InputStreamReader(new ByteArrayInputStream(servicesXml.content()), UTF_8),
InstanceName.defaultName(),
Environment.prod,
RegionName.defaultName(),
Tags.empty())
.run();
}
catch (IllegalArgumentException e) {
throw e;
}
catch (Exception e) {
throw new IllegalArgumentException(e);
}
} | Tags.empty()) | private void preProcessAndPopulateCache() {
FileWrapper servicesXml = files.wrapper().wrap(Paths.get(servicesFile));
if (servicesXml.exists())
try {
new XmlPreProcessor(files.wrapper().wrap(Paths.get("./")),
new InputStreamReader(new ByteArrayInputStream(servicesXml.content()), UTF_8),
InstanceName.defaultName(),
Environment.prod,
RegionName.defaultName(),
Tags.empty())
.run();
}
catch (IllegalArgumentException e) {
throw e;
}
catch (Exception e) {
throw new IllegalArgumentException(e);
}
} | class ApplicationPackage {
private static final String trustedCertificatesFile = "security/clients.pem";
private static final String buildMetaFile = "build-meta.json";
static final String deploymentFile = "deployment.xml";
private static final String validationOverridesFile = "validation-overrides.xml";
static final String servicesFile = "services.xml";
private final String contentHash;
private final String bundleHash;
private final byte[] zippedContent;
private final DeploymentSpec deploymentSpec;
private final ValidationOverrides validationOverrides;
private final ZipArchiveCache files;
private final Optional<Version> compileVersion;
private final Optional<Instant> buildTime;
private final Optional<Version> parentVersion;
private final List<X509Certificate> trustedCertificates;
/**
* Creates an application package from its zipped content.
* This <b>assigns ownership</b> of the given byte array to this class;
* it must not be further changed by the caller.
*/
public ApplicationPackage(byte[] zippedContent) {
this(zippedContent, false);
}
/**
* Creates an application package from its zipped content.
* This <b>assigns ownership</b> of the given byte array to this class;
* it must not be further changed by the caller.
* If 'requireFiles' is true, files needed by deployment orchestration must be present.
*/
@SuppressWarnings("deprecation")
public ApplicationPackage(byte[] zippedContent, boolean requireFiles) {
this.zippedContent = Objects.requireNonNull(zippedContent, "The application package content cannot be null");
this.contentHash = Hashing.sha1().hashBytes(zippedContent).toString();
this.files = new ZipArchiveCache(zippedContent, Set.of(deploymentFile, validationOverridesFile, servicesFile, buildMetaFile, trustedCertificatesFile));
Optional<DeploymentSpec> deploymentSpec = files.get(deploymentFile).map(bytes -> new String(bytes, UTF_8)).map(DeploymentSpec::fromXml);
if (requireFiles && deploymentSpec.isEmpty())
throw new IllegalArgumentException("Missing required file '" + deploymentFile + "'");
this.deploymentSpec = deploymentSpec.orElse(DeploymentSpec.empty);
this.validationOverrides = files.get(validationOverridesFile).map(bytes -> new String(bytes, UTF_8)).map(ValidationOverrides::fromXml).orElse(ValidationOverrides.empty);
Optional<Inspector> buildMetaObject = files.get(buildMetaFile).map(SlimeUtils::jsonToSlime).map(Slime::get);
this.compileVersion = buildMetaObject.flatMap(object -> parse(object, "compileVersion", field -> Version.fromString(field.asString())));
this.buildTime = buildMetaObject.flatMap(object -> parse(object, "buildTime", field -> Instant.ofEpochMilli(field.asLong())));
this.parentVersion = buildMetaObject.flatMap(object -> parse(object, "parentVersion", field -> Version.fromString(field.asString())));
this.trustedCertificates = files.get(trustedCertificatesFile).map(bytes -> X509CertificateUtils.certificateListFromPem(new String(bytes, UTF_8))).orElse(List.of());
this.bundleHash = calculateBundleHash(zippedContent);
preProcessAndPopulateCache();
}
/** Returns a copy of this with the given certificate appended. */
public ApplicationPackage withTrustedCertificate(X509Certificate certificate) {
List<X509Certificate> trustedCertificates = new ArrayList<>(this.trustedCertificates);
trustedCertificates.add(certificate);
byte[] certificatesBytes = X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8);
ByteArrayOutputStream modified = new ByteArrayOutputStream(zippedContent.length + certificatesBytes.length);
ZipEntries.transferAndWrite(modified, new ByteArrayInputStream(zippedContent), trustedCertificatesFile, certificatesBytes);
return new ApplicationPackage(modified.toByteArray());
}
/** Returns a hash of the content of this package */
public String hash() { return contentHash; }
/** Hash of all files and settings that influence what is deployed to config servers. */
public String bundleHash() {
return bundleHash;
}
/** Returns the content of this package. The content <b>must not</b> be modified. */
public byte[] zippedContent() { return zippedContent; }
/**
* Returns the deployment spec from the deployment.xml file of the package content.<br>
* This is the DeploymentSpec.empty instance if this package does not contain a deployment.xml file.<br>
* <em>NB: <strong>Always</strong> read deployment spec from the {@link Application}, for deployment orchestration.</em>
*/
public DeploymentSpec deploymentSpec() { return deploymentSpec; }
/**
* Returns the validation overrides from the validation-overrides.xml file of the package content.
* This is the ValidationOverrides.empty instance if this package does not contain a validation-overrides.xml file.
*/
public ValidationOverrides validationOverrides() { return validationOverrides; }
/** Returns the platform version which package was compiled against, if known. */
public Optional<Version> compileVersion() { return compileVersion; }
/** Returns the time this package was built, if known. */
public Optional<Instant> buildTime() { return buildTime; }
/** Returns the parent version used to compile the package, if known. */
public Optional<Version> parentVersion() { return parentVersion; }
/** Returns the list of certificates trusted by this application, or an empty list if no trust configured. */
public List<X509Certificate> trustedCertificates() {
return trustedCertificates;
}
private static <Type> Optional<Type> parse(Inspector buildMetaObject, String fieldName, Function<Inspector, Type> mapper) {
Inspector field = buildMetaObject.field(fieldName);
if ( ! field.valid() || field.type() == NIX)
return Optional.empty();
try {
return Optional.of(mapper.apply(buildMetaObject.field(fieldName)));
}
catch (RuntimeException e) {
throw new IllegalArgumentException("Failed parsing \"" + fieldName + "\" in '" + buildMetaFile + "': " + Exceptions.toMessageString(e));
}
}
/** Creates a valid application package that will remove all application's deployments */
public static ApplicationPackage deploymentRemoval() {
return new ApplicationPackage(filesZip(Map.of(validationOverridesFile, allValidationOverrides().xmlForm().getBytes(UTF_8),
deploymentFile, DeploymentSpec.empty.xmlForm().getBytes(UTF_8))));
}
/** Returns a zip containing meta data about deployments of this package by the given job. */
public byte[] metaDataZip() {
return cacheZip();
}
private byte[] cacheZip() {
return filesZip(files.cache.entrySet().stream()
.filter(entry -> entry.getValue().isPresent())
.collect(toMap(entry -> entry.getKey().toString(),
entry -> entry.getValue().get())));
}
public static byte[] filesZip(Map<String, byte[]> files) {
try (ZipBuilder zipBuilder = new ZipBuilder(files.values().stream().mapToInt(bytes -> bytes.length).sum() + 512)) {
files.forEach(zipBuilder::add);
zipBuilder.close();
return zipBuilder.toByteArray();
}
}
private static ValidationOverrides allValidationOverrides() {
String until = DateTimeFormatter.ISO_LOCAL_DATE.format(Instant.now().plus(Duration.ofDays(25)).atZone(ZoneOffset.UTC));
StringBuilder validationOverridesContents = new StringBuilder(1000);
validationOverridesContents.append("<validation-overrides version=\"1.0\">\n");
for (ValidationId validationId: ValidationId.values())
validationOverridesContents.append("\t<allow until=\"").append(until).append("\">").append(validationId.value()).append("</allow>\n");
validationOverridesContents.append("</validation-overrides>\n");
return ValidationOverrides.fromXml(validationOverridesContents.toString());
}
@SuppressWarnings("deprecation")
private String calculateBundleHash(byte[] zippedContent) {
Predicate<String> entryMatcher = name -> ! name.endsWith(deploymentFile) && ! name.endsWith(buildMetaFile);
SortedMap<String, Long> crcByEntry = new TreeMap<>();
Options options = Options.standard().pathPredicate(entryMatcher);
ArchiveFile file;
try (ArchiveStreamReader reader = ArchiveStreamReader.ofZip(new ByteArrayInputStream(zippedContent), options)) {
OutputStream discard = OutputStream.nullOutputStream();
while ((file = reader.readNextTo(discard)) != null) {
crcByEntry.put(file.path().toString(), file.crc32().orElse(-1));
}
}
Funnel<SortedMap<String, Long>> funnel = (from, into) -> from.forEach((key, value) -> {
into.putBytes(key.getBytes());
into.putLong(value);
});
return Hashing.sha1().newHasher()
.putObject(crcByEntry, funnel)
.putInt(deploymentSpec.deployableHashCode())
.hash().toString();
}
@SuppressWarnings("deprecation")
public static String calculateHash(byte[] bytes) {
return Hashing.sha1().newHasher()
.putBytes(bytes)
.hash().toString();
}
/** Maps normalized paths to cached content read from a zip archive. */
private static class ZipArchiveCache {
/** Max size of each extracted file */
private static final int maxSize = 10 << 20;
private final byte[] zip;
private final Map<Path, Optional<byte[]>> cache;
public ZipArchiveCache(byte[] zip, Collection<String> prePopulated) {
this.zip = zip;
this.cache = new ConcurrentSkipListMap<>();
this.cache.putAll(read(prePopulated));
}
public Optional<byte[]> get(String path) {
return get(Paths.get(path));
}
public Optional<byte[]> get(Path path) {
return cache.computeIfAbsent(path.normalize(), read(List.of(path.normalize().toString()))::get);
}
public FileSystemWrapper wrapper() {
return FileSystemWrapper.ofFiles(Path.of("./"),
path -> get(path).isPresent(),
path -> get(path).orElseThrow(() -> new NoSuchFileException(path.toString())));
}
private Map<Path, Optional<byte[]>> read(Collection<String> names) {
var entries = ZipEntries.from(zip,
name -> names.contains(name),
maxSize,
true)
.asList().stream()
.collect(toMap(entry -> Paths.get(entry.name()).normalize(),
ZipEntries.ZipEntryWithContent::content));
names.stream().map(Paths::get).forEach(path -> entries.putIfAbsent(path.normalize(), Optional.empty()));
return entries;
}
}
} | class ApplicationPackage {
private static final String trustedCertificatesFile = "security/clients.pem";
private static final String buildMetaFile = "build-meta.json";
static final String deploymentFile = "deployment.xml";
private static final String validationOverridesFile = "validation-overrides.xml";
static final String servicesFile = "services.xml";
private final String contentHash;
private final String bundleHash;
private final byte[] zippedContent;
private final DeploymentSpec deploymentSpec;
private final ValidationOverrides validationOverrides;
private final ZipArchiveCache files;
private final Optional<Version> compileVersion;
private final Optional<Instant> buildTime;
private final Optional<Version> parentVersion;
private final List<X509Certificate> trustedCertificates;
/**
* Creates an application package from its zipped content.
* This <b>assigns ownership</b> of the given byte array to this class;
* it must not be further changed by the caller.
*/
public ApplicationPackage(byte[] zippedContent) {
this(zippedContent, false);
}
/**
* Creates an application package from its zipped content.
* This <b>assigns ownership</b> of the given byte array to this class;
* it must not be further changed by the caller.
* If 'requireFiles' is true, files needed by deployment orchestration must be present.
*/
@SuppressWarnings("deprecation")
public ApplicationPackage(byte[] zippedContent, boolean requireFiles) {
this.zippedContent = Objects.requireNonNull(zippedContent, "The application package content cannot be null");
this.contentHash = Hashing.sha1().hashBytes(zippedContent).toString();
this.files = new ZipArchiveCache(zippedContent, Set.of(deploymentFile, validationOverridesFile, servicesFile, buildMetaFile, trustedCertificatesFile));
Optional<DeploymentSpec> deploymentSpec = files.get(deploymentFile).map(bytes -> new String(bytes, UTF_8)).map(DeploymentSpec::fromXml);
if (requireFiles && deploymentSpec.isEmpty())
throw new IllegalArgumentException("Missing required file '" + deploymentFile + "'");
this.deploymentSpec = deploymentSpec.orElse(DeploymentSpec.empty);
this.validationOverrides = files.get(validationOverridesFile).map(bytes -> new String(bytes, UTF_8)).map(ValidationOverrides::fromXml).orElse(ValidationOverrides.empty);
Optional<Inspector> buildMetaObject = files.get(buildMetaFile).map(SlimeUtils::jsonToSlime).map(Slime::get);
this.compileVersion = buildMetaObject.flatMap(object -> parse(object, "compileVersion", field -> Version.fromString(field.asString())));
this.buildTime = buildMetaObject.flatMap(object -> parse(object, "buildTime", field -> Instant.ofEpochMilli(field.asLong())));
this.parentVersion = buildMetaObject.flatMap(object -> parse(object, "parentVersion", field -> Version.fromString(field.asString())));
this.trustedCertificates = files.get(trustedCertificatesFile).map(bytes -> X509CertificateUtils.certificateListFromPem(new String(bytes, UTF_8))).orElse(List.of());
this.bundleHash = calculateBundleHash(zippedContent);
preProcessAndPopulateCache();
}
/** Returns a copy of this with the given certificate appended. */
public ApplicationPackage withTrustedCertificate(X509Certificate certificate) {
List<X509Certificate> trustedCertificates = new ArrayList<>(this.trustedCertificates);
trustedCertificates.add(certificate);
byte[] certificatesBytes = X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8);
ByteArrayOutputStream modified = new ByteArrayOutputStream(zippedContent.length + certificatesBytes.length);
ZipEntries.transferAndWrite(modified, new ByteArrayInputStream(zippedContent), trustedCertificatesFile, certificatesBytes);
return new ApplicationPackage(modified.toByteArray());
}
/** Returns a hash of the content of this package */
public String hash() { return contentHash; }
/** Hash of all files and settings that influence what is deployed to config servers. */
public String bundleHash() {
return bundleHash;
}
/** Returns the content of this package. The content <b>must not</b> be modified. */
public byte[] zippedContent() { return zippedContent; }
/**
* Returns the deployment spec from the deployment.xml file of the package content.<br>
* This is the DeploymentSpec.empty instance if this package does not contain a deployment.xml file.<br>
* <em>NB: <strong>Always</strong> read deployment spec from the {@link Application}, for deployment orchestration.</em>
*/
public DeploymentSpec deploymentSpec() { return deploymentSpec; }
/**
* Returns the validation overrides from the validation-overrides.xml file of the package content.
* This is the ValidationOverrides.empty instance if this package does not contain a validation-overrides.xml file.
*/
public ValidationOverrides validationOverrides() { return validationOverrides; }
/** Returns the platform version which package was compiled against, if known. */
public Optional<Version> compileVersion() { return compileVersion; }
/** Returns the time this package was built, if known. */
public Optional<Instant> buildTime() { return buildTime; }
/** Returns the parent version used to compile the package, if known. */
public Optional<Version> parentVersion() { return parentVersion; }
/** Returns the list of certificates trusted by this application, or an empty list if no trust configured. */
public List<X509Certificate> trustedCertificates() {
return trustedCertificates;
}
private static <Type> Optional<Type> parse(Inspector buildMetaObject, String fieldName, Function<Inspector, Type> mapper) {
Inspector field = buildMetaObject.field(fieldName);
if ( ! field.valid() || field.type() == NIX)
return Optional.empty();
try {
return Optional.of(mapper.apply(buildMetaObject.field(fieldName)));
}
catch (RuntimeException e) {
throw new IllegalArgumentException("Failed parsing \"" + fieldName + "\" in '" + buildMetaFile + "': " + Exceptions.toMessageString(e));
}
}
/** Creates a valid application package that will remove all application's deployments */
public static ApplicationPackage deploymentRemoval() {
return new ApplicationPackage(filesZip(Map.of(validationOverridesFile, allValidationOverrides().xmlForm().getBytes(UTF_8),
deploymentFile, DeploymentSpec.empty.xmlForm().getBytes(UTF_8))));
}
/** Returns a zip containing meta data about deployments of this package by the given job. */
public byte[] metaDataZip() {
return cacheZip();
}
private byte[] cacheZip() {
return filesZip(files.cache.entrySet().stream()
.filter(entry -> entry.getValue().isPresent())
.collect(toMap(entry -> entry.getKey().toString(),
entry -> entry.getValue().get())));
}
public static byte[] filesZip(Map<String, byte[]> files) {
try (ZipBuilder zipBuilder = new ZipBuilder(files.values().stream().mapToInt(bytes -> bytes.length).sum() + 512)) {
files.forEach(zipBuilder::add);
zipBuilder.close();
return zipBuilder.toByteArray();
}
}
private static ValidationOverrides allValidationOverrides() {
String until = DateTimeFormatter.ISO_LOCAL_DATE.format(Instant.now().plus(Duration.ofDays(25)).atZone(ZoneOffset.UTC));
StringBuilder validationOverridesContents = new StringBuilder(1000);
validationOverridesContents.append("<validation-overrides version=\"1.0\">\n");
for (ValidationId validationId: ValidationId.values())
validationOverridesContents.append("\t<allow until=\"").append(until).append("\">").append(validationId.value()).append("</allow>\n");
validationOverridesContents.append("</validation-overrides>\n");
return ValidationOverrides.fromXml(validationOverridesContents.toString());
}
@SuppressWarnings("deprecation")
private String calculateBundleHash(byte[] zippedContent) {
Predicate<String> entryMatcher = name -> ! name.endsWith(deploymentFile) && ! name.endsWith(buildMetaFile);
SortedMap<String, Long> crcByEntry = new TreeMap<>();
Options options = Options.standard().pathPredicate(entryMatcher);
ArchiveFile file;
try (ArchiveStreamReader reader = ArchiveStreamReader.ofZip(new ByteArrayInputStream(zippedContent), options)) {
OutputStream discard = OutputStream.nullOutputStream();
while ((file = reader.readNextTo(discard)) != null) {
crcByEntry.put(file.path().toString(), file.crc32().orElse(-1));
}
}
Funnel<SortedMap<String, Long>> funnel = (from, into) -> from.forEach((key, value) -> {
into.putBytes(key.getBytes());
into.putLong(value);
});
return Hashing.sha1().newHasher()
.putObject(crcByEntry, funnel)
.putInt(deploymentSpec.deployableHashCode())
.hash().toString();
}
@SuppressWarnings("deprecation")
public static String calculateHash(byte[] bytes) {
return Hashing.sha1().newHasher()
.putBytes(bytes)
.hash().toString();
}
/** Maps normalized paths to cached content read from a zip archive. */
private static class ZipArchiveCache {
/** Max size of each extracted file */
private static final int maxSize = 10 << 20;
private final byte[] zip;
private final Map<Path, Optional<byte[]>> cache;
public ZipArchiveCache(byte[] zip, Collection<String> prePopulated) {
this.zip = zip;
this.cache = new ConcurrentSkipListMap<>();
this.cache.putAll(read(prePopulated));
}
public Optional<byte[]> get(String path) {
return get(Paths.get(path));
}
public Optional<byte[]> get(Path path) {
return cache.computeIfAbsent(path.normalize(), read(List.of(path.normalize().toString()))::get);
}
public FileSystemWrapper wrapper() {
return FileSystemWrapper.ofFiles(Path.of("./"),
path -> get(path).isPresent(),
path -> get(path).orElseThrow(() -> new NoSuchFileException(path.toString())));
}
private Map<Path, Optional<byte[]>> read(Collection<String> names) {
var entries = ZipEntries.from(zip,
name -> names.contains(name),
maxSize,
true)
.asList().stream()
.collect(toMap(entry -> Paths.get(entry.name()).normalize(),
ZipEntries.ZipEntryWithContent::content));
names.stream().map(Paths::get).forEach(path -> entries.putIfAbsent(path.normalize(), Optional.empty()));
return entries;
}
}
} |
I assumed not. | private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
if (controller.applications().getApplication(applicationId).isEmpty())
createApplication(tenantName, applicationName, request);
controller.applications().createInstance(applicationId.instance(instanceName), Tags.empty());
Slime slime = new Slime();
toSlime(applicationId.instance(instanceName), slime.setObject(), request);
return new SlimeJsonResponse(slime);
} | controller.applications().createInstance(applicationId.instance(instanceName), Tags.empty()); | private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
if (controller.applications().getApplication(applicationId).isEmpty())
createApplication(tenantName, applicationName, request);
controller.applications().createInstance(applicationId.instance(instanceName), Tags.empty());
Slime slime = new Slime();
toSlime(applicationId.instance(instanceName), slime.setObject(), request);
return new SlimeJsonResponse(slime);
} | class ApplicationApiHandler extends AuditLoggingRequestHandler {
private static final ObjectMapper jsonMapper = new ObjectMapper();
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
@Inject
public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx, controller.auditLogger());
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
try {
Path path = new Path(request.getUri());
return switch (request.getMethod()) {
case GET: yield handleGET(path, request);
case PUT: yield handlePUT(path, request);
case POST: yield handlePOST(path, request);
case PATCH: yield handlePATCH(path, request);
case DELETE: yield handleDELETE(path, request);
case OPTIONS: yield handleOPTIONS();
default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
};
}
catch (RestApiException.Forbidden e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (RestApiException.Unauthorized e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
return switch (e.code()) {
case NOT_FOUND: yield ErrorResponse.notFoundError(Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT: yield new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR: yield ErrorResponses.logThrowing(request, log, e);
default: yield new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
};
}
catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes PUT requests under /application/v4 to their handler methods.
 * Matching is first-match-wins, so more specific patterns must be listed before
 * overlapping general ones. Returns 404 when no pattern matches.
 *
 * @param path    the request path, with {placeholder} segments captured by matches()
 * @param request the incoming HTTP request, forwarded to the chosen handler
 * @return the handler's response, or a not-found error response
 */
private HttpResponse handlePUT(Path path, HttpRequest request) {
// Tenant-level updates, access management and info sections.
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
// NOTE(review): the bare /archive-access path maps to the AWS variant — presumably a legacy alias; confirm before removing.
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
// Global rotation override: both path orderings (instance-last and instance-first) are accepted.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes POST requests under /application/v4 to their handler methods.
 * Matching is first-match-wins; patterns with an explicit {instance} segment
 * are listed after their "default"-instance counterparts.
 *
 * @param path    the request path, with {placeholder} segments captured by matches()
 * @param request the incoming HTTP request, forwarded to the chosen handler
 * @return the handler's response, or a not-found error response
 */
private HttpResponse handlePOST(Path path, HttpRequest request) {
// Tenant and application creation, keys and submission.
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
// Application-level deploys use the "default" instance.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
// Instance-scoped operations.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
// Zone-scoped operations (environment/region).
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
// Legacy path ordering with the instance segment last.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes PATCH requests under /application/v4.
 * Both the application resource and its instance sub-resource take the same
 * patch payload, so they share one handler call.
 */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
    if (   path.matches("/application/v4/tenant/{tenant}/application/{application}")
        || path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}"))
        return patchApplication(path.get("tenant"), path.get("application"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes DELETE requests under /application/v4 to their handler methods.
 * Matching is first-match-wins; several operations are "undo" counterparts of
 * the POST handlers (e.g. pause/resume, suspend, global-rotation override).
 *
 * @param path    the request path, with {placeholder} segments captured by matches()
 * @param request the incoming HTTP request, forwarded to the chosen handler
 * @return the handler's response, or a not-found error response
 */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
// Tenant-level removals.
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
// NOTE(review): like PUT, bare /archive-access maps to the AWS variant.
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
// Application-level removals; deploy cancellations use the "default" instance.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
// Instance- and zone-scoped removals.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
// Legacy path ordering with the instance segment last.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Answers OPTIONS requests with the set of supported verbs and an empty body.
 */
private HttpResponse handleOPTIONS() {
    var response = new EmptyResponse();
    response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return response;
}
/**
 * Lists every tenant with its applications inlined (the recursive form of the root listing).
 *
 * @param request used to decide whether deleted tenants are included, and for serialization context
 * @return a JSON array with one object per tenant
 */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    // Fetch all applications once, then select the subset belonging to each tenant.
    List<Application> applications = controller.applications().asList();
    for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
        toSlime(tenantArray.addObject(),
                tenant,
                // Stream.toList() (Java 16) replaces collect(toList()); the result is only read during serialization.
                applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).toList(),
                request);
    return new SlimeJsonResponse(slime);
}
/**
 * Handles the /application/v4 root: recurses into tenants when requested,
 * otherwise returns a plain resource listing.
 */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/**
 * Lists all tenants in short form (no applications inlined).
 */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList(includeDeleted(request))
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the named tenant, or 404 if it does not exist.
 */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName), includeDeleted(request));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/**
 * Serializes a tenant together with all of its applications.
 */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, tenant, controller.applications().asList(tenant.name()), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the access-request state for a cloud tenant: whether access is
 * operator-managed, any pending membership request, and the audit log of
 * earlier decisions. Only cloud tenants support access requests.
 *
 * @param tenantName name of the tenant; must exist (require() throws otherwise)
 * @param request    the incoming HTTP request (currently unused beyond routing)
 * @return a JSON object with "managedAccess", optional "pendingRequest" and "auditLog"
 */
private HttpResponse accessRequests(String tenantName, HttpRequest request) {
var tenant = TenantName.from(tenantName);
if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
var accessControlService = controller.serviceRegistry().accessControlService();
var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
var managedAccess = accessControlService.getManagedAccess(tenant);
var slime = new Slime();
var cursor = slime.setObject();
cursor.setBool("managedAccess", managedAccess);
// "pendingRequest" is only present when a request is awaiting a decision.
accessRoleInformation.getPendingRequest()
.ifPresent(membershipRequest -> {
var requestCursor = cursor.setObject("pendingRequest");
requestCursor.setString("requestTime", membershipRequest.getCreationTime());
requestCursor.setString("reason", membershipRequest.getReason());
});
// Audit log entries are emitted in the order the service returns them.
var auditLogCursor = cursor.setArray("auditLog");
accessRoleInformation.getAuditLog()
.forEach(auditLogEntry -> {
var entryCursor = auditLogCursor.addObject();
entryCursor.setString("created", auditLogEntry.getCreationTime());
entryCursor.setString("approver", auditLogEntry.getApprover());
entryCursor.setString("reason", auditLogEntry.getReason());
entryCursor.setString("status", auditLogEntry.getAction());
});
return new SlimeJsonResponse(slime);
}
/**
 * Requests ssh access to the given cloud tenant. Operator-only.
 */
private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
    if ( ! isOperator(request))
        return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only request access for cloud tenants");
    controller.serviceRegistry().accessControlService().requestSshAccess(tenant);
    return new MessageResponse("OK");
}
/**
 * Approves or rejects a pending ssh access request for a cloud tenant.
 * The request body may carry "expiry" (epoch millis); when absent, the
 * granted access defaults to 24 hours from now.
 */
private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    Inspector inspector = toSlime(request.getData()).get();
    Inspector expiryField = inspector.field("expiry");
    Instant expiry = expiryField.valid() ? Instant.ofEpochMilli(expiryField.asLong())
                                         : Instant.now().plus(1, ChronoUnit.DAYS);
    boolean approve = inspector.field("approve").asBool();
    controller.serviceRegistry().accessControlService()
              .decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
    return new MessageResponse("OK");
}
/** Enables operator-managed access for the tenant. */
private HttpResponse addManagedAccess(String tenantName) {
    return setManagedAccess(tenantName, /* managedAccess= */ true);
}
/** Disables operator-managed access for the tenant. */
private HttpResponse removeManagedAccess(String tenantName) {
    return setManagedAccess(tenantName, /* managedAccess= */ false);
}
/**
 * Sets whether access to the tenant is operator-managed, and echoes the new
 * value back as {"managedAccess": ...}. Only cloud tenants support this.
 *
 * @param tenantName    name of the tenant; must exist (require() throws otherwise)
 * @param managedAccess the new managed-access setting
 */
private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only set access privileges for cloud tenants"); // fixed typo: "privel"
    controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
    var slime = new Slime();
    slime.setObject().setBool("managedAccess", managedAccess);
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the stored tenant info for a cloud tenant, or 404 when the tenant
 * does not exist or is not a cloud tenant.
 */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName))
                                        .filter(t -> t.type() == Tenant.Type.cloud);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return tenantInfo(((CloudTenant) tenant.get()).info(), request);
}
/**
 * Applies the handler to the named tenant when it exists and is a cloud
 * tenant; returns 404 otherwise.
 */
private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName))
                                        .filter(t -> t.type() == Tenant.Type.cloud);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return handler.apply((CloudTenant) tenant.get());
}
/**
 * Serializes tenant info in the legacy flat layout; yields an empty JSON
 * object when no info has been registered.
 */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if (info.isEmpty())
        return new SlimeJsonResponse(slime); // nothing registered: empty object
    root.setString("name", info.name());
    root.setString("email", info.email());
    root.setString("website", info.website());
    root.setString("contactName", info.contact().name());
    root.setString("contactEmail", info.contact().email());
    toSlime(info.address(), root);
    toSlime(info.billingContact(), root);
    toSlime(info.contacts(), root);
    return new SlimeJsonResponse(slime);
}
/**
 * Serializes the "profile" section of a cloud tenant's info: contact,
 * company details and address. Empty info yields an empty JSON object.
 */
private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    TenantInfo info = cloudTenant.info();
    if ( ! info.isEmpty()) {
        Cursor contactCursor = root.setObject("contact");
        contactCursor.setString("name", info.contact().name());
        contactCursor.setString("email", info.contact().email());
        Cursor tenantCursor = root.setObject("tenant");
        tenantCursor.setString("company", info.name());
        tenantCursor.setString("website", info.website());
        toSlime(info.address(), root);
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Applies the handler to the named tenant and the parsed request body;
 * returns 404 when the tenant does not exist.
 * Note: unlike the Function overload, this does not filter on tenant type
 * before casting to CloudTenant — behavior kept as-is.
 */
private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return handler.apply((CloudTenant) tenant.get(), toSlime(request.getData()).get());
}
/**
 * Merges the profile section of the request body into the tenant's stored
 * info and persists the result under the tenant lock. Fields absent from the
 * request keep their stored values (getString falls back to the current value).
 *
 * @throws IllegalArgumentException when the merged info fails validation
 *         (blank/invalid contact fields or a malformed website URL)
 */
private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
var info = cloudTenant.info();
var mergedContact = TenantContact.empty()
.withName(getString(inspector.field("contact").field("name"), info.contact().name()))
.withEmail(getString(inspector.field("contact").field("email"), info.contact().email()));
var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address());
var mergedInfo = info
.withName(getString(inspector.field("tenant").field("name"), info.name()))
.withWebsite(getString(inspector.field("tenant").field("website"), info.website()))
.withContact(mergedContact)
.withAddress(mergedAddress);
// Validate before taking the lock, so invalid input never reaches storage.
validateMergedTenantInfo(mergedInfo);
controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
/**
 * Serializes the billing section of a cloud tenant's info: billing contact
 * and billing address. Empty info yields an empty JSON object.
 */
private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    TenantInfo info = cloudTenant.info();
    if ( ! info.isEmpty()) {
        TenantContact billingContact = info.billingContact().contact();
        Cursor contactCursor = root.setObject("contact");
        contactCursor.setString("name", billingContact.name());
        contactCursor.setString("email", billingContact.email());
        contactCursor.setString("phone", billingContact.phone());
        toSlime(info.billingContact().address(), root);
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Merges the billing section of the request body into the tenant's stored
 * info and persists the result under the tenant lock. Fields absent from the
 * request keep their stored values.
 *
 * Fix: the previous version declared an unused local ("address") and
 * re-fetched the billing contact/address; both are now read once and used.
 */
private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    var billing = info.billingContact();
    var mergedContact = updateTenantInfoContact(inspector.field("contact"), billing.contact());
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), billing.address());
    var mergedInfo = info.withBilling(billing.withContact(mergedContact)
                                             .withAddress(mergedAddress));
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        controller.tenants().store(lockedTenant.withInfo(mergedInfo));
    });
    return new MessageResponse("Tenant info updated");
}
/** Serializes only the contacts section of the tenant's info. */
private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
    Slime slime = new Slime();
    toSlime(cloudTenant.info().contacts(), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Merges the contacts section of the request body into the tenant's stored
 * info and persists the result under the tenant lock.
 */
private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
    var mergedContacts = updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.info().contacts());
    var mergedInfo = cloudTenant.info().withContacts(mergedContacts);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withInfo(mergedInfo)));
    return new MessageResponse("Tenant info updated");
}
/**
 * Validates merged tenant info before it is persisted.
 *
 * @throws IllegalArgumentException when the contact name or email is blank,
 *         the email has no '@', or a non-blank website is not a parseable URL
 */
private void validateMergedTenantInfo(TenantInfo mergedInfo) {
    if (mergedInfo.contact().name().isBlank()) {
        throw new IllegalArgumentException("'contactName' cannot be empty");
    }
    if (mergedInfo.contact().email().isBlank()) {
        throw new IllegalArgumentException("'contactEmail' cannot be empty");
    }
    if ( ! mergedInfo.contact().email().contains("@")) {
        throw new IllegalArgumentException("'contactEmail' needs to be an email address");
    }
    if ( ! mergedInfo.website().isBlank()) {
        try {
            new URL(mergedInfo.website());
        } catch (MalformedURLException e) {
            // Fix: preserve the cause so the underlying parse failure is not lost.
            throw new IllegalArgumentException("'website' needs to be a valid address", e);
        }
    }
}
/** Serializes a tenant address under an "address" key; no-op when empty. */
private void toSlime(TenantAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;
    Cursor cursor = parentCursor.setObject("address");
    cursor.setString("addressLines", address.address());
    cursor.setString("postalCodeOrZip", address.code());
    cursor.setString("city", address.city());
    cursor.setString("stateRegionProvince", address.region());
    cursor.setString("country", address.country());
}
/** Writes the billing contact under a "billingContact" object on the parent; writes nothing when it is empty. */
private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;
    Cursor cursor = parentCursor.setObject("billingContact");
    cursor.setString("name", billingContact.contact().name());
    cursor.setString("email", billingContact.contact().email());
    cursor.setString("phone", billingContact.contact().phone());
    toSlime(billingContact.address(), cursor);
}
/** Writes all contacts to a "contacts" array on the parent; only email contacts are currently serializable. */
private void toSlime(TenantContacts contacts, Cursor parentCursor) {
    Cursor contactsArray = parentCursor.setArray("contacts");
    for (var contact : contacts.all()) {
        Cursor contactCursor = contactsArray.addObject();
        Cursor audiencesArray = contactCursor.setArray("audiences");
        contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
        switch (contact.type()) {
            case EMAIL -> contactCursor.setString("email", ((TenantContacts.EmailContact) contact).email());
            default -> throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
        }
    }
}
/** Parses a wire-format audience name; throws IllegalArgumentException on anything else. */
private static TenantContacts.Audience fromAudience(String value) {
    return switch (value) {
        case "tenant" -> TenantContacts.Audience.TENANT;
        case "notifications" -> TenantContacts.Audience.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
    };
}
/** Inverse of {@link #fromAudience}: renders an audience as its wire-format name. */
private static String toAudience(TenantContacts.Audience audience) {
    return switch (audience) {
        case TENANT -> "tenant";
        case NOTIFICATIONS -> "notifications";
    };
}
/** Updates tenant info for the named tenant; 404 when the tenant is missing or not a cloud tenant. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty() || tenant.get().type() != Tenant.Type.cloud)
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return updateTenantInfo((CloudTenant) tenant.get(), request);
}
/**
 * Returns the trimmed string value of the given field, or {@code defaultValue}
 * when the field is not present in the request. Fixes the 'defaultVale' typo
 * in the parameter name.
 */
private String getString(Inspector field, String defaultValue) {
    return field.valid() ? field.asString().trim() : defaultValue;
}
/**
 * Merges the JSON request body into the tenant's stored info and persists the result.
 * Fields missing from the request keep their stored values (see getString); the merged
 * info is validated before the tenant is stored under its lock.
 *
 * @throws IllegalArgumentException if the merged info fails validation
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
TenantInfo oldInfo = tenant.info();
Inspector insp = toSlime(request.getData()).get();
// Top-level contact: name and email fall back to the stored values when absent.
TenantContact mergedContact = TenantContact.empty()
.withName(getString(insp.field("contactName"), oldInfo.contact().name()))
.withEmail(getString(insp.field("contactEmail"), oldInfo.contact().email()));
// Nested structures (address, billing, contacts) are merged by their own helpers.
TenantInfo mergedInfo = TenantInfo.empty()
.withName(getString(insp.field("name"), oldInfo.name()))
.withEmail(getString(insp.field("email"), oldInfo.email()))
.withWebsite(getString(insp.field("website"), oldInfo.website()))
.withContact(mergedContact)
.withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
.withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()))
.withContacts(updateTenantInfoContacts(insp.field("contacts"), oldInfo.contacts()));
// Validate before taking the lock so invalid requests fail fast.
validateMergedTenantInfo(mergedInfo);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
/**
 * Merges address fields from the request with the existing address. The result must be
 * all-or-nothing: either every field is blank or none is.
 *
 * @throws IllegalArgumentException if only some address fields are set
 */
private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
    if ( ! insp.valid()) return oldAddress;
    TenantAddress merged = TenantAddress.empty()
            .withCountry(getString(insp.field("country"), oldAddress.country()))
            .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
            .withCity(getString(insp.field("city"), oldAddress.city()))
            .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
            .withAddress(getString(insp.field("addressLines"), oldAddress.address()));
    long blankCount = Stream.of(merged.address(), merged.code(), merged.country(), merged.city(), merged.region())
                            .filter(String::isBlank)
                            .count();
    if (blankCount != 0 && blankCount != 5)
        throw new IllegalArgumentException("All address fields must be set");
    return merged;
}
/**
 * Merges contact fields from the request with the existing contact.
 *
 * @throws IllegalArgumentException if a non-blank email does not look like an email address
 */
private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) {
    if (!insp.valid()) return oldContact;
    String email = getString(insp.field("email"), oldContact.email());
    if (!email.isBlank() && !email.contains("@")) {
        throw new IllegalArgumentException("'email' needs to be an email address");
    }
    return TenantContact.empty()
            .withName(getString(insp.field("name"), oldContact.name()))
            .withEmail(email) // reuse the validated value instead of re-reading the field
            .withPhone(getString(insp.field("phone"), oldContact.phone()));
}
/** Merges billing contact and billing address from the request with the stored values. */
private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) {
    if (insp.valid())
        return TenantBilling.empty()
                .withContact(updateTenantInfoContact(insp, oldContact.contact()))
                .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
    return oldContact;
}
/**
 * Parses the request's contact entries into a new TenantContacts; the stored contacts
 * are returned unchanged when the field is absent.
 *
 * @throws IllegalArgumentException if an audience is unknown or an email is not an address
 */
private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) {
    if ( ! insp.valid()) return oldContacts;
    return new TenantContacts(SlimeUtils.entriesStream(insp)
            .map(entry -> {
                String email = entry.field("email").asString().trim();
                var audiences = SlimeUtils.entriesStream(entry.field("audiences"))
                        .map(audience -> fromAudience(audience.asString()))
                        .toList();
                if ( ! email.contains("@"))
                    throw new IllegalArgumentException("'email' needs to be an email address");
                return new TenantContacts.EmailContact(audiences, email);
            })
            .toList());
}
/**
 * Lists notifications: for the given tenant when present, otherwise for every tenant
 * that has notifications. Request properties (application, instance, zone, job, type,
 * level) further filter the result — see propertyEquals. 'excludeMessages=true' omits
 * message bodies from the response.
 */
private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
boolean productionOnly = showOnlyProductionInstances(request);
boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
Slime slime = new Slime();
Cursor notificationsArray = slime.setObject().setArray("notifications");
// One tenant when specified, otherwise all tenants with notifications.
tenant.map(t -> Stream.of(TenantName.from(t)))
.orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
.flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
.filter(notification ->
propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
.forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
return new SlimeJsonResponse(slime);
}
/**
 * Returns true when the request does not set the given property; otherwise true only if
 * the mapped property value equals the present {@code value}.
 */
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
    String propertyValue = request.getProperty(property);
    if (propertyValue == null) return true;
    return value.isPresent() && mapper.apply(propertyValue).equals(value.get());
}
/**
 * Serializes one notification onto the given cursor. Optional source coordinates
 * (application, instance, zone, cluster, job, run) are written only when present.
 */
private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
cursor.setLong("at", notification.at().toEpochMilli());
cursor.setString("level", notificationLevelAsString(notification.level()));
cursor.setString("type", notificationTypeAsString(notification.type()));
if (!excludeMessages) {
Cursor messagesArray = cursor.setArray("messages");
notification.messages().forEach(messagesArray::addString);
}
// Tenant is omitted when the response is already scoped to a single tenant.
if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
notification.source().zoneId().ifPresent(zoneId -> {
cursor.setString("environment", zoneId.environment().value());
cursor.setString("region", zoneId.region().value());
});
notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
/** Renders a notification type as its API name; submission and applicationPackage share one name. */
private static String notificationTypeAsString(Notification.Type type) {
    return switch (type) {
        case submission, applicationPackage -> "applicationPackage";
        case testPackage -> "testPackage";
        case deployment -> "deployment";
        case feedBlock -> "feedBlock";
        case reindex -> "reindex";
    };
}
/** Renders a notification level as its API name. */
private static String notificationLevelAsString(Notification.Level level) {
    return switch (level) {
        case info -> "info";
        case warning -> "warning";
        case error -> "error";
    };
}
/**
 * Lists applications for a tenant — all of them, or just the named one — as a JSON array
 * with per-application and per-instance API URLs.
 *
 * @throws NotExistsException if the named application does not exist
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
// Fails with a not-found error if the tenant does not exist.
getTenantOrThrow(tenantName);
List<Application> applications = applicationName.isEmpty() ?
controller.applications().asList(tenant) :
controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
.map(List::of)
.orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (Application application : applications) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
// Optionally restrict the instance list to production instances only.
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
return new SlimeJsonResponse(slime);
}
/**
 * Returns the application package deployed by the last run of the given dev/perf job,
 * as a zip named {@code <app>.<zone>.zip}.
 *
 * @throws NotExistsException if the job has never run for this application
 *         (previously an unchecked Optional.get() turned this into a 500)
 */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    ZoneId zone = type.zone();
    RevisionId revision = controller.jobController().last(id, type)
            .orElseThrow(() -> new NotExistsException("No runs of " + type.jobName() + " found for " + id))
            .versions().targetRevision();
    byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
    return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
}
/** Returns the stored package diff for the given dev run, or 404 when none exists. */
private HttpResponse devApplicationPackageDiff(RunId runId) {
    ZoneId zone = runId.job().type().zone();
    DeploymentId deploymentId = new DeploymentId(runId.application(), zone);
    var diff = controller.applications().applicationStore().getDevDiff(deploymentId, runId.number());
    if (diff.isEmpty())
        throw new NotExistsException("No application package diff found for " + runId);
    return new ByteArrayResponse(diff.get());
}
/**
 * Returns an application package as a zip. The 'build' request property may be a positive
 * build number, "latestDeployed" (the package last deployed to production), or absent
 * (the last submitted package). With 'tests=true' the tester package is returned instead.
 *
 * @throws NotExistsException if no matching package exists
 * @throws IllegalArgumentException if 'build' is not "latestDeployed" or a number >= 1
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    final long build;
    String requestedBuild = request.getProperty("build");
    if (requestedBuild != null) {
        if (requestedBuild.equals("latestDeployed")) {
            build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision()
                              .map(RevisionId::number)
                              .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication));
        } else {
            try {
                // Reuse the property value read above instead of fetching it again.
                build = Validation.requireAtLeast(Long.parseLong(requestedBuild), "build number", 1L);
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
            }
        }
    } else {
        build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
                          .map(version -> version.id().number())
                          .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
    }
    RevisionId revision = RevisionId.forProduction(build);
    boolean tests = request.getBooleanProperty("tests");
    byte[] applicationPackage = tests ?
            controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
            controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
    String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
    return new ZipResponse(filename, applicationPackage);
}
/** Returns the stored package diff for the given build, or 404 when none exists. */
private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
    var diff = controller.applications().applicationStore()
                         .getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number));
    if (diff.isEmpty())
        throw new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number);
    return new ByteArrayResponse(diff.get());
}
/** Returns the named application serialized as JSON. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the compile version for the application, optionally pinned to a given major.
 *
 * @throws IllegalArgumentException if allowMajorParam is present but not an integer
 */
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
    OptionalInt allowMajor = OptionalInt.empty();
    if (allowMajorParam != null) {
        try {
            allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
        }
    }
    Version version = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
    Slime slime = new Slime();
    slime.setObject().setString("compileVersion", version.toFullString());
    return new SlimeJsonResponse(slime);
}
/** Returns the named instance, including its deployment status, serialized as JSON. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = getInstance(tenantName, applicationName, instanceName);
    Slime slime = new Slime();
    toSlime(slime.setObject(), instance,
            controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Registers the PEM public key in the request body as a developer key for the requesting
 * user, and returns the tenant's full key list.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    TenantName name = TenantName.from(tenantName);
    if (controller.tenants().require(name).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Principal user = request.getJDiscRequest().getUserPrincipal();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.tenants().lockOrThrow(name, LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Asks the config server of the given deployment to validate access to a tenant secret
 * store, and wraps the config server's JSON reply in a response object.
 * Request properties: aws-region, parameter-name, application-id, zone.
 */
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
var awsRegion = request.getProperty("aws-region");
var parameterName = request.getProperty("parameter-name");
var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
// The application must belong to the tenant in the path.
if (!applicationId.tenant().equals(TenantName.from(tenantName)))
return ErrorResponse.badRequest("Invalid application id");
var zoneId = requireZone(ZoneId.from(request.getProperty("zone")));
var deploymentId = new DeploymentId(applicationId, zoneId);
var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
var tenantSecretStore = tenant.tenantSecretStores()
.stream()
.filter(secretStore -> secretStore.getName().equals(secretStoreName))
.findFirst();
if (tenantSecretStore.isEmpty())
return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
// The config server answers with raw JSON; re-wrap it under "target"/"result".
var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
try {
var responseRoot = new Slime();
var responseCursor = responseRoot.setObject();
responseCursor.setString("target", deploymentId.toString());
var responseResultCursor = responseCursor.setObject("result");
var responseSlime = SlimeUtils.jsonToSlime(response);
SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
return new SlimeJsonResponse(responseRoot);
} catch (JsonParseException e) {
// Malformed JSON from the config server is logged and reported as a server error.
return ErrorResponses.logThrowing(request, log, e);
}
}
/**
 * Removes the PEM public key in the request body from the tenant's developer keys,
 * and returns the remaining key list.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // The previous version also looked up the key's owner here, but never used it.
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/** Serializes each key/owner pair as an object with "key" (PEM) and "user" fields. */
private void toSlime(Cursor keysArray, Map<PublicKey, ? extends Principal> keys) {
    for (var entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/** Adds the PEM public key in the request body as a deploy key, returning the full key list. */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        for (var key : application.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/** Removes the PEM public key in the request body from the deploy keys, returning the remaining keys. */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        for (var key : application.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Registers a new tenant secret store: creates the IAM tenant policy, registers the
 * store with the tenant secret service, then persists it on the tenant. Returns the
 * tenant's resulting secret store list.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var awsId = mandatory("awsId", data).asString();
var externalId = mandatory("externalId", data).asString();
var role = mandatory("role", data).asString();
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var tenantSecretStore = new TenantSecretStore(name, awsId, role);
if (!tenantSecretStore.isValid()) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
}
if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
}
// External side effects happen before the store is persisted on the tenant.
controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read the tenant so the response reflects the stored state.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
/**
 * Deletes a tenant secret store: removes it from the tenant secret service, deletes the
 * IAM tenant policy, then removes it from the stored tenant. Returns the remaining list.
 */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var optionalSecretStore = tenant.tenantSecretStores().stream()
.filter(secretStore -> secretStore.getName().equals(name))
.findFirst();
if (optionalSecretStore.isEmpty())
return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
var tenantSecretStore = optionalSecretStore.get();
// External side effects happen before the tenant record is updated.
controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read the tenant so the response reflects the stored state.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
/**
 * Sets the AWS archive access role for the tenant.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
    TenantName name = TenantName.from(tenantName);
    if (controller.tenants().require(name).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String role = mandatory("role", toSlime(request.getData()).get()).asString();
    if (role.isBlank())
        return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
    controller.tenants().lockOrThrow(name, LockedTenant.Cloud.class, lockedTenant -> {
        var updated = lockedTenant.withArchiveAccess(lockedTenant.get().archiveAccess().withAWSRole(role));
        controller.tenants().store(updated);
    });
    return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
/**
 * Clears the AWS archive access role for the tenant.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse removeAwsArchiveAccess(String tenantName) {
    TenantName name = TenantName.from(tenantName);
    if (controller.tenants().require(name).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(name, LockedTenant.Cloud.class, lockedTenant -> {
        var updated = lockedTenant.withArchiveAccess(lockedTenant.get().archiveAccess().removeAWSRole());
        controller.tenants().store(updated);
    });
    return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
}
/**
 * Sets the GCP archive access member for the tenant.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var member = mandatory("member", data).asString();
    if (member.isBlank()) {
        // Fixed: this error previously said "role", but the field is a member.
        return ErrorResponse.badRequest("GCP archive access member can't be whitespace only");
    }
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}
/**
 * Clears the GCP archive access member for the tenant.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse removeGcpArchiveAccess(String tenantName) {
    TenantName name = TenantName.from(tenantName);
    if (controller.tenants().require(name).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(name, LockedTenant.Cloud.class, lockedTenant -> {
        var updated = lockedTenant.withArchiveAccess(lockedTenant.get().archiveAccess().removeGCPMember());
        controller.tenants().store(updated);
    });
    return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}
/**
 * Patches application-level settings from the request body. Supported fields:
 * "majorVersion" (0 clears the pin) and "pemDeployKey" (adds a deploy key).
 * Returns a message describing the changes, or "No applicable changes.".
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
Inspector majorVersionField = requestObject.field("majorVersion");
if (majorVersionField.valid()) {
// 0 means "unpin": stored as null.
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
application = application.withMajorVersion(majorVersion);
messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
}
Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
if (pemDeployKeyField.valid()) {
String pemDeployKey = pemDeployKeyField.asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
application = application.withDeployKey(deployKey);
messageBuilder.add("Added deploy key " + pemDeployKey);
}
controller.applications().store(application);
});
return new MessageResponse(messageBuilder.toString());
}
/** Returns the named application, or throws NotExistsException if it does not exist. */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Returns the named instance, or throws NotExistsException if it does not exist. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications().getInstance(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/**
 * Lists the nodes allocated to the given deployment, with state, version, resources,
 * cluster membership, and restart/reboot/retirement status for each.
 */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));
Slime slime = new Slime();
Cursor nodesArray = slime.setObject().setArray("nodes");
for (Node node : nodes) {
Cursor nodeObject = nodesArray.addObject();
nodeObject.setString("hostname", node.hostname().value());
nodeObject.setString("state", valueOf(node.state()));
node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
nodeObject.setString("orchestration", valueOf(node.serviceState()));
nodeObject.setString("version", node.currentVersion().toString());
node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
toSlime(node.resources(), nodeObject);
nodeObject.setString("clusterId", node.clusterId());
nodeObject.setString("clusterType", valueOf(node.clusterType()));
nodeObject.setBool("down", node.down());
// A node asked to retire counts as retired for API purposes.
nodeObject.setBool("retired", node.retired() || node.wantToRetire());
// Pending restarts/reboots show as a generation gap.
nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
nodeObject.setString("group", node.group());
nodeObject.setLong("index", node.index());
}
return new SlimeJsonResponse(slime);
}
/**
 * Lists the clusters of the given deployment with their autoscaling data:
 * min/max/current resources, target and suggested resources when relevant,
 * utilization, scaling events, and scaling status/duration metrics.
 */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
Slime slime = new Slime();
Cursor clustersObject = slime.setObject().setObject("clusters");
for (Cluster cluster : application.clusters().values()) {
Cursor clusterObject = clustersObject.setObject(cluster.id().value());
clusterObject.setString("type", cluster.type().name());
toSlime(cluster.min(), clusterObject.setObject("min"));
toSlime(cluster.max(), clusterObject.setObject("max"));
toSlime(cluster.current(), clusterObject.setObject("current"));
// Only report a target when it differs from current (ignoring non-numeric fields).
if (cluster.target().isPresent()
&& ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
toSlime(cluster.target().get(), clusterObject.setObject("target"));
cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
}
return new SlimeJsonResponse(slime);
}
/** Renders a node state as its API name; throws on states not known to this API. */
private static String valueOf(Node.State state) {
    return switch (state) {
        case failed -> "failed";
        case parked -> "parked";
        case dirty -> "dirty";
        case ready -> "ready";
        case active -> "active";
        case inactive -> "inactive";
        case reserved -> "reserved";
        case provisioned -> "provisioned";
        case breakfixed -> "breakfixed";
        case deprovisioned -> "deprovisioned";
        default -> throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    };
}
/** Renders an orchestration state as its API name, falling back to "unknown". */
static String valueOf(Node.ServiceState state) {
    return switch (state) {
        case expectedUp -> "expectedUp";
        case allowedDown -> "allowedDown";
        case permanentlyDown -> "permanentlyDown";
        case unorchestrated -> "unorchestrated";
        default -> "unknown";
    };
}
/** Renders a cluster type as its API name; 'unknown' is not a legal value here. */
private static String valueOf(Node.ClusterType type) {
    return switch (type) {
        case admin -> "admin";
        case content -> "content";
        case container -> "container";
        case combined -> "combined";
        case unknown -> throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    };
}
/** Renders a disk speed as its API name. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    return switch (diskSpeed) {
        case fast -> "fast";
        case slow -> "slow";
        case any -> "any";
    };
}
/** Serializes a storage type to its API string form. */
private static String valueOf(NodeResources.StorageType storageType) {
    return switch (storageType) {
        case remote -> "remote";
        case local -> "local";
        case any -> "any";
    };
}
/** Streams Vespa logs for the given deployment directly to the client. */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // try-with-resources ensures the upstream log stream is closed even if the copy fails.
            try (logStream) {
                logStream.transferTo(outputStream);
            }
        }
        @Override
        public long maxPendingBytes() {
            return 1 << 26; // allow up to 64 MiB of buffered response data before back-pressure
        }
    };
}
/** Returns the current support access state of the given deployment. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
}
/** Grants support access to the given deployment for one week, attributed to the calling user. */
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    Instant now = controller.clock().instant();
    SupportAccess updated = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(updated, now));
}
/** Revokes support access for the given deployment, and re-deploys it so the revocation takes effect. */
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess updated = controller.supportAccess().disallow(deployment, principal.getName());
    controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(updated, controller.clock().instant()));
}
/** Returns proton metrics for the given deployment, as reported by the config server. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    return buildResponseFromProtonMetrics(controller.serviceRegistry().configServer().getProtonMetrics(deployment));
}
/**
 * Lists scaling events per cluster for the given deployment, within the window given by the
 * optional "from" and "until" epoch-second request properties (defaults: epoch and now).
 */
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    Instant from = Optional.ofNullable(request.getProperty("from"))
                           .map(Long::valueOf)
                           .map(Instant::ofEpochSecond)
                           .orElse(Instant.EPOCH);
    Instant until = Optional.ofNullable(request.getProperty("until"))
                            .map(Long::valueOf)
                            .map(Instant::ofEpochSecond)
                            .orElse(Instant.now(controller.clock()));
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    // One array per cluster, keyed by cluster id.
    controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment)
              .forEach((cluster, events) -> scalingEventsToSlime(events, root.setArray(cluster.clusterId().value())));
    return new SlimeJsonResponse(slime);
}
/** Wraps the given proton metrics in a JSON response of the form { "metrics": [ ... ] }. */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var metricsArray = jsonMapper.createArrayNode();
        for (ProtonMetrics metrics : protonMetrics)
            metricsArray.add(metrics.toJson());
        var root = jsonMapper.createObjectNode();
        root.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root));
    } catch (JsonProcessingException e) {
        // Best-effort endpoint: a serialization failure yields an empty 500 instead of propagating.
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Forcefully triggers the given job for the given instance, or re-triggers its current run.
 *
 * Recognized boolean fields in the request body: "skipTests", "reTrigger", "skipRevision", "skipUpgrade".
 * Returns a message naming the job(s) actually triggered, or that none were.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
    boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
    // reTrigger restarts exactly this job; forceTrigger may start several jobs, joined by name below.
    String triggered = reTrigger
                       ? controller.applications().deploymentTrigger()
                                   .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
                       : controller.applications().deploymentTrigger()
                                   .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                                   .stream().map(job -> job.type().jobName()).collect(joining(", "));
    // Builds ", without revision upgrade", ", without platform upgrade",
    // ", without revision and platform upgrade", or "" when nothing is suppressed.
    String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                (upgradeRevision ? "" : "revision") +
                                ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                (upgradePlatform ? "" : "platform") +
                                ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
    return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                   : "Triggered " + triggered + " for " + id + suppressedUpgrades);
}
/** Pauses the given job for the maximum allowed pause period. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant resumeAt = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, resumeAt);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes the given job if it is currently paused. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/**
 * Serializes the given application — ids, change status, instances, deploy keys,
 * metrics, activity and issue ids — to the given object, for the application GET response.
 * Honors the request's "production instances only" and recursion flags.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // Change status is read off the first instance only — presumably all instances share the
    // application-level change; NOTE(review): confirm this holds for multi-instance applications.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
    });
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes the given instance — change status, change blockers, rotation id and deployments —
 * to the given object, as part of the application GET response.
 * Deployments are serialized recursively when the request asks for it, otherwise as links.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    // Change and change-blocker info only applies to instances declared in deployment.xml.
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), status.application());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    addRotationId(object, instance);
    // Declared instances get deployments in deployment-spec order; others in map order.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Adds the instance's first assigned rotation id to the given object, if the instance has any rotations. */
private void addRotationId(Cursor object, Instance instance) {
    if ( ! instance.rotations().isEmpty())
        object.setString("rotationId", instance.rotations().get(0).rotationId().asString());
}
/**
 * Serializes the given instance for the instance-level GET response: ids, latest revision,
 * change status, change blockers, rotation info, deployments (recursively if requested),
 * pending deployment zones, deploy keys, metrics, activity and issue ids.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.revisions().last().ifPresent(version -> {
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // Change and change-blocker info only applies to instances declared in deployment.xml.
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // NOTE(review): jobStatus is computed but never used — candidate for removal.
        Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    addRotationId(object, instance);
    // Declared instances get deployments in deployment-spec order; others in map order.
    List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
            .map(spec -> sortedDeployments(instance.deployments().values(), spec))
            .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            // Legacy single-rotation status field, kept alongside the newer endpointStatus array.
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Also list zones where a deployment is expected (production jobs) or in progress
    // (manual environments) but not yet present, as environment/region stubs.
    Stream.concat(status.jobSteps().keySet().stream()
                        .filter(job -> job.application().instance().equals(instance.name()))
                        .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                  controller.jobController().active(instance.id()).stream()
                            .map(run -> run.id().job())
                            .filter(job -> job.type().environment().isManuallyDeployed()))
          .map(job -> job.type().zone())
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    // Singular pemDeployKey is kept for backwards compatibility with older clients.
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns detailed information about a single deployment of the given instance. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
    Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes the platform and/or application-revision parts of the given change to the given object. */
private void toSlime(Cursor object, Change change, Application application) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.revision().ifPresent(which -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"),
                                                                               application.revisions().get(which)));
}
/** Serializes the given endpoint — cluster, TLS flag, URL, scope, routing method and legacy flag — to the given object. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
    object.setBool("legacy", endpoint.legacy());
}
/**
 * Serializes a single deployment for the deployment GET response: ids, endpoints, links,
 * versions, rotation status, job status, quota/cost, archive URI, activity and metrics.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
    boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
    var endpointArray = response.setArray("endpoints");
    // Zone-scoped endpoints first, then declared (global/application) endpoints targeting this deployment.
    EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone);
    if (!legacyEndpoints) {
        zoneEndpoints = zoneEndpoints.not().legacy().direct();
    }
    for (var endpoint : zoneEndpoints) {
        toSlime(endpoint, endpointArray.addObject());
    }
    EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
                                               .targets(deploymentId);
    if (!legacyEndpoints) {
        declaredEndpoints = declaredEndpoints.not().legacy().direct();
    }
    for (var endpoint : declaredEndpoints) {
        toSlime(endpoint, endpointArray.addObject());
    }
    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", application.revisions().get(deployment.revision()).stringId());
    response.setLong("build", deployment.revision().number());
    Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
    response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
        if (!deployment.zone().environment().isManuallyDeployed()) {
            // Derive job status for this zone: complete, pending (waiting to become ready), or running.
            DeploymentStatus status = controller.jobController().deploymentStatus(application);
            JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
            Optional.ofNullable(status.jobSteps().get(jobId))
                    .ifPresent(stepStatus -> {
                        JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
                        if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
                            response.setString("status", "complete");
                        else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                            response.setString("status", "pending");
                        else
                            response.setString("status", "running");
                    });
        } else {
            // Manual environments: status is simply whether the last deployment run has ended.
            var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
            deploymentRun.ifPresent(run -> {
                response.setString("status", run.hasEnded() ? "complete" : "running");
            });
        }
    }
    response.setDouble("quota", deployment.quota().rate());
    deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
    controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)
              .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Writes the BCP (rotation) status of the given rotation state to the given object. */
private void toSlime(RotationState state, Cursor object) {
    Cursor bcpStatus = object.setObject("bcpStatus");
    bcpStatus.setString("rotationStatus", rotationStateString(state));
}
/** Serializes the per-endpoint rotation status of the given deployment to an "endpointStatus" array. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor array = object.setArray("endpointStatus");
    for (AssignedRotation rotation : rotations) {
        Cursor statusObject = array.addObject();
        statusObject.setString("endpointId", rotation.endpointId().id());
        statusObject.setString("rotationId", rotation.rotationId().asString());
        statusObject.setString("clusterId", rotation.clusterId().value());
        statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
        statusObject.setLong("lastUpdated", status.of(rotation.rotationId()).lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring dashboard URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/** Sets the global routing status of the given deployment in or out of service. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }
    // Operator-initiated overrides are attributed separately from tenant-initiated ones.
    RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value newStatus = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(new DeploymentId(instance.id(), zone)).setRoutingStatus(newStatus, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                           instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/**
 * Returns the global routing override status of the given deployment, in the legacy
 * "globalrotationoverride" format: [ upstreamName, { status object } ]. The array is
 * empty when the application has no endpoint requiring a rotation.
 */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
              .requiresRotation()
              .primary()
              .ifPresent(primaryEndpoint -> {
                  RoutingStatus status = controller.routing().of(deploymentId).routingStatus();
                  array.addString(primaryEndpoint.upstreamName(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.value().name());
                  statusObject.setString("reason", ""); // no reason is tracked; field kept for API compatibility
                  statusObject.setString("agent", status.agent().name());
                  statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation status of the given deployment, for the given (or the only) endpoint. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId); // resolved before the deployment check, to keep error precedence
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }
    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the change currently rolling out for the given instance, or an empty object if none. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(platform -> root.setString("platform", platform.toString()));
        change.revision().ifPresent(revision -> root.setString("application", revision.toString()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Proxies a /status page request to the given service on the given host of the deployment. */
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Query query = Query.empty().add(request.getJDiscRequest().parameters());
    return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId,
                                                                          serviceName,
                                                                          DomainName.of(host),
                                                                          HttpURL.Path.parse("/status").append(restPath),
                                                                          query);
}
/** Returns the orchestrator's view of all service nodes of the given deployment. */
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(application, requireZone(environment, region));
    return controller.serviceRegistry().configServer().getServiceNodes(deploymentId);
}
/** Proxies a /state/v1 request to the given service, forwarding the original URL so links can be rewritten. */
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Query query = Query.empty().add(request.getJDiscRequest().parameters())
                       .set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString());
    return controller.serviceRegistry().configServer().getServiceNodePage(
            deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
}
/** Serves (part of) the deployed application package for the given deployment. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(application, requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
}
/**
 * Updates the tenant with the given name from the request body, and returns its new state.
 * 404s before attempting the update if the tenant does not exist.
 */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // fail fast with 404 for unknown tenants
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request); // reuse 'tenant' instead of re-parsing the name
}
/** Creates a tenant from the request body; in public systems, also records the creating user as tenant contact. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName name = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(name, requestObject),
                                accessControlRequests.credentials(name, requestObject, request.getJDiscRequest()));
    if (controller.system().isPublic()) {
        // Record the authenticated user as the initial tenant contact.
        User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
        TenantInfo info = controller.tenants().require(name, CloudTenant.class)
                                    .info()
                                    .withContact(TenantContact.from(user.name(), user.email()));
        controller.tenants().lockOrThrow(name, LockedTenant.Cloud.class, locked ->
                controller.tenants().store(locked.withInfo(info)));
    }
    return tenant(controller.tenants().require(name), request);
}
/** Creates an application under the given tenant, and returns its summary. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(),
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        // An empty version is shorthand for the current system version.
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        // Only operators may force a version which is not active in this system.
        if ( ! versionStatus.isActive(version) && ! isOperator(request))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Inspector buildField = toSlime(request.getData()).get().field("build");
    long build = buildField.valid() ? buildField.asLong() : -1;
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // No build given: fall back to the last known revision.
        RevisionId revision = build == -1 ? application.get().revisions().last().get().id()
                                          : getRevision(application.get(), build);
        Change change = Change.of(revision);
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        message.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/** Returns the revision with the given build number, failing when it is unknown or its package is no longer stored. */
private RevisionId getRevision(Application application, long build) {
    Optional<RevisionId> match = application.revisions().withPackage().stream()
                                            .map(ApplicationVersion::id)
                                            .filter(candidate -> candidate.number() == build)
                                            .findFirst();
    return match.filter(candidate -> controller.applications().applicationStore().hasBuild(application.id().tenant(),
                                                                                           application.id().application(),
                                                                                           build))
                .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
}
/** Marks the given build as non-deployable, and cancels any instance change currently targeting it. */
private HttpResponse cancelBuild(String tenantName, String applicationName, String build) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
    controller.applications().lockApplicationOrThrow(id, application -> {
        controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
        for (Instance instance : application.get().instances().values()) {
            if (instance.change().revision().equals(Optional.of(revision)))
                controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION);
        }
    });
    return new MessageResponse("Marked build '" + build + "' as non-deployable");
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // Upper-case with a fixed locale, so enum lookup cannot be mangled by locale-sensitive
        // casing (e.g. the Turkish dotless-i) in the JVM's default locale.
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase(java.util.Locale.ROOT));
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // Optional comma-separated filters; blank entries are ignored.
    List<String> clusters = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                    .flatMap(value -> Stream.of(value.split(",")))
                                    .filter(name -> ! name.isBlank())
                                    .toList();
    List<String> types = Optional.ofNullable(request.getProperty("documentType")).stream()
                                 .flatMap(value -> Stream.of(value.split(",")))
                                 .filter(name -> ! name.isBlank())
                                 .toList();
    Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
    boolean indexedOnly = request.getBooleanProperty("indexedOnly");
    controller.applications().reindex(id, zone, clusters, types, indexedOnly, speed);
    StringBuilder message = new StringBuilder("Requested reindexing of " + id + " in " + zone);
    if ( ! clusters.isEmpty()) message.append(", on clusters ").append(String.join(", ", clusters));
    if ( ! types.isEmpty()) message.append(", for types ").append(String.join(", ", types));
    if (indexedOnly) message.append(", for indexed types");
    if (speed != null) message.append(", with speed ").append(speed);
    return new MessageResponse(message.toString());
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    Cursor clustersArray = root.setArray("clusters");
    // Sort clusters, and the type entries within each, by name for stable output.
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  // Types waiting to be reindexed, with the config generation that requires it.
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  // Types whose reindexing is under way or done, with their detailed status.
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}
/** Serializes one reindexing status entry onto the given cursor; absent fields are simply omitted. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(at -> statusObject.setLong("readyAtMillis", at.toEpochMilli()));
    status.startedAt().ifPresent(at -> statusObject.setLong("startedAtMillis", at.toEpochMilli()));
    status.endedAt().ifPresent(at -> statusObject.setLong("endedAtMillis", at.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(name -> statusObject.setString("state", name));
    status.message().ifPresent(text -> statusObject.setString("message", text));
    status.progress().ifPresent(fraction -> statusObject.setDouble("progress", fraction));
    status.speed().ifPresent(value -> statusObject.setDouble("speed", value));
}
/** Returns the lower-case wire name of the given reindexing state. */
private static String toString(ApplicationReindexing.State state) {
    return switch (state) {
        case PENDING -> "pending";
        case RUNNING -> "running";
        case FAILED -> "failed";
        case SUCCESSFUL -> "successful";
    };
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    // Each filter dimension is optional; an empty filter restarts the whole deployment.
    RestartFilter filter = new RestartFilter()
            .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of))
            .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
            .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
    controller.applications().restart(deployment, filter);
    return new MessageResponse("Requested restart of " + deployment);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    controller.applications().setSuspension(deployment, suspend);
    String verb = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(verb + " orchestration of " + deployment);
}
/** Deploys the application package in the request directly to the zone of the given job type. */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    // Production zones may only be targeted directly by operators.
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("applicationZip"))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone()),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    // Optional platform version override, from the JSON "deployOptions" form part.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);
    ensureApplicationExists(TenantAndApplicationId.from(id), request);
    // Optional dry-run flag, also from "deployOptions"; defaults to a real deployment.
    boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions"))
                             .map(json -> SlimeUtils.jsonToSlime(json).get())
                             .flatMap(options -> optional("dryRun", options))
                             .map(Boolean::valueOf)
                             .orElse(false);
    controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
    // The run just started above, so last(id, type) is expected to be present here.
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/** Deploys a system application with an application package to the given zone; unsupported for other applications. */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    // This endpoint only serves system applications which carry their own application package.
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) {
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
    }
    // System applications are always deployed on the system version, so a version override is rejected.
    String vespaVersion = deployOptions.field("vespaVersion").asString();
    if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) {
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
    }
    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading()) {
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    }
    Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
    if (systemVersion.isEmpty()) {
        throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
    }
    ActivateResult result = controller.applications()
                                      .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant; only operators may additionally "forget" it. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    boolean forget = request.getBooleanProperty("forget");
    if (forget && ! isOperator(request))
        return ErrorResponse.forbidden("Only operators can forget a tenant");
    TenantName tenant = TenantName.from(tenantName);
    Credentials credentials = accessControlRequests.credentials(tenant,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(tenant, Optional.of(credentials), forget);
    return new MessageResponse("Deleted tenant " + tenantName);
}
/** Deletes the given application, with credentials taken from the request body. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(),
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance; when it was the last one, the application is deleted as well. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(),
                                                                    toSlime(request.getData()).get(),
                                                                    request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}
/** Deactivates the given deployment, aborting any deployment job still running against it. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                       requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
              .filter(run -> ! run.hasEnded())
              .ifPresent(run -> controller.jobController().abort(run.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
    // Instances not declared in the deployment spec are tested against the default instance's production deployments.
    ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
                                   ? id : TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(prodInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(prodInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
    // For non-production jobs, the zone under test itself is also part of the config.
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(toTest, type.zone()));
    Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone());
    if (deployment == null)
        throw new NotExistsException(toTest + " is not deployed in " + type.zone());
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  deployment.version(),
                                                                  deployment.revision(),
                                                                  deployment.at(),
                                                                  controller.routing().readTestRunnerEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/**
 * Requests a service dump for the given node by writing a "serviceDump" report to its node
 * repository entry. Fails if a dump is already in progress, unless 'force' is set. When 'wait'
 * is set, blocks until the dump completes or fails and returns the final report.
 */
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
                                        String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);
    Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
    if (report != null) {
        Cursor cursor = report.get();
        boolean force = request.getBooleanProperty("force");
        // A report with neither failedAt nor completedAt set means a dump is still running.
        if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
            throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
        }
    }
    Slime requestPayload;
    try {
        requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
    } catch (Exception e) {
        throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
    }
    Cursor requestPayloadCursor = requestPayload.get();
    String configId = requestPayloadCursor.field("configId").asString();
    long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
    if (configId.isEmpty()) {
        throw new IllegalArgumentException("Missing configId");
    }
    Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
    int artifactEntries = artifactsCursor.entries();
    if (artifactEntries == 0) {
        throw new IllegalArgumentException("Missing or empty 'artifacts'");
    }
    // Build the dump request document which is stored in the node's reports.
    Slime dumpRequest = new Slime();
    Cursor dumpRequestCursor = dumpRequest.setObject();
    dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
    dumpRequestCursor.setString("configId", configId);
    Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
    for (int i = 0; i < artifactEntries; i++) {
        dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
    }
    if (expiresAt > 0) {
        dumpRequestCursor.setLong("expiresAt", expiresAt);
    }
    Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
    if (dumpOptionsCursor.children() > 0) {
        SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
    }
    // Decode explicitly as UTF-8: the JSON bytes are UTF-8, and relying on the platform
    // default charset is wrong on pre-Java-18 JVMs with a non-UTF-8 default.
    var reportsUpdate = Map.of("serviceDump",
                               new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest)),
                                          java.nio.charset.StandardCharsets.UTF_8));
    nodeRepository.updateReports(zone, hostname, reportsUpdate);
    boolean wait = request.getBooleanProperty("wait");
    if (!wait) return new MessageResponse("Request created");
    return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
}
/** Returns the current service dump report for the given node, or 404 when none exists. */
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment,
                                    String region, String hostname, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    return getReport(nodeRepository, zone, tenant, application, instance, hostname)
            .map(SlimeJsonResponse::new)
            .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname));
}
/**
 * Polls the node repository until the service dump for the given node has either completed or
 * failed, then returns its final report. Note: there is no timeout here; the wait is expected
 * to be bounded by the surrounding request handling.
 */
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                              String application, String instance, String hostname) {
    int pollInterval = 2; // seconds between polls
    Slime report;
    while (true) {
        // orElseThrow() instead of unchecked get(); the report was just written, so it should exist.
        report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElseThrow();
        Cursor cursor = report.get();
        if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
            break;
        }
        final Slime copyForLambda = report;
        // Decode explicitly as UTF-8; the platform default charset may differ on pre-18 JVMs.
        log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda),
                                                java.nio.charset.StandardCharsets.UTF_8)));
        log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
        controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
    }
    return new SlimeJsonResponse(report);
}
/**
 * Fetches the "serviceDump" report of the given node from the node repository, after verifying
 * that the node exists and is owned by the given application.
 *
 * @return the parsed report, or empty when the node has no "serviceDump" report
 * @throws NotExistsException when the node repository has no node with the given hostname
 * @throws IllegalArgumentException when the node has no owner, or is owned by another application
 */
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                  String application, String instance, String hostname) {
    Node node;
    try {
        node = nodeRepository.getNode(zone, hostname);
    } catch (IllegalArgumentException e) {
        // NOTE(review): the original cause is dropped here — presumably NotExistsException has no
        // cause-taking constructor; confirm before adding one.
        throw new NotExistsException(hostname);
    }
    ApplicationId app = ApplicationId.from(tenant, application, instance);
    ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
    if (!app.equals(owner)) {
        throw new IllegalArgumentException("Node is not owned by " + app.toFullString());
    }
    String json = node.reports().get("serviceDump");
    if (json == null) return Optional.empty();
    return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json));
}
/** Parses a source revision from the given object; "repository", "branch" and "commit" are all required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException when it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    return tenant.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/** Serializes the given tenant, with its applications or instances (filtered per request parameters). */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    // Tenant-type specific fields first.
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            // Contact info is only present when it has been resolved for this tenant.
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            toSlime(object.setObject("integrations").setObject("aws"),
                    controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
                    cloudTenant.tenantSecretStores());
            // Quota reporting is best-effort: a failure is logged but must not fail the whole response.
            try {
                var usedQuota = applications.stream()
                                            .map(Application::quotaUsage)
                                            .reduce(QuotaUsage.none, QuotaUsage::add);
                toSlime(object.setObject("quota"), usedQuota);
            } catch (Exception e) {
                log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e)));
            }
            cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role));
            toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess"));
            break;
        }
        case deleted: break;
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    // One array entry per application without instances, or per (filtered) instance otherwise.
    Cursor applicationArray = object.setArray("applications");
    for (Application application : applications) {
        DeploymentStatus status = null; // computed lazily, and only when recursing
        Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                              : application.instances().values();
        if (instances.isEmpty() && !showOnlyActiveInstances(request))
            toSlime(application.id(), applicationArray.addObject(), request);
        for (Instance instance : instances) {
            if (showOnlyActiveInstances(request) && instance.deployments().isEmpty())
                continue;
            if (recurseOverApplications(request)) {
                if (status == null) status = controller.jobController().deploymentStatus(application);
                toSlime(applicationArray.addObject(), instance, status, request);
            } else {
                toSlime(instance.id(), applicationArray.addObject(), request);
            }
        }
    }
    tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Serializes the optional AWS role and GCP member of the given archive access. */
private void toSlime(ArchiveAccess archiveAccess, Cursor object) {
    archiveAccess.awsRole().ifPresent(awsRole -> object.setString("awsRole", awsRole));
    archiveAccess.gcpMember().ifPresent(gcpMember -> object.setString("gcpMember", gcpMember));
}
/** Serializes quota usage; note that the wire field is "budgetUsed" while the source value is usage.rate(). */
private void toSlime(Cursor object, QuotaUsage usage) {
    object.setDouble("budgetUsed", usage.rate());
}
/** Serializes the given cluster resources, including their computed cost in this system. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    object.setDouble("cost", ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()));
}
/** Serializes cluster utilization: actual, ideal, current and peak load for cpu, memory and disk. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
    utilizationObject.setDouble("cpu", utilization.cpu());
    utilizationObject.setDouble("idealCpu", utilization.idealCpu());
    utilizationObject.setDouble("currentCpu", utilization.currentCpu());
    utilizationObject.setDouble("peakCpu", utilization.peakCpu());
    utilizationObject.setDouble("memory", utilization.memory());
    utilizationObject.setDouble("idealMemory", utilization.idealMemory());
    utilizationObject.setDouble("currentMemory", utilization.currentMemory());
    utilizationObject.setDouble("peakMemory", utilization.peakMemory());
    utilizationObject.setDouble("disk", utilization.disk());
    utilizationObject.setDouble("idealDisk", utilization.idealDisk());
    utilizationObject.setDouble("currentDisk", utilization.currentDisk());
    utilizationObject.setDouble("peakDisk", utilization.peakDisk());
}
/** Serializes the given scaling events as an array of { from, to, at, completion? } objects. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    for (Cluster.ScalingEvent event : scalingEvents) {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
        event.completion().ifPresent(completedAt -> eventObject.setLong("completion", completedAt.toEpochMilli()));
    }
}
/** Serializes the given node resources. */
private void toSlime(NodeResources resources, Cursor object) {
    object.setDouble("vcpu", resources.vcpu());
    object.setDouble("memoryGb", resources.memoryGb());
    object.setDouble("diskGb", resources.diskGb());
    object.setDouble("bandwidthGbps", resources.bandwidthGbps());
    object.setString("diskSpeed", valueOf(resources.diskSpeed()));
    object.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a brief view of the given tenant, with a url to its full resource, for the tenant list. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case cloud: break; // no extra metadata for cloud tenants in the list view
        case deleted: break;
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Serializes tenant activity metadata: creation, last dev deployment, last submission, and last logins. */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
    // Last dev deployment: prefer recorded deployment starts; fall back to the start of the last dev job run.
    Optional<Instant> lastDev = applications.stream()
                                            .flatMap(application -> application.instances().values().stream())
                                            .flatMap(instance -> instance.deployments().values().stream()
                                                                         .filter(deployment -> deployment.zone().environment() == Environment.dev)
                                                                         .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment)))
                                            .max(Comparator.naturalOrder())
                                            .or(() -> applications.stream()
                                                                  .flatMap(application -> application.instances().values().stream())
                                                                  .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream()
                                                                                              .filter(job -> job.environment() == Environment.dev)
                                                                                              .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                                                                  .map(Run::start)
                                                                  .max(Comparator.naturalOrder()));
    // Last submission to production: latest build time across the applications' last revisions.
    Optional<Instant> lastSubmission = applications.stream()
                                                   .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream())
                                                   .max(Comparator.naturalOrder());
    object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
    if (tenant.type() == Tenant.Type.deleted)
        object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli());
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
    // Last login, per user level; absent levels are omitted.
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
          .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
          .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
          .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    }
    catch (URISyntaxException e) {
        // All components come from an already-valid URI, so this cannot occur in practice.
        throw new RuntimeException("Will not happen", e);
    }
}
    /** Returns a copy of the given URI with the path set to the given path, and any query removed. */
    private URI withPath(String newPath, URI uri) {
        return withPathAndQuery(newPath, null, uri);
    }
private String toPath(DeploymentId id) {
return path("/application", "v4",
"tenant", id.applicationId().tenant(),
"application", id.applicationId().application(),
"instance", id.applicationId().instance(),
"environment", id.zoneId().environment(),
"region", id.zoneId().region());
}
private long asLong(String valueOrNull, long defaultWhenNull) {
if (valueOrNull == null) return defaultWhenNull;
try {
return Long.parseLong(valueOrNull);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
}
}
private Slime toSlime(InputStream jsonStream) {
try {
byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
return SlimeUtils.jsonToSlime(jsonBytes);
} catch (IOException e) {
throw new RuntimeException();
}
}
private static Principal requireUserPrincipal(HttpRequest request) {
Principal principal = request.getJDiscRequest().getUserPrincipal();
if (principal == null) throw new IllegalArgumentException("Expected a user principal");
return principal;
}
private Inspector mandatory(String key, Inspector object) {
if ( ! object.field(key).valid())
throw new IllegalArgumentException("'" + key + "' is missing");
return object.field(key);
}
    /** Returns the string value of the given field, if present. */
    private Optional<String> optional(String key, Inspector object) {
        return SlimeUtils.optionalString(object.field(key));
    }
    /** Joins the string representations of the given elements with '/' to form a URL path. */
    private static String path(Object... elements) {
        return Joiner.on("/").join(elements);
    }
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value(),
request.getUri()).toString());
}
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
object.setString("tenant", id.tenant().value());
object.setString("application", id.application().value());
object.setString("instance", id.instance().value());
object.setString("url", withPath("/application/v4" +
"/tenant/" + id.tenant().value() +
"/application/" + id.application().value() +
"/instance/" + id.instance().value(),
request.getUri()).toString());
}
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
for (ServiceInfo serviceInfo : serviceInfoList) {
Cursor serviceInfoObject = array.addObject();
serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
serviceInfoObject.setString("configId", serviceInfo.configId);
serviceInfoObject.setString("hostName", serviceInfo.hostName);
}
}
private void stringsToSlime(List<String> strings, Cursor array) {
for (String string : strings)
array.addString(string);
}
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
Cursor secretStore = object.setArray("secretStores");
tenantSecretStores.forEach(store -> {
toSlime(secretStore.addObject(), store);
});
}
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
object.setString("tenantRole", tenantRoles.containerRole());
var stores = object.setArray("accounts");
tenantSecretStores.forEach(secretStore -> {
toSlime(stores.addObject(), secretStore);
});
}
    /** Writes the name, AWS id and role of the given secret store to the given cursor. */
    private void toSlime(Cursor object, TenantSecretStore secretStore) {
        object.setString("name", secretStore.getName());
        object.setString("awsId", secretStore.getAwsId());
        object.setString("role", secretStore.getRole());
    }
private String readToString(InputStream stream) {
Scanner scanner = new Scanner(stream).useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
    /** Returns whether the request asks for recursion into tenants, or deeper. */
    private static boolean recurseOverTenants(HttpRequest request) {
        return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
    }
    /** Returns whether the request asks for recursion into applications, or deeper. */
    private static boolean recurseOverApplications(HttpRequest request) {
        return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
    }
private static boolean recurseOverDeployments(HttpRequest request) {
return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
    /** Returns whether the request asks to show only instances with production deployments. */
    private static boolean showOnlyProductionInstances(HttpRequest request) {
        return "true".equals(request.getProperty("production"));
    }
    /** Returns whether the request asks to show only instances with active deployments. */
    private static boolean showOnlyActiveInstances(HttpRequest request) {
        return "true".equals(request.getProperty("activeInstances"));
    }
    /** Returns whether the request asks to include deleted tenants. */
    private static boolean includeDeleted(HttpRequest request) {
        return "true".equals(request.getProperty("includeDeleted"));
    }
private static String tenantType(Tenant tenant) {
return switch (tenant.type()) {
case athenz: yield "ATHENS";
case cloud: yield "CLOUD";
case deleted: yield "DELETED";
};
}
    /** Builds an application id from the {tenant}, {application} and {instance} path segments. */
    private static ApplicationId appIdFromPath(Path path) {
        return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
    }
    /** Resolves the {jobtype} path segment against the zones known to this system. */
    private JobType jobTypeFromPath(Path path) {
        return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry());
    }
    /** Builds a run id from the application, job type and {number} path segments. */
    private RunId runIdFromPath(Path path) {
        long number = Long.parseLong(path.get("number")); // NumberFormatException is an IllegalArgumentException, mapped to 400 by the handler
        return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
    }
    /**
     * Handles a multipart application submission: parses the submit options and packages,
     * verifies the application's identity configuration, ensures the application exists,
     * and registers the submission with the job controller.
     */
    private HttpResponse submit(String tenant, String application, HttpRequest request) {
        Map<String, byte[]> dataParts = parseDataParts(request);
        Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
        long projectId = submitOptions.field("projectId").asLong();
        projectId = projectId == 0 ? 1 : projectId; // treat 0 (e.g. unset) as project 1
        Optional<String> repository = optional("repository", submitOptions);
        Optional<String> branch = optional("branch", submitOptions);
        Optional<String> commit = optional("commit", submitOptions);
        // A source revision is only recorded when repository, branch and commit are all given.
        Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
                ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
                : Optional.empty();
        Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
        Optional<String> authorEmail = optional("authorEmail", submitOptions);
        Optional<String> description = optional("description", submitOptions);
        int risk = (int) submitOptions.field("risk").asLong();
        // Reject relative or scheme-less source URLs up front.
        sourceUrl.map(URI::create).ifPresent(url -> {
            if (url.getHost() == null || url.getScheme() == null)
                throw new IllegalArgumentException("Source URL must include scheme and host");
        });
        ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
        byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP);
        Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);
        controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                         Optional.empty(),
                                                                         Optional.empty(),
                                                                         applicationPackage,
                                                                         Optional.of(requireUserPrincipal(request)));
        TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
        ensureApplicationExists(id, request);
        return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
    }
    /** Submits a special deployment-removal application package, removing all production deployments. */
    private HttpResponse removeAllProdDeployments(String tenant, String application) {
        JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                     TenantAndApplicationId.from(tenant, application),
                                                     new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
                                                                    Optional.empty(), Optional.empty(), Optional.empty(), 0),
                                                     0);
        return new MessageResponse("All deployments removed");
    }
    /** Parses the given environment and region as a zone, and validates that it exists in this system. */
    private ZoneId requireZone(String environment, String region) {
        return requireZone(ZoneId.from(environment, region));
    }
private ZoneId requireZone(ZoneId zone) {
if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) {
return zone;
}
if (!controller.zoneRegistry().hasZone(zone)) {
throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
}
return zone;
}
    /**
     * Parses the multipart request body into named parts. When an X-Content-Hash header
     * is present, the body is digested while it is parsed, and the computed SHA-256 digest
     * must match the base64-decoded header value.
     */
    private static Map<String, byte[]> parseDataParts(HttpRequest request) {
        String contentHash = request.getHeader("X-Content-Hash");
        if (contentHash == null)
            return new MultipartParser().parse(request);
        // Wrap the body stream in a digester so parsing and hashing happen in one pass.
        DigestInputStream digester = Signatures.sha256Digester(request.getData());
        var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
        if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
            throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
        return dataParts;
    }
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
if (instance.rotations().isEmpty()) {
throw new NotExistsException("global rotation does not exist for " + instance);
}
if (endpointId.isPresent()) {
return instance.rotations().stream()
.filter(r -> r.endpointId().id().equals(endpointId.get()))
.map(AssignedRotation::rotationId)
.findFirst()
.orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
" does not exist for " + instance));
} else if (instance.rotations().size() > 1) {
throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
}
return instance.rotations().get(0).rotationId();
}
private static String rotationStateString(RotationState state) {
return switch (state) {
case in: yield "IN";
case out: yield "OUT";
case unknown: yield "UNKNOWN";
};
}
private static String endpointScopeString(Endpoint.Scope scope) {
return switch (scope) {
case weighted: yield "weighted";
case application: yield "application";
case global: yield "global";
case zone: yield "zone";
};
}
private static String routingMethodString(RoutingMethod method) {
return switch (method) {
case exclusive: yield "exclusive";
case sharedLayer4: yield "sharedLayer4";
};
}
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
.filter(cls::isInstance)
.map(cls::cast)
.orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
return securityContext.roles().stream()
.map(Role::definition)
.anyMatch(definition -> definition == RoleDefinition.hostedOperator);
}
    /**
     * Ensures an application with the given id exists: in public systems, or when the
     * request carries an Okta context, the application is created on the fly; otherwise
     * the request fails, requiring the application to be created in the console first.
     */
    private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
        if (controller.applications().getApplication(id).isEmpty()) {
            if (controller.system().isPublic() || hasOktaContext(request)) {
                log.fine("Application does not exist in public, creating: " + id);
                var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest());
                controller.applications().createApplication(id, credentials);
            } else {
                log.fine("Application does not exist in hosted, failing: " + id);
                throw new IllegalArgumentException("Application does not exist. Create application in Console first.");
            }
        }
    }
    /**
     * Returns whether the request context contains Okta OAuth credentials.
     * NOTE(review): presence is detected by attempting to parse the credentials and
     * treating failure as absence — a cheaper presence check would avoid the
     * exception-based control flow, if the API offers one.
     */
    private boolean hasOktaContext(HttpRequest request) {
        try {
            OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context());
            return true;
        } catch (IllegalArgumentException e) {
            return false; // missing or malformed Okta context
        }
    }
private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) {
List<ZoneId> productionZones = spec.zones().stream()
.filter(z -> z.region().isPresent())
.map(z -> ZoneId.from(z.environment(), z.region().get()))
.toList();
return deployments.stream()
.sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone())))
.collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
}

class ApplicationApiHandler extends AuditLoggingRequestHandler {
    private static final ObjectMapper jsonMapper = new ObjectMapper(); // shared JSON mapper
    private final Controller controller;                       // central controller API
    private final AccessControlRequests accessControlRequests; // extracts access-control credentials from requests
    private final TestConfigSerializer testConfigSerializer;   // serializes test configuration for this system
    /** Creates this handler; arguments are injected by the container. */
    @Inject
    public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AccessControlRequests accessControlRequests) {
        super(parentCtx, controller.auditLogger());
        this.controller = controller;
        this.accessControlRequests = accessControlRequests;
        this.testConfigSerializer = new TestConfigSerializer(controller.system());
    }
    /** Requests to this API may be long-running, so a generous timeout is used. */
    @Override
    public Duration getTimeout() {
        return Duration.ofMinutes(20);
    }
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
try {
Path path = new Path(request.getUri());
return switch (request.getMethod()) {
case GET: yield handleGET(path, request);
case PUT: yield handlePUT(path, request);
case POST: yield handlePOST(path, request);
case PATCH: yield handlePATCH(path, request);
case DELETE: yield handleDELETE(path, request);
case OPTIONS: yield handleOPTIONS();
default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
};
}
catch (RestApiException.Forbidden e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (RestApiException.Unauthorized e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
return switch (e.code()) {
case NOT_FOUND: yield ErrorResponse.notFoundError(Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT: yield new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR: yield ErrorResponses.logThrowing(request, log, e);
default: yield new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
};
}
catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
    /** Routes PUT requests by path pattern; the first matching pattern wins. */
    private HttpResponse handlePUT(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
        if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
        if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
        // The bare archive-access route is the legacy alias for archive-access/aws.
        if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }
/** Dispatches a POST request on the application/v4 API to the handler for the first matching path template.
 *  Templates are tried in declaration order; more specific templates must therefore appear before general ones.
 *  Returns a 404 response when no template matches. */
private HttpResponse handlePOST(Path path, HttpRequest request) {
// Tenant, application and key management.
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
// Application-level deployment orchestration (implicit "default" instance).
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
// Instance-level operations.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
// Zone-level (environment/region) operations.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
// Legacy path ordering (.../environment/{environment}/region/{region}/instance/{instance}) kept for compatibility.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
// No template matched.
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Dispatches a PATCH request; both the application path and the instance path patch the application itself
 *  (the instance segment is ignored by the handler). Returns 404 when no template matches. */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Dispatches a DELETE request on the application/v4 API to the handler for the first matching path template.
 *  Templates are tried in declaration order. Returns a 404 response when no template matches. */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
// Tenant-level removals.
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
// Bare /archive-access maps to the AWS variant for backwards compatibility.
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
// Application-level removals (implicit "default" instance for deploy cancellation).
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
// Instance-level removals.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
// Zone-level removals.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
// Legacy path ordering (.../environment/{environment}/region/{region}/instance/{instance}) kept for compatibility.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers an OPTIONS request with an empty body and the supported methods in the Allow header. */
private HttpResponse handleOPTIONS() {
    var optionsResponse = new EmptyResponse();
    optionsResponse.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return optionsResponse;
}
/** Lists every tenant together with its applications, as a JSON array. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    List<Application> allApplications = controller.applications().asList();
    controller.tenants().asList(includeDeleted(request)).forEach(tenant -> {
        // Only this tenant's applications are serialized under its entry.
        List<Application> ownedApplications = allApplications.stream()
                                                             .filter(app -> app.id().tenant().equals(tenant.name()))
                                                             .collect(toList());
        toSlime(tenantArray.addObject(), tenant, ownedApplications, request);
    });
    return new SlimeJsonResponse(slime);
}
/** Handles GET on the API root: full recursive listing when requested, otherwise just the resource index. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request)) return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Lists all tenants (optionally including deleted ones) in the compact tenant-list format. */
private HttpResponse tenants(HttpRequest request) {
    var slime = new Slime();
    var tenantArray = slime.setArray();
    controller.tenants().asList(includeDeleted(request))
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Serializes the named tenant, or returns 404 if it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName), includeDeleted(request));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Serializes the given tenant and its applications to a JSON response. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    var slime = new Slime();
    var root = slime.setObject();
    toSlime(root, tenant, controller.applications().asList(tenant.name()), request);
    return new SlimeJsonResponse(slime);
}
/** Returns the managed-access flag, any pending access request, and the access audit log for a cloud tenant. */
private HttpResponse accessRequests(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    var accessControl = controller.serviceRegistry().accessControlService();
    var roleInformation = accessControl.getAccessRoleInformation(tenant);
    boolean managedAccess = accessControl.getManagedAccess(tenant);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("managedAccess", managedAccess);
    roleInformation.getPendingRequest().ifPresent(pending -> {
        Cursor pendingCursor = root.setObject("pendingRequest");
        pendingCursor.setString("requestTime", pending.getCreationTime());
        pendingCursor.setString("reason", pending.getReason());
    });
    Cursor logCursor = root.setArray("auditLog");
    roleInformation.getAuditLog().forEach(entry -> {
        Cursor entryCursor = logCursor.addObject();
        entryCursor.setString("created", entry.getCreationTime());
        entryCursor.setString("approver", entry.getApprover());
        entryCursor.setString("reason", entry.getReason());
        entryCursor.setString("status", entry.getAction());
    });
    return new SlimeJsonResponse(slime);
}
/** Requests ssh access for a cloud tenant; only operators may do this. */
private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
    if ( ! isOperator(request))
        return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only request access for cloud tenants");
    controller.serviceRegistry().accessControlService().requestSshAccess(tenant);
    return new MessageResponse("OK");
}
/** Approves or rejects a pending ssh access request for a cloud tenant.
 *  The expiry defaults to one day from now when not given in the request body. */
private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    Inspector inspector = toSlime(request.getData()).get();
    Instant expiry;
    if (inspector.field("expiry").valid())
        expiry = Instant.ofEpochMilli(inspector.field("expiry").asLong());
    else
        expiry = Instant.now().plus(1, ChronoUnit.DAYS);
    boolean approve = inspector.field("approve").asBool();
    controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
    return new MessageResponse("OK");
}
/** Turns managed access on for the given tenant. */
private HttpResponse addManagedAccess(String tenantName) {
return setManagedAccess(tenantName, true);
}
/** Turns managed access off for the given tenant. */
private HttpResponse removeManagedAccess(String tenantName) {
return setManagedAccess(tenantName, false);
}
/** Sets the managed-access flag for a cloud tenant and echoes the new value back. */
private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only set access privileges for cloud tenants"); // fixed typo: "privel"
    controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
    var slime = new Slime();
    slime.setObject().setBool("managedAccess", managedAccess);
    return new SlimeJsonResponse(slime);
}
/** Serializes the info of the named tenant; 404 when it does not exist or is not a cloud tenant. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName))
                                        .filter(t -> t.type() == Tenant.Type.cloud);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return tenantInfo(((CloudTenant) tenant.get()).info(), request);
}
/** Applies the handler to the named tenant if it exists and is a cloud tenant; 404 otherwise. */
private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName))
                                        .filter(t -> t.type() == Tenant.Type.cloud);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return handler.apply((CloudTenant) tenant.get());
}
/** Serializes tenant info (flat legacy format); an empty info object yields an empty JSON object. */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    var slime = new Slime();
    var root = slime.setObject();
    if (info.isEmpty()) return new SlimeJsonResponse(slime);
    root.setString("name", info.name());
    root.setString("email", info.email());
    root.setString("website", info.website());
    root.setString("contactName", info.contact().name());
    root.setString("contactEmail", info.contact().email());
    toSlime(info.address(), root);
    toSlime(info.billingContact(), root);
    toSlime(info.contacts(), root);
    return new SlimeJsonResponse(slime);
}
/** Serializes the profile section (contact, tenant company/website, address) of a cloud tenant's info. */
private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    TenantInfo info = cloudTenant.info();
    if ( ! info.isEmpty()) {
        Cursor contactCursor = root.setObject("contact");
        contactCursor.setString("name", info.contact().name());
        contactCursor.setString("email", info.contact().email());
        Cursor tenantCursor = root.setObject("tenant");
        tenantCursor.setString("company", info.name());
        tenantCursor.setString("website", info.website());
        toSlime(info.address(), root);
    }
    return new SlimeJsonResponse(slime);
}
/** Applies the handler to the named tenant and the parsed request body.
 *  Now filters on cloud tenants, consistent with the Function-based overload above: without the
 *  filter, a non-cloud tenant would hit the unchecked cast and surface as a 500 instead of a 404. */
private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
    return controller.tenants().get(tenantName)
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get()))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/** Merges the profile fields from the request body into the tenant's stored info.
 *  Fields absent from the body keep their current values; the merged result is validated before storing. */
private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
    TenantInfo currentInfo = cloudTenant.info();
    TenantContact updatedContact = TenantContact.empty()
            .withName(getString(inspector.field("contact").field("name"), currentInfo.contact().name()))
            .withEmail(getString(inspector.field("contact").field("email"), currentInfo.contact().email()));
    TenantAddress updatedAddress = updateTenantInfoAddress(inspector.field("address"), currentInfo.address());
    TenantInfo updatedInfo = currentInfo
            .withName(getString(inspector.field("tenant").field("name"), currentInfo.name()))
            .withWebsite(getString(inspector.field("tenant").field("website"), currentInfo.website()))
            .withContact(updatedContact)
            .withAddress(updatedAddress);
    validateMergedTenantInfo(updatedInfo);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withInfo(updatedInfo)));
    return new MessageResponse("Tenant info updated");
}
/** Serializes the billing section (billing contact and its address) of a cloud tenant's info. */
private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    TenantInfo info = cloudTenant.info();
    if ( ! info.isEmpty()) {
        var billing = info.billingContact();
        Cursor contactCursor = root.setObject("contact");
        contactCursor.setString("name", billing.contact().name());
        contactCursor.setString("email", billing.contact().email());
        contactCursor.setString("phone", billing.contact().phone());
        toSlime(billing.address(), root);
    }
    return new SlimeJsonResponse(slime);
}
/** Merges billing contact and address from the request body into the tenant's stored info.
 *  Fields absent from the body keep their current values. (Removed the unused local
 *  'address' — the merge below re-reads the current address directly.) */
private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    var mergedContact = updateTenantInfoContact(inspector.field("contact"), info.billingContact().contact());
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address());
    var mergedBilling = info.billingContact()
            .withContact(mergedContact)
            .withAddress(mergedAddress);
    var mergedInfo = info.withBilling(mergedBilling);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/** Serializes the tenant's contact list as a JSON response. */
private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
    Slime slime = new Slime();
    toSlime(cloudTenant.info().contacts(), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Replaces the tenant's contact list with the one in the request body (missing field keeps the current list). */
private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
    TenantContacts updatedContacts = updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.info().contacts());
    TenantInfo updatedInfo = cloudTenant.info().withContacts(updatedContacts);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withInfo(updatedInfo)));
    return new MessageResponse("Tenant info updated");
}
/** Validates merged tenant info before it is stored.
 *  @throws IllegalArgumentException when the contact name or email is blank, the email lacks '@',
 *          or a non-blank website is not a parseable URL. */
private void validateMergedTenantInfo(TenantInfo mergedInfo) {
    if (mergedInfo.contact().name().isBlank())
        throw new IllegalArgumentException("'contactName' cannot be empty");
    if (mergedInfo.contact().email().isBlank())
        throw new IllegalArgumentException("'contactEmail' cannot be empty");
    if ( ! mergedInfo.contact().email().contains("@"))
        throw new IllegalArgumentException("'contactEmail' needs to be an email address");
    if ( ! mergedInfo.website().isBlank()) {
        try {
            new URL(mergedInfo.website());
        } catch (MalformedURLException e) {
            // Keep the cause so the underlying parse failure is not lost from logs.
            throw new IllegalArgumentException("'website' needs to be a valid address", e);
        }
    }
}
/** Writes a non-empty address as an "address" object under the given cursor; empty addresses are skipped. */
private void toSlime(TenantAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;
    Cursor cursor = parentCursor.setObject("address");
    cursor.setString("addressLines", address.address());
    cursor.setString("postalCodeOrZip", address.code());
    cursor.setString("city", address.city());
    cursor.setString("stateRegionProvince", address.region());
    cursor.setString("country", address.country());
}
/** Writes a non-empty billing contact as a "billingContact" object, including its nested address. */
private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;
    Cursor billingCursor = parentCursor.setObject("billingContact");
    billingCursor.setString("name", billingContact.contact().name());
    billingCursor.setString("email", billingContact.contact().email());
    billingCursor.setString("phone", billingContact.contact().phone());
    toSlime(billingContact.address(), billingCursor);
}
/** Writes the contact list as a "contacts" array; only email contacts are currently serializable. */
private void toSlime(TenantContacts contacts, Cursor parentCursor) {
    Cursor contactsCursor = parentCursor.setArray("contacts");
    contacts.all().forEach(contact -> {
        Cursor contactCursor = contactsCursor.addObject();
        Cursor audiencesCursor = contactCursor.setArray("audiences");
        contact.audiences().forEach(audience -> audiencesCursor.addString(toAudience(audience)));
        switch (contact.type()) {
            case EMAIL -> contactCursor.setString("email", ((TenantContacts.EmailContact) contact).email());
            default -> throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
        }
    });
}
/** Parses an audience string from the API into its enum value; rejects unknown values. */
private static TenantContacts.Audience fromAudience(String value) {
    return switch (value) {
        case "tenant" -> TenantContacts.Audience.TENANT;
        case "notifications" -> TenantContacts.Audience.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
    };
}
/** Serializes an audience enum value to its API string form (inverse of fromAudience). */
private static String toAudience(TenantContacts.Audience audience) {
    return switch (audience) {
        case TENANT -> "tenant";
        case NOTIFICATIONS -> "notifications";
    };
}
/** Updates the named tenant's info from the request; 404 when the tenant is missing or not a cloud tenant. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName))
                                        .filter(t -> t.type() == Tenant.Type.cloud);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return updateTenantInfo((CloudTenant) tenant.get(), request);
}
/** Returns the trimmed string value of the field when present, otherwise the given default.
 *  (Renamed the misspelled parameter 'defaultVale' — private helper, so callers are unaffected.) */
private String getString(Inspector field, String defaultValue) {
    return field.valid() ? field.asString().trim() : defaultValue;
}
/** Merges the full (legacy, flat) tenant-info payload into the tenant's stored info.
 *  Fields absent from the body keep their current values; the merged result is validated before storing. */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo currentInfo = tenant.info();
    Inspector inspector = toSlime(request.getData()).get();
    TenantContact mergedContact = TenantContact.empty()
            .withName(getString(inspector.field("contactName"), currentInfo.contact().name()))
            .withEmail(getString(inspector.field("contactEmail"), currentInfo.contact().email()));
    TenantInfo mergedInfo = TenantInfo.empty()
            .withName(getString(inspector.field("name"), currentInfo.name()))
            .withEmail(getString(inspector.field("email"), currentInfo.email()))
            .withWebsite(getString(inspector.field("website"), currentInfo.website()))
            .withContact(mergedContact)
            .withAddress(updateTenantInfoAddress(inspector.field("address"), currentInfo.address()))
            .withBilling(updateTenantInfoBillingContact(inspector.field("billingContact"), currentInfo.billingContact()))
            .withContacts(updateTenantInfoContacts(inspector.field("contacts"), currentInfo.contacts()));
    validateMergedTenantInfo(mergedInfo);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withInfo(mergedInfo)));
    return new MessageResponse("Tenant info updated");
}
/** Merges address fields from the body over the old address.
 *  The merged address must be either fully blank or fully populated; a partial address is rejected. */
private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
    if ( ! insp.valid()) return oldAddress;
    TenantAddress merged = TenantAddress.empty()
            .withCountry(getString(insp.field("country"), oldAddress.country()))
            .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
            .withCity(getString(insp.field("city"), oldAddress.city()))
            .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
            .withAddress(getString(insp.field("addressLines"), oldAddress.address()));
    List<String> fields = List.of(merged.address(), merged.code(), merged.country(), merged.city(), merged.region());
    long blankFields = fields.stream().filter(String::isBlank).count();
    if (blankFields != 0 && blankFields != fields.size())
        throw new IllegalArgumentException("All address fields must be set");
    return merged;
}
/** Merges contact fields from the body over the old contact; a non-blank email must contain '@'.
 *  The validated email value is now reused instead of re-reading the field a second time. */
private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) {
    if ( ! insp.valid()) return oldContact;
    String email = getString(insp.field("email"), oldContact.email());
    if ( ! email.isBlank() && ! email.contains("@"))
        throw new IllegalArgumentException("'email' needs to be an email address");
    return TenantContact.empty()
                        .withName(getString(insp.field("name"), oldContact.name()))
                        .withEmail(email)
                        .withPhone(getString(insp.field("phone"), oldContact.phone()));
}
/** Merges billing contact and its nested address from the body over the old billing info. */
private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) {
    if ( ! insp.valid()) return oldContact;
    TenantContact mergedContact = updateTenantInfoContact(insp, oldContact.contact());
    TenantAddress mergedAddress = updateTenantInfoAddress(insp.field("address"), oldContact.address());
    return TenantBilling.empty().withContact(mergedContact).withAddress(mergedAddress);
}
/** Parses a replacement contact list from the body; each entry needs a valid email and audience list. */
private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) {
    if ( ! insp.valid()) return oldContacts;
    List<TenantContacts.EmailContact> parsedContacts = SlimeUtils.entriesStream(insp)
            .map(entry -> {
                String email = entry.field("email").asString().trim();
                List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(entry.field("audiences"))
                                                                    .map(audience -> fromAudience(audience.asString()))
                                                                    .toList();
                if ( ! email.contains("@"))
                    throw new IllegalArgumentException("'email' needs to be an email address");
                return new TenantContacts.EmailContact(audiences, email);
            })
            .toList();
    return new TenantContacts(parsedContacts);
}
/** Lists notifications, for one tenant when given or for all tenants with notifications otherwise.
 *  Each notification is kept only if every request property that is present ("application", "instance",
 *  "zone", "job", "type", "level") matches the corresponding source field; absent properties match everything.
 *  Messages are omitted when excludeMessages=true; the tenant field is included per the boolean flag. */
private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
boolean productionOnly = showOnlyProductionInstances(request);
boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
Slime slime = new Slime();
Cursor notificationsArray = slime.setObject().setArray("notifications");
tenant.map(t -> Stream.of(TenantName.from(t)))
.orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
.flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
.filter(notification ->
propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
.forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
return new SlimeJsonResponse(slime);
}
/** Returns true when the request property is absent, or when it is present, the value is present,
 *  and the mapped property equals the value. An absent value with a present property yields false. */
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
    String propertyValue = request.getProperty(property);
    if (propertyValue == null) return true;
    return value.isPresent() && mapper.apply(propertyValue).equals(value.get());
}
/** Serializes one notification: timestamp, level, type, optional messages, and every present source field.
 *  Optional source fields (application, instance, zone, cluster, job, run) are only written when set. */
private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
cursor.setLong("at", notification.at().toEpochMilli());
cursor.setString("level", notificationLevelAsString(notification.level()));
cursor.setString("type", notificationTypeAsString(notification.type()));
if (!excludeMessages) {
Cursor messagesArray = cursor.setArray("messages");
notification.messages().forEach(messagesArray::addString);
}
// Tenant is omitted when the response is already scoped to a single tenant.
if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
notification.source().zoneId().ifPresent(zoneId -> {
cursor.setString("environment", zoneId.environment().value());
cursor.setString("region", zoneId.region().value());
});
notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
/** Maps a notification type to its API string; submission and applicationPackage share one value. */
private static String notificationTypeAsString(Notification.Type type) {
    return switch (type) {
        case submission, applicationPackage -> "applicationPackage";
        case testPackage -> "testPackage";
        case deployment -> "deployment";
        case feedBlock -> "feedBlock";
        case reindex -> "reindex";
    };
}
private static String notificationLevelAsString(Notification.Level level) {
return switch (level) {
case info: yield "info";
case warning: yield "warning";
case error: yield "error";
};
}
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
getTenantOrThrow(tenantName);
List<Application> applications = applicationName.isEmpty() ?
controller.applications().asList(tenant) :
controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
.map(List::of)
.orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (Application application : applications) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
return new SlimeJsonResponse(slime);
}
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
ZoneId zone = type.zone();
RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision();
byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
}
private HttpResponse devApplicationPackageDiff(RunId runId) {
DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone());
return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number())
.map(ByteArrayResponse::new)
.orElseThrow(() -> new NotExistsException("No application package diff found for " + runId));
}
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
final long build;
String requestedBuild = request.getProperty("build");
if (requestedBuild != null) {
if (requestedBuild.equals("latestDeployed")) {
build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision()
.map(RevisionId::number)
.orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication));
} else {
try {
build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
}
}
} else {
build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
.map(version -> version.id().number())
.orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
}
RevisionId revision = RevisionId.forProduction(build);
boolean tests = request.getBooleanProperty("tests");
byte[] applicationPackage = tests ?
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
return new ZipResponse(filename, applicationPackage);
}
private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number))
.map(ByteArrayResponse::new)
.orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
}
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
Slime slime = new Slime();
OptionalInt allowMajor = OptionalInt.empty();
if (allowMajorParam != null) {
try {
allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
}
}
Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
slime.setObject().setString("compileVersion", compileVersion.toFullString());
return new SlimeJsonResponse(slime);
}
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName),
controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
Principal user = request.getJDiscRequest().getUserPrincipal();
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Slime root = new Slime();
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withDeveloperKey(developerKey, user);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
var awsRegion = request.getProperty("aws-region");
var parameterName = request.getProperty("parameter-name");
var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
if (!applicationId.tenant().equals(TenantName.from(tenantName)))
return ErrorResponse.badRequest("Invalid application id");
var zoneId = requireZone(ZoneId.from(request.getProperty("zone")));
var deploymentId = new DeploymentId(applicationId, zoneId);
var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
var tenantSecretStore = tenant.tenantSecretStores()
.stream()
.filter(secretStore -> secretStore.getName().equals(secretStoreName))
.findFirst();
if (tenantSecretStore.isEmpty())
return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
try {
var responseRoot = new Slime();
var responseCursor = responseRoot.setObject();
responseCursor.setString("target", deploymentId.toString());
var responseResultCursor = responseCursor.setObject("result");
var responseSlime = SlimeUtils.jsonToSlime(response);
SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
return new SlimeJsonResponse(responseRoot);
} catch (JsonParseException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
Slime root = new Slime();
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withoutDeveloperKey(developerKey);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
private void toSlime(Cursor keysArray, Map<PublicKey, ? extends Principal> keys) {
keys.forEach((key, principal) -> {
Cursor keyObject = keysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", principal.getName());
});
}
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withoutDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var awsId = mandatory("awsId", data).asString();
var externalId = mandatory("externalId", data).asString();
var role = mandatory("role", data).asString();
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var tenantSecretStore = new TenantSecretStore(name, awsId, role);
if (!tenantSecretStore.isValid()) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
}
if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
}
controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var optionalSecretStore = tenant.tenantSecretStores().stream()
.filter(secretStore -> secretStore.getName().equals(name))
.findFirst();
if (optionalSecretStore.isEmpty())
return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
var tenantSecretStore = optionalSecretStore.get();
controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var role = mandatory("role", data).asString();
if (role.isBlank()) {
return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
}
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
var access = lockedTenant.get().archiveAccess();
lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role));
controller.tenants().store(lockedTenant);
});
return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
private HttpResponse removeAwsArchiveAccess(String tenantName) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
var access = lockedTenant.get().archiveAccess();
lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole());
controller.tenants().store(lockedTenant);
});
return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
}
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var member = mandatory("member", data).asString();
if (member.isBlank()) {
return ErrorResponse.badRequest("GCP archive access role can't be whitespace only");
}
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
var access = lockedTenant.get().archiveAccess();
lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
controller.tenants().store(lockedTenant);
});
return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}
private HttpResponse removeGcpArchiveAccess(String tenantName) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
var access = lockedTenant.get().archiveAccess();
lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember());
controller.tenants().store(lockedTenant);
});
return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
Inspector majorVersionField = requestObject.field("majorVersion");
if (majorVersionField.valid()) {
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
application = application.withMajorVersion(majorVersion);
messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
}
Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
if (pemDeployKeyField.valid()) {
String pemDeployKey = pemDeployKeyField.asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
application = application.withDeployKey(deployKey);
messageBuilder.add("Added deploy key " + pemDeployKey);
}
controller.applications().store(application);
});
return new MessageResponse(messageBuilder.toString());
}
private Application getApplication(String tenantName, String applicationName) {
TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
return controller.applications().getApplication(applicationId)
.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
return controller.applications().getInstance(applicationId)
.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));
Slime slime = new Slime();
Cursor nodesArray = slime.setObject().setArray("nodes");
for (Node node : nodes) {
Cursor nodeObject = nodesArray.addObject();
nodeObject.setString("hostname", node.hostname().value());
nodeObject.setString("state", valueOf(node.state()));
node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
nodeObject.setString("orchestration", valueOf(node.serviceState()));
nodeObject.setString("version", node.currentVersion().toString());
node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
toSlime(node.resources(), nodeObject);
nodeObject.setString("clusterId", node.clusterId());
nodeObject.setString("clusterType", valueOf(node.clusterType()));
nodeObject.setBool("down", node.down());
nodeObject.setBool("retired", node.retired() || node.wantToRetire());
nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
nodeObject.setString("group", node.group());
nodeObject.setLong("index", node.index());
}
return new SlimeJsonResponse(slime);
}
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
Slime slime = new Slime();
Cursor clustersObject = slime.setObject().setObject("clusters");
for (Cluster cluster : application.clusters().values()) {
Cursor clusterObject = clustersObject.setObject(cluster.id().value());
clusterObject.setString("type", cluster.type().name());
toSlime(cluster.min(), clusterObject.setObject("min"));
toSlime(cluster.max(), clusterObject.setObject("max"));
toSlime(cluster.current(), clusterObject.setObject("current"));
if (cluster.target().isPresent()
&& ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
toSlime(cluster.target().get(), clusterObject.setObject("target"));
cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
}
return new SlimeJsonResponse(slime);
}
private static String valueOf(Node.State state) {
return switch (state) {
case failed: yield "failed";
case parked: yield "parked";
case dirty: yield "dirty";
case ready: yield "ready";
case active: yield "active";
case inactive: yield "inactive";
case reserved: yield "reserved";
case provisioned: yield "provisioned";
case breakfixed: yield "breakfixed";
case deprovisioned: yield "deprovisioned";
default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
};
}
static String valueOf(Node.ServiceState state) {
switch (state) {
case expectedUp: return "expectedUp";
case allowedDown: return "allowedDown";
case permanentlyDown: return "permanentlyDown";
case unorchestrated: return "unorchestrated";
case unknown: break;
}
return "unknown";
}
private static String valueOf(Node.ClusterType type) {
return switch (type) {
case admin: yield "admin";
case content: yield "content";
case container: yield "container";
case combined: yield "combined";
case unknown: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
};
}
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
return switch (diskSpeed) {
case fast : yield "fast";
case slow : yield "slow";
case any : yield "any";
};
}
private static String valueOf(NodeResources.StorageType storageType) {
return switch (storageType) {
case remote : yield "remote";
case local : yield "local";
case any : yield "any";
};
}
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
try (logStream) {
logStream.transferTo(outputStream);
}
}
@Override
public long maxPendingBytes() {
return 1 << 26;
}
};
}
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
}
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
Instant now = controller.clock().instant();
SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName());
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment);
return buildResponseFromProtonMetrics(protonMetrics);
}
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
var from = Optional.ofNullable(request.getProperty("from"))
.map(Long::valueOf)
.map(Instant::ofEpochSecond)
.orElse(Instant.EPOCH);
var until = Optional.ofNullable(request.getProperty("until"))
.map(Long::valueOf)
.map(Instant::ofEpochSecond)
.orElse(Instant.now(controller.clock()));
var application = ApplicationId.from(tenantName, applicationName, instanceName);
var zone = requireZone(environment, region);
var deployment = new DeploymentId(application, zone);
var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
var slime = new Slime();
var root = slime.setObject();
for (var entry : events.entrySet()) {
var serviceRoot = root.setArray(entry.getKey().clusterId().value());
scalingEventsToSlime(entry.getValue(), serviceRoot);
}
return new SlimeJsonResponse(slime);
}
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
try {
var jsonObject = jsonMapper.createObjectNode();
var jsonArray = jsonMapper.createArrayNode();
for (ProtonMetrics metrics : protonMetrics) {
jsonArray.add(metrics.toJson());
}
jsonObject.set("metrics", jsonArray);
return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
} catch (JsonProcessingException e) {
log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
return new JsonResponse(500, "");
}
}
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
boolean requireTests = ! requestObject.field("skipTests").asBool();
boolean reTrigger = requestObject.field("reTrigger").asBool();
boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
String triggered = reTrigger
? controller.applications().deploymentTrigger()
.reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
: controller.applications().deploymentTrigger()
.forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
.stream().map(job -> job.type().jobName()).collect(joining(", "));
String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
(upgradeRevision ? "" : "revision") +
( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
(upgradePlatform ? "" : "platform") +
( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
: "Triggered " + triggered + " for " + id + suppressedUpgrades);
}
private HttpResponse pause(ApplicationId id, JobType type) {
Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
controller.applications().deploymentTrigger().pauseJob(id, type, until);
return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
private HttpResponse resume(ApplicationId id, JobType type) {
controller.applications().deploymentTrigger().resumeJob(id, type);
return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
private void toSlime(Cursor object, Application application, HttpRequest request) {
object.setString("tenant", application.id().tenant().value());
object.setString("application", application.id().application().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/job/",
request.getUri()).toString());
DeploymentStatus status = controller.jobController().deploymentStatus(application);
application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
application.projectId().ifPresent(id -> object.setLong("projectId", id));
application.instances().values().stream().findFirst().ifPresent(instance -> {
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change(), application);
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
});
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
Cursor instancesArray = object.setArray("instances");
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes the given instance to Slime: deploying/outstanding changes, change blockers,
 * rotation id and all deployments of the instance.
 * Removed the unused local (instance job statuses) which was computed but never read.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), status.application());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());
        // Change blockers declared for this instance in the deployment spec.
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    addRotationId(object, instance);
    // Order deployments by the deployment spec when this instance is declared in it.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request)) // Include full deployment information.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Otherwise, only basic information and a link to the deployment resource.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Writes the id of the first rotation assigned to the instance, if any, as "rotationId". */
private void addRotationId(Cursor object, Instance instance) {
    if (instance.rotations().isEmpty()) return;
    object.setString("rotationId", instance.rotations().get(0).rotationId().asString());
}
/**
 * Serializes the given instance as the response to a request for the instance resource:
 * identifiers, latest revision info, changes, change blockers, deployments (including
 * declared or manually deployed zones with no current deployment), deploy keys,
 * metrics, activity and issue ids.
 * Removed the unused local (instance job statuses) which was computed but never read.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());

    application.revisions().last().ifPresent(version -> {
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });

    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // Currently rolling out and outstanding changes.
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);

        // Change blockers declared for this instance in the deployment spec.
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    addRotationId(object, instance);

    // Order deployments by the deployment spec when this instance is declared in it.
    List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
                                              .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                              .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();

        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }

        if (recurseOverDeployments(request)) // Include full deployment information.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Otherwise, only basic information and a link to the deployment resource.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }

    // Zones this instance has production deployment jobs or active manual deployment jobs for,
    // but no current deployment in: listed with environment and region only.
    Stream.concat(status.jobSteps().keySet().stream()
                        .filter(job -> job.application().instance().equals(instance.name()))
                        .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                  controller.jobController().active(instance.id()).stream()
                            .map(run -> run.id().job())
                            .filter(job -> job.type().environment().isManuallyDeployed()))
          .map(job -> job.type().zone())
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });

    // "pemDeployKey" (single) is kept alongside "pemDeployKeys" (all) — presumably for
    // backwards compatibility with older clients; confirm before removing.
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns full information about a single deployment of an instance, or 404 if it does not exist. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Optional<Instance> maybeInstance = controller.applications().getInstance(applicationId);
    if (maybeInstance.isEmpty())
        throw new NotExistsException(applicationId + " not found");
    Instance instance = maybeInstance.get();
    ZoneId zone = requireZone(environment, region);
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Writes the platform version and/or application revision making up the given change. */
private void toSlime(Cursor object, Change change, Application application) {
    Optional<Version> platform = change.platform();
    if (platform.isPresent())
        object.setString("version", platform.get().toString());
    change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision)));
}
/** Serializes one endpoint to Slime. Field order is kept as-is since Slime preserves insertion order. */
private void toSlime(Endpoint endpoint, Cursor object) {
    String clusterName = endpoint.cluster().value();
    object.setString("cluster", clusterName);
    object.setBool("tls", endpoint.tls());
    String url = endpoint.url().toString();
    object.setString("url", url);
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
    object.setBool("legacy", endpoint.legacy());
}
/**
 * Serializes full information about a single deployment: identifiers, endpoints, links to
 * clusters/nodes/monitoring, versions, expiry, rotation and job status, quota, cost,
 * archive URI, activity and metrics.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
response.setString("tenant", deploymentId.applicationId().tenant().value());
response.setString("application", deploymentId.applicationId().application().value());
response.setString("instance", deploymentId.applicationId().instance().value());
response.setString("environment", deploymentId.zoneId().environment().value());
response.setString("region", deploymentId.zoneId().region().value());
var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
var endpointArray = response.setArray("endpoints");
// Zone-scoped endpoints of this deployment; legacy and non-direct ones only on request.
EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
.scope(Endpoint.Scope.zone);
if (!legacyEndpoints) {
zoneEndpoints = zoneEndpoints.not().legacy().direct();
}
for (var endpoint : zoneEndpoints) {
toSlime(endpoint, endpointArray.addObject());
}
// Declared (global/application) endpoints which target this deployment.
EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
.targets(deploymentId);
if (!legacyEndpoints) {
declaredEndpoints = declaredEndpoints.not().legacy().direct();
}
for (var endpoint : declaredEndpoints) {
toSlime(endpoint, endpointArray.addObject());
}
// Links to related resources.
response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
response.setString("version", deployment.version().toFullString());
response.setString("revision", application.revisions().get(deployment.revision()).stringId());
response.setLong("build", deployment.revision().number());
// Deployment time, and expiry if the zone has a deployment TTL.
Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));
application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
var instance = application.instances().get(deploymentId.applicationId().instance());
if (instance != null) {
// Rotation status for production deployments of instances with rotations.
if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
if (!deployment.zone().environment().isManuallyDeployed()) {
// Pipeline-deployed zones: derive "status" from the deployment job's step status.
DeploymentStatus status = controller.jobController().deploymentStatus(application);
JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
Optional.ofNullable(status.jobSteps().get(jobId))
.ifPresent(stepStatus -> {
JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
response.setString("status", "complete");
else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
response.setString("status", "pending");
else
response.setString("status", "running");
});
} else {
// Manually deployed zones: derive "status" from the last run of the deployment job.
var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
deploymentRun.ifPresent(run -> {
response.setString("status", run.hasEnded() ? "complete" : "running");
});
}
}
response.setDouble("quota", deployment.quota().rate());
deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)
.ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Writes the global rotation state of a deployment under "bcpStatus". */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Lists the status of each rotation assigned to the deployment's instance under "endpointStatus". */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor array = object.setArray("endpointStatus");
    rotations.forEach(assignedRotation -> {
        Cursor statusObject = array.addObject();
        var targets = status.of(assignedRotation.rotationId());
        statusObject.setString("endpointId", assignedRotation.endpointId().id());
        statusObject.setString("rotationId", assignedRotation.rotationId().asString());
        statusObject.setString("clusterId", assignedRotation.clusterId().value());
        statusObject.setString("status", rotationStateString(status.of(assignedRotation.rotationId(), deployment)));
        statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    });
}
/** Returns the monitoring system dashboard URI for the given deployment (served as "yamasUrl"). */
private URI monitoringSystemUri(DeploymentId deploymentId) {
return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/** Sets the routing status (in or out of service) for the given deployment. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    if ( ! instance.deployments().containsKey(zone))
        throw new NotExistsException(instance + " has no deployment in " + zone);
    // Record who made the change: operators and tenants are tracked separately.
    RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value value = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(new DeploymentId(instance.id(), zone)).setRoutingStatus(value, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                           instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Returns the routing status override for the primary rotation endpoint of the given deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
              .requiresRotation()
              .primary()
              .ifPresent(endpoint -> {
                  RoutingStatus status = controller.routing().of(deploymentId).routingStatus();
                  array.addString(endpoint.upstreamName(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.value().name());
                  statusObject.setString("reason", "");
                  statusObject.setString("agent", status.agent().name());
                  statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the BCP rotation status of one rotation for the given deployment, or 404 if not deployed there. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the change (platform version and/or application revision) currently rolling out for an instance. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.revision().ifPresent(revision -> root.setString("application", revision.toString()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Proxies the /status page of a service node in the given deployment, forwarding request parameters. */
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(applicationId, requireZone(environment, region));
    Query query = Query.empty().add(request.getJDiscRequest().parameters());
    HttpURL.Path path = HttpURL.Path.parse("/status").append(restPath);
    return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), path, query);
}
/** Returns the service nodes of the given deployment, as reported by the config server. */
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    return controller.serviceRegistry().configServer().getServiceNodes(new DeploymentId(applicationId, zone));
}
/** Proxies a /state/v1 request to a service node, passing the original URL along as "forwarded-url". */
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    String forwardedUrl = HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString();
    Query query = Query.empty().add(request.getJDiscRequest().parameters())
                               .set("forwarded-url", forwardedUrl);
    return controller.serviceRegistry().configServer().getServiceNodePage(
            deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
}
/** Returns application package content of the given deployment at the given path. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    return controller.serviceRegistry().configServer().getApplicationPackageContent(new DeploymentId(applicationId, zone), restPath, request.getUri());
}
/**
 * Updates an existing tenant from the request body and returns the updated tenant.
 * Reuses the parsed {@code tenant} name instead of re-parsing it for the final lookup.
 */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // Fail fast if the tenant does not exist.
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request); // Re-read to return the stored state.
}
/**
 * Creates a tenant from the request body and returns it. In public systems the creating
 * user is also stored as the tenant's contact.
 * Reuses the parsed {@code tenant} name instead of re-parsing it for the final lookup.
 */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    if (controller.system().isPublic()) {
        // Store the creating user as the tenant's contact.
        User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
        TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
                                    .info()
                                    .withContact(TenantContact.from(user.name(), user.email()));
        controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(info);
            controller.tenants().store(lockedTenant);
        });
    }
    return tenant(controller.tenants().require(tenant), request); // Re-read to return the stored state.
}
/**
 * Creates a new application under the given tenant and returns its serialized form.
 * Removed the unused local holding the created {@code Application} — only the id is needed
 * for the response.
 */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9".
 * An empty version in the request body means the current system version. Versions not
 * active in this system are rejected unless the caller is an operator.
 */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
String versionString = readToString(request.getData());
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
StringBuilder response = new StringBuilder();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
Version version = Version.fromString(versionString);
VersionStatus versionStatus = controller.readVersionStatus();
// Empty version means "deploy the current system version".
if (version.equals(Version.emptyVersion))
version = controller.systemVersion(versionStatus);
// Only operators may force versions which are not active in this system.
if ( ! versionStatus.isActive(version) && ! isOperator(request))
throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
"Version is not active in this system. " +
"Active versions: " + versionStatus.versions()
.stream()
.map(VespaVersion::versionNumber)
.map(Version::toString)
.collect(joining(", ")));
Change change = Change.of(version);
if (pin)
change = change.withPin();
controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
response.append("Triggered ").append(change).append(" for ").append(id);
});
return new MessageResponse(response.toString());
}
/** Trigger deployment of a given build, or the last known application package when no build is given. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Inspector buildField = toSlime(request.getData()).get().field("build");
    long build = buildField.valid() ? buildField.asLong() : -1;
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        RevisionId revision;
        if (build == -1)
            revision = application.get().revisions().last().get().id();
        else
            revision = getRevision(application.get(), build);
        Change change = Change.of(revision);
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Returns the revision with the given build number, which must also still have its package stored. */
private RevisionId getRevision(Application application, long build) {
    Optional<RevisionId> match = application.revisions().withPackage().stream()
                                            .map(ApplicationVersion::id)
                                            .filter(revision -> revision.number() == build)
                                            .findFirst();
    return match.filter(revision -> controller.applications().applicationStore().hasBuild(application.id().tenant(),
                                                                                          application.id().application(),
                                                                                          build))
                .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
}
/** Marks the given build as skipped (non-deployable) and cancels any ongoing rollout of it. */
private HttpResponse cancelBuild(String tenantName, String applicationName, String build) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
    controller.applications().lockApplicationOrThrow(id, application -> {
        controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
        application.get().instances().values().stream()
                   .filter(instance -> instance.change().revision().equals(Optional.of(revision)))
                   .forEach(instance -> controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION));
    });
    return new MessageResponse("Marked build '" + build + "' as non-deployable");
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // Upper-case with a fixed locale so the enum lookup is independent of the JVM default
        // locale (e.g., 'i' does not upper-case to 'I' under the Turkish locale).
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase(java.util.Locale.ROOT));
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // Optional comma-separated filters; blank entries are ignored.
    List<String> clusterNames = Stream.ofNullable(request.getProperty("clusterId"))
                                      .flatMap(value -> Stream.of(value.split(",")))
                                      .filter(name -> ! name.isBlank())
                                      .toList();
    List<String> documentTypes = Stream.ofNullable(request.getProperty("documentType"))
                                       .flatMap(value -> Stream.of(value.split(",")))
                                       .filter(name -> ! name.isBlank())
                                       .toList();
    Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
    boolean indexedOnly = request.getBooleanProperty("indexedOnly");
    controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed);
    StringBuilder message = new StringBuilder("Requested reindexing of " + id + " in " + zone);
    if ( ! clusterNames.isEmpty()) message.append(", on clusters ").append(String.join(", ", clusterNames));
    if ( ! documentTypes.isEmpty()) message.append(", for types ").append(String.join(", ", documentTypes));
    if (indexedOnly) message.append(", for indexed types");
    if (speed != null) message.append(", with speed ").append(speed);
    return new MessageResponse(message.toString());
}
/**
 * Gets reindexing status of an application in a zone: whether reindexing is enabled, and
 * per cluster the pending (required generation) and ready (per-type status) document types.
 */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setBool("enabled", reindexing.enabled());
// Clusters sorted by name for stable output.
Cursor clustersArray = root.setArray("clusters");
reindexing.clusters().entrySet().stream().sorted(comparingByKey())
.forEach(cluster -> {
Cursor clusterObject = clustersArray.addObject();
clusterObject.setString("name", cluster.getKey());
// Document types with reindexing pending, and the generation each requires.
Cursor pendingArray = clusterObject.setArray("pending");
cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
.forEach(pending -> {
Cursor pendingObject = pendingArray.addObject();
pendingObject.setString("type", pending.getKey());
pendingObject.setLong("requiredGeneration", pending.getValue());
});
// Document types ready for (or done) reindexing, with their detailed status.
Cursor readyArray = clusterObject.setArray("ready");
cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
.forEach(ready -> {
Cursor readyObject = readyArray.addObject();
readyObject.setString("type", ready.getKey());
setStatus(readyObject, ready.getValue());
});
});
return new SlimeJsonResponse(slime);
}
/** Writes the fields of the given reindexing status to the given object, omitting absent values. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
status.message().ifPresent(message -> statusObject.setString("message", message));
status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
}
/** Returns the wire name of the given reindexing state. */
private static String toString(ApplicationReindexing.State state) {
    return switch (state) {
        case PENDING -> "pending";
        case RUNNING -> "running";
        case FAILED -> "failed";
        case SUCCESSFUL -> "successful";
    };
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}
/** Schedules a restart of a whole deployment, or of the subset selected by the request properties. */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(application, requireZone(environment, region));
    // Each filter property is optional; absent properties mean "match everything".
    Optional<HostName> hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::of);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deploymentId, new RestartFilter().withHostName(hostName)
                                                                       .withClusterType(clusterType)
                                                                       .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Sets the orchestration suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(application, requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    String verb = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(verb + " orchestration of " + deploymentId);
}
/**
 * Starts a direct deployment job of the given type for the given application,
 * using the application package from the multipart request.
 * Only manually deployed environments may be targeted, unless the caller is an operator.
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("applicationZip"))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone()),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // Parse the optional 'deployOptions' part once, rather than re-parsing the JSON for each field.
    Optional<Inspector> deployOptions = Optional.ofNullable(dataParts.get("deployOptions"))
                                                .map(json -> SlimeUtils.jsonToSlime(json).get());
    Optional<Version> version = deployOptions.flatMap(options -> optional("vespaVersion", options))
                                             .map(Version::fromString);
    boolean dryRun = deployOptions.flatMap(options -> optional("dryRun", options))
                                  .map(Boolean::valueOf)
                                  .orElse(false);

    ensureApplicationExists(TenantAndApplicationId.from(id), request);

    controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys a system application, with its application package, to the given zone.
 * Rejected unless the application is a known system application with a package,
 * no explicit Vespa version is given, the system is not currently upgrading,
 * and the current system version is known.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");

    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

    // This endpoint is only for system applications with an application package.
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) {
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
    }

    // Error out if the user tries to deploy a specific version; system applications follow the system version.
    String vespaVersion = deployOptions.field("vespaVersion").asString();
    if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) {
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
    }

    // Make it explicit that version is not yet supported here
    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading()) {
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    }
    Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
    if (systemVersion.isEmpty()) {
        throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
    }
    ActivateResult result = controller.applications()
                                      .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant; "forget" (operators only) also erases it from history. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    boolean forget = request.getBooleanProperty("forget");
    if (forget && !isOperator(request))
        return ErrorResponse.forbidden("Only operators can forget a tenant");

    TenantName tenant = TenantName.from(tenantName);
    Credentials credentials = accessControlRequests.credentials(tenant,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(tenant, Optional.of(credentials), forget);
    return new MessageResponse("Deleted tenant " + tenantName);
}
/** Deletes the given application, authorized by the credentials in the request. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteApplication(id,
                                                accessControlRequests.credentials(id.tenant(),
                                                                                  toSlime(request.getData()).get(),
                                                                                  request.getJDiscRequest()));
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance; deleting the last instance also deletes the application. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    ApplicationId instanceId = id.instance(instanceName);
    controller.applications().deleteInstance(instanceId);
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + instanceId.toFullString());
}
/** Deactivates the given deployment, aborting any deployment job still running against its zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), zone);
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
              .filter(run -> ! run.hasEnded())
              .ifPresent(run -> controller.jobController().abort(run.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
    // Fall back to the default instance when the given instance is not declared in the deployment spec.
    ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
                                   ? id : TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(prodInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(prodInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    // For non-production jobs, also include the deployment under test itself.
    ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(toTest, type.zone()));
    Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone());
    if (deployment == null)
        throw new NotExistsException(toTest + " is not deployed in " + type.zone());
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  deployment.version(),
                                                                  deployment.revision(),
                                                                  deployment.at(),
                                                                  controller.routing().readTestRunnerEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/**
 * Requests a service dump on the given node by writing a "serviceDump" report to it
 * through the node repository. Fails if a dump is already in progress (unless "force"),
 * or if the request payload lacks "configId" or a non-empty "artifacts" array.
 * With "wait", blocks until the dump completes or fails, and returns its final report.
 */
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
                                        String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);

    // Refuse to overwrite a dump that has neither completed nor failed, unless forced.
    Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
    if (report != null) {
        Cursor cursor = report.get();
        boolean force = request.getBooleanProperty("force");
        if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
            throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
        }
    }

    Slime requestPayload;
    try {
        requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
    } catch (Exception e) {
        throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
    }
    Cursor requestPayloadCursor = requestPayload.get();
    String configId = requestPayloadCursor.field("configId").asString();
    long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
    if (configId.isEmpty()) {
        throw new IllegalArgumentException("Missing configId");
    }
    Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
    int artifactEntries = artifactsCursor.entries();
    if (artifactEntries == 0) {
        throw new IllegalArgumentException("Missing or empty 'artifacts'");
    }

    // Build the dump request that will be stored as the node's "serviceDump" report.
    Slime dumpRequest = new Slime();
    Cursor dumpRequestCursor = dumpRequest.setObject();
    dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
    dumpRequestCursor.setString("configId", configId);
    Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
    for (int i = 0; i < artifactEntries; i++) {
        dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
    }
    if (expiresAt > 0) {
        dumpRequestCursor.setLong("expiresAt", expiresAt);
    }
    // Optional free-form dump options are copied through verbatim.
    Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
    if (dumpOptionsCursor.children() > 0) {
        SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
    }
    var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
    nodeRepository.updateReports(zone, hostname, reportsUpdate);
    boolean wait = request.getBooleanProperty("wait");
    if (!wait) return new MessageResponse("Request created");
    return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
}
/** Returns the current "serviceDump" report of the given node, or 404 if it has none. */
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment,
                                    String region, String hostname, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    Optional<Slime> report = getReport(nodeRepository, zone, tenant, application, instance, hostname);
    if (report.isEmpty())
        throw new NotExistsException("No service dump for node " + hostname);

    return new SlimeJsonResponse(report.get());
}
/**
 * Polls the node's "serviceDump" report every 2 seconds until it has either
 * a "completedAt" or a "failedAt" timestamp, then returns the final report.
 * NOTE(review): there is no upper bound on the wait here — presumably the
 * surrounding request handling enforces a timeout; confirm before relying on it.
 */
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                              String application, String instance, String hostname) {
    int pollInterval = 2;
    Slime report;
    while (true) {
        report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get();
        Cursor cursor = report.get();
        if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
            break;
        }
        // Effectively-final copy needed for capture in the logging lambda below.
        final Slime copyForLambda = report;
        log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda))));
        log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
        controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
    }
    return new SlimeJsonResponse(report);
}
/**
 * Reads the "serviceDump" report of the given node, verifying that the node exists
 * and is owned by the given application. Empty if the node has no such report.
 */
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                  String application, String instance, String hostname) {
    Node node;
    try {
        node = nodeRepository.getNode(zone, hostname);
    } catch (IllegalArgumentException e) {
        throw new NotExistsException(hostname);
    }
    ApplicationId expectedOwner = ApplicationId.from(tenant, application, instance);
    ApplicationId actualOwner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
    if ( ! expectedOwner.equals(actualOwner))
        throw new IllegalArgumentException("Node is not owned by " + expectedOwner.toFullString());

    return Optional.ofNullable(node.reports().get("serviceDump"))
                   .map(SlimeUtils::jsonToSlimeOrThrow);
}
/** Parses a source revision from the given object, requiring all three of its fields. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");

    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException if there is none. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    return tenant.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/**
 * Serializes a tenant, with type-specific metadata and its applications, into the given object.
 * Request properties control which instances are listed and whether deployment status is included.
 */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;

            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });

            toSlime(object, cloudTenant.tenantSecretStores());
            toSlime(object.setObject("integrations").setObject("aws"),
                    controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
                    cloudTenant.tenantSecretStores());

            // Quota lookup may fail (e.g. external service unavailable); the response is still produced.
            try {
                var usedQuota = applications.stream()
                                            .map(Application::quotaUsage)
                                            .reduce(QuotaUsage.none, QuotaUsage::add);

                toSlime(object.setObject("quota"), usedQuota);
            } catch (Exception e) {
                log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e)));
            }

            cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role));
            toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess"));

            break;
        }
        case deleted: break;
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    // List applications, or their instances — filtered and detailed per the request properties.
    Cursor applicationArray = object.setArray("applications");
    for (Application application : applications) {
        DeploymentStatus status = null;  // Computed lazily, only when recursion requires it.
        Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                              : application.instances().values();

        if (instances.isEmpty() && !showOnlyActiveInstances(request))
            toSlime(application.id(), applicationArray.addObject(), request);

        for (Instance instance : instances) {
            if (showOnlyActiveInstances(request) && instance.deployments().isEmpty())
                continue;
            if (recurseOverApplications(request)) {
                if (status == null) status = controller.jobController().deploymentStatus(application);
                toSlime(applicationArray.addObject(), instance, status, request);
            } else {
                toSlime(instance.id(), applicationArray.addObject(), request);
            }
        }
    }
    tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Serializes the archive access roles and members that are present. */
private void toSlime(ArchiveAccess archiveAccess, Cursor object) {
    archiveAccess.awsRole().ifPresent(awsRole -> object.setString("awsRole", awsRole));
    archiveAccess.gcpMember().ifPresent(gcpMember -> object.setString("gcpMember", gcpMember));
}
/** Serializes quota usage; only the usage rate ("budgetUsed") is exposed. */
private void toSlime(Cursor object, QuotaUsage usage) {
    double budgetUsed = usage.rate();
    object.setDouble("budgetUsed", budgetUsed);
}
/** Serializes cluster resources, including the cost computed for this system. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    object.setDouble("cost", ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()));
}
/** Serializes actual, ideal, current and peak utilization for cpu, memory and disk. */
private void utilizationToSlime(Cluster.Utilization util, Cursor out) {
    out.setDouble("cpu", util.cpu());
    out.setDouble("idealCpu", util.idealCpu());
    out.setDouble("currentCpu", util.currentCpu());
    out.setDouble("peakCpu", util.peakCpu());

    out.setDouble("memory", util.memory());
    out.setDouble("idealMemory", util.idealMemory());
    out.setDouble("currentMemory", util.currentMemory());
    out.setDouble("peakMemory", util.peakMemory());

    out.setDouble("disk", util.disk());
    out.setDouble("idealDisk", util.idealDisk());
    out.setDouble("currentDisk", util.currentDisk());
    out.setDouble("peakDisk", util.peakDisk());
}
/** Serializes the given scaling events, in order, into the given array. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    scalingEvents.forEach(event -> {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
        event.completion().ifPresent(completedAt -> eventObject.setLong("completion", completedAt.toEpochMilli()));
    });
}
/** Serializes the per-node resources of a cluster. */
private void toSlime(NodeResources resources, Cursor out) {
    out.setDouble("vcpu", resources.vcpu());
    out.setDouble("memoryGb", resources.memoryGb());
    out.setDouble("diskGb", resources.diskGb());
    out.setDouble("bandwidthGbps", resources.bandwidthGbps());
    out.setString("diskSpeed", valueOf(resources.diskSpeed()));
    out.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a compact entry for the given tenant, as shown in the tenant list. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz -> {
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
        }
        case cloud, deleted -> { }
        default -> throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Serializes tenant metadata: creation (and deletion) time, last dev deployment,
 * last production submission, and last logins per user level.
 */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
    // Latest dev deployment: prefer actual deployment start times; fall back to the start of the latest dev job run.
    Optional<Instant> lastDev = applications.stream()
                                            .flatMap(application -> application.instances().values().stream())
                                            .flatMap(instance -> instance.deployments().values().stream()
                                                                         .filter(deployment -> deployment.zone().environment() == Environment.dev)
                                                                         .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment)))
                                            .max(Comparator.naturalOrder())
                                            .or(() -> applications.stream()
                                                                  .flatMap(application -> application.instances().values().stream())
                                                                  .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream()
                                                                                              .filter(job -> job.environment() == Environment.dev)
                                                                                              .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                                                                  .map(Run::start)
                                                                  .max(Comparator.naturalOrder()));
    // Latest submission: build time of the most recent revision across all applications.
    Optional<Instant> lastSubmission = applications.stream()
                                                   .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream())
                                                   .max(Comparator.naturalOrder());
    object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
    if (tenant.type() == Tenant.Type.deleted)
        object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli());
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
          .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
          .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
          .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    }
    catch (URISyntaxException e) {
        // Components come from an already-valid URI, so reassembly cannot fail.
        throw new RuntimeException("Will not happen", e);
    }
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    return withPathAndQuery(newPath, null, uri);
}
/** Returns the application/v4 API path of the given deployment. */
private String toPath(DeploymentId id) {
    return path("/application", "v4",
                "tenant", id.applicationId().tenant(),
                "application", id.applicationId().application(),
                "instance", id.applicationId().instance(),
                "environment", id.zoneId().environment(),
                "region", id.zoneId().region());
}
/**
 * Parses the given value as a long, or returns the given default when it is null.
 *
 * @throws IllegalArgumentException if the value is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Preserve the cause so the original parse failure isn't lost.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/**
 * Reads the given stream (up to 1 MB) and parses it as JSON into a Slime structure.
 *
 * @throws RuntimeException wrapping any IOException from reading the stream
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Keep the cause: the original `new RuntimeException()` discarded it,
        // leaving read failures impossible to diagnose.
        throw new RuntimeException(e);
    }
}
/** Returns the user principal of the given request, or throws if there is none. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal != null)
        return principal;

    throw new IllegalArgumentException("Expected a user principal");
}
/** Returns the named field of the given object, or throws if it is missing. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");

    return field;
}
/** Returns the named string field of the given object, or empty if it is missing. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}
/** Joins the given path elements with '/' — no leading or trailing separator is added. */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}
/** Serializes an application id, with its API url, into the given object. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String path = "/application/v4" +
                  "/tenant/" + id.tenant().value() +
                  "/application/" + id.application().value();
    object.setString("url", withPath(path, request.getUri()).toString());
}
/** Serializes an instance id, with its API url, into the given object. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String path = "/application/v4" +
                  "/tenant/" + id.tenant().value() +
                  "/application/" + id.application().value() +
                  "/instance/" + id.instance().value();
    object.setString("url", withPath(path, request.getUri()).toString());
}
/**
 * Serializes a deployment activation result: revision, package size, prepare log
 * messages, and the config change actions (restarts and refeeds) it triggered.
 */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    Cursor logArray = object.setArray("prepareMessages");
    if (result.prepareResponse().log != null) {
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }

    Cursor changeObject = object.setObject("configChangeActions");

    // Services that must be restarted for the config change to take effect.
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }

    // Document types that must be refed for the config change to take effect.
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}
/** Serializes the given service infos, in order, into the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(serviceInfo -> {
        Cursor serviceInfoObject = array.addObject();
        serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
        serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
        serviceInfoObject.setString("configId", serviceInfo.configId);
        serviceInfoObject.setString("hostName", serviceInfo.hostName);
    });
}
/** Adds each of the given strings, in order, to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Serializes the given secret stores into a "secretStores" array on the given object. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor stores = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(stores.addObject(), store);
}
/** Serializes the tenant's container role and its secret store accounts. */
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    Cursor accounts = object.setArray("accounts");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(accounts.addObject(), store);
}
/** Serializes a single secret store into the given object. */
private void toSlime(Cursor out, TenantSecretStore store) {
    out.setString("name", store.getName());
    out.setString("awsId", store.getAwsId());
    out.setString("role", store.getRole());
}
/**
 * Reads the whole stream as a UTF-8 string, or returns null if the stream is empty.
 * The charset is pinned to UTF-8: the no-charset Scanner constructor decodes with the
 * platform default, which is wrong for HTTP request payloads.
 */
private String readToString(InputStream stream) {
    Scanner scanner = new Scanner(stream, "UTF-8").useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}
/** Returns whether the response should recurse into tenants (implied by recursing into applications). */
private static boolean recurseOverTenants(HttpRequest request) {
    return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the response should recurse into applications (implied by recursing into deployments). */
private static boolean recurseOverApplications(HttpRequest request) {
    return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
/** Returns whether the response should recurse into deployments ("recursive" is "all", "true" or "deployment").
 *  Note: ImmutableSet.contains is used deliberately here, as it tolerates the null
 *  returned by getProperty when the parameter is absent (Set.of would throw). */
private static boolean recurseOverDeployments(HttpRequest request) {
    return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
/** Returns whether only production instances should be listed, per the "production" request parameter. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    return "true".equals(request.getProperty("production"));
}
/** Returns whether only instances with deployments should be listed, per the "activeInstances" request parameter. */
private static boolean showOnlyActiveInstances(HttpRequest request) {
    return "true".equals(request.getProperty("activeInstances"));
}
/** Returns whether deleted entities should be included, per the "includeDeleted" request parameter. */
private static boolean includeDeleted(HttpRequest request) {
    return "true".equals(request.getProperty("includeDeleted"));
}
/** Returns the wire name of the given tenant's type. */
private static String tenantType(Tenant tenant) {
    return switch (tenant.type()) {
        case athenz -> "ATHENS";
        case cloud -> "CLOUD";
        case deleted -> "DELETED";
    };
}
/** Reads the application id from the "tenant", "application" and "instance" path segments. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Reads the job type from the "jobtype" path segment. */
private JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry());
}
/** Reads a run id from the application, job type and "number" path segments. */
private RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), Long.parseLong(path.get("number")));
}
/**
 * Submits a new application revision, with application and test packages, for
 * continuous deployment. Submit options are read from the multipart request;
 * a missing project id defaults to 1, and source revision info is only kept
 * when repository, branch and commit are all present.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    long projectId = submitOptions.field("projectId").asLong();  // Absent field reads as 0.
    projectId = projectId == 0 ? 1 : projectId;

    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
                                              ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
                                              : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    Optional<String> description = optional("description", submitOptions);
    int risk = (int) submitOptions.field("risk").asLong();

    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP);
    Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);

    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     Optional.empty(),
                                                                     Optional.empty(),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
    ensureApplicationExists(id, request);
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
}
/** Submits a deployment-removal package, which removes all production deployments for the application. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
    Submission removal = new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
                                        Optional.empty(), Optional.empty(), Optional.empty(), 0);
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, removal, 0);
    return new MessageResponse("All deployments removed");
}
/** Parses the zone given by environment and region, and validates that it exists in this system. */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    return requireZone(zone);
}
/**
 * Returns the given zone if it exists in this system.
 * The synthetic prod "controller" zone is always accepted, even though it is not in the registry.
 *
 * @throws IllegalArgumentException if the zone is unknown to this system
 */
private ZoneId requireZone(ZoneId zone) {
    boolean isControllerZone = zone.environment() == Environment.prod && zone.region().value().equals("controller");
    if ( ! isControllerZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart data parts of the given request.
 * If an X-Content-Hash header is present, the SHA-256 digest of the request body is computed
 * while the multipart content is parsed, and verified against the (base64-encoded) header value.
 *
 * @return map from part name to part content
 * @throws IllegalArgumentException if the given hash does not match the computed content hash,
 *                                  or if the header value is not valid base64
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("X-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);

    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    byte[] computedDigest = digester.getMessageDigest().digest();
    byte[] claimedDigest = Base64.getDecoder().decode(contentHash);
    // The header is untrusted input: compare digests in constant time to avoid leaking
    // digest information through timing (MessageDigest.isEqual, not Arrays.equals).
    if ( ! java.security.MessageDigest.isEqual(computedDigest, claimedDigest))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}
/**
 * Returns the rotation to use for the given instance.
 * When an endpoint id is given, the rotation assigned to that endpoint is returned; otherwise the
 * instance must have exactly one rotation, which is returned.
 *
 * @throws NotExistsException       if the instance has no rotations, or no rotation matches the endpoint id
 * @throws IllegalArgumentException if no endpoint id is given and the instance has multiple rotations
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    var rotations = instance.rotations();
    if (rotations.isEmpty())
        throw new NotExistsException("global rotation does not exist for " + instance);

    if (endpointId.isEmpty()) {
        if (rotations.size() > 1)
            throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
        return rotations.get(0).rotationId();
    }

    for (AssignedRotation rotation : rotations)
        if (rotation.endpointId().id().equals(endpointId.get()))
            return rotation.rotationId();
    throw new NotExistsException("endpoint " + endpointId.get() +
                                 " does not exist for " + instance);
}
/** Maps a rotation state to the upper-case string used in API responses. */
private static String rotationStateString(RotationState state) {
    return switch (state) {
        case in -> "IN";
        case out -> "OUT";
        case unknown -> "UNKNOWN";
    };
}
/** Maps an endpoint scope to the string used in API responses. */
private static String endpointScopeString(Endpoint.Scope scope) {
    return switch (scope) {
        case weighted -> "weighted";
        case application -> "application";
        case global -> "global";
        case zone -> "zone";
    };
}
/** Maps a routing method to the string used in API responses. */
private static String routingMethodString(RoutingMethod method) {
    return switch (method) {
        case exclusive -> "exclusive";
        case sharedLayer4 -> "sharedLayer4";
    };
}
/**
 * Returns the JDisc request context attribute with the given name, cast to the given type.
 *
 * @throws IllegalArgumentException if the attribute is absent or not of the expected type
 */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    if (cls.isInstance(value))
        return cls.cast(value);
    throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
}
/** Returns whether the given request was made by a hosted operator. */
private static boolean isOperator(HttpRequest request) {
    SecurityContext securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    for (Role role : securityContext.roles())
        if (role.definition() == RoleDefinition.hostedOperator)
            return true;
    return false;
}
/**
 * Ensures the given application exists before a submission is accepted.
 * In public systems, or when the request carries an Okta context, a missing application is
 * created on the fly; otherwise a missing application is an error.
 *
 * @throws IllegalArgumentException if the application does not exist and cannot be auto-created
 */
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
    if (controller.applications().getApplication(id).isPresent()) return;

    boolean mayAutoCreate = controller.system().isPublic() || hasOktaContext(request);
    if ( ! mayAutoCreate) {
        log.fine("Application does not exist in hosted, failing: " + id);
        throw new IllegalArgumentException("Application does not exist. Create application in Console first.");
    }
    log.fine("Application does not exist in public, creating: " + id);
    var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest());
    controller.applications().createApplication(id, credentials);
}
/**
 * Returns whether the given request carries an Okta request context.
 * Detection is done by attempting to extract Okta OAuth credentials from the JDisc request
 * context: success means an Okta context is present.
 * NOTE(review): this uses an exception for control flow — prefer a dedicated presence check
 * on OAuthCredentials if such an API becomes available.
 */
private boolean hasOktaContext(HttpRequest request) {
try {
// Probe only; the extracted credentials themselves are discarded.
OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context());
return true;
} catch (IllegalArgumentException e) {
// Extraction failed: no (usable) Okta context on this request.
return false;
}
}
/**
 * Returns the given deployments sorted by the order their zones are declared in the instance's
 * deployment spec. Deployments in zones not declared in the spec sort first, since
 * {@code List.indexOf} yields -1 for them. The returned list is unmodifiable.
 */
private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) {
    List<ZoneId> productionZones = spec.zones().stream()
                                       .filter(zone -> zone.region().isPresent())
                                       .map(zone -> ZoneId.from(zone.environment(), zone.region().get()))
                                       .toList();
    return deployments.stream()
                      .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone())))
                      .toList(); // Stream.toList() is unmodifiable — replaces collectingAndThen(toList(), unmodifiableList)
}
} |
Discussed this offline; key ID propagation and related details will change soon, once some longer-term design decisions have been made.
return coreEncryptionPublicKeyIdFlag.with(FetchVector.Dimension.NODE_TYPE, context.nodeType().name()).value();
} | return coreEncryptionPublicKeyIdFlag.with(FetchVector.Dimension.NODE_TYPE, context.nodeType().name()).value(); | private String corePublicKeyFlagValue(NodeAgentContext context) {
return coreEncryptionPublicKeyIdFlag.with(FetchVector.Dimension.NODE_TYPE, context.nodeType().name()).value();
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String METADATA2_FILE_NAME = "metadata2.json";
private static final String COMPRESSED_EXTENSION = ".zst";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final Cores cores;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final SecretSharedKeySupplier secretSharedKeySupplier;
private final BooleanFlag reportCoresViaCfgFlag;
private final StringFlag coreEncryptionPublicKeyIdFlag;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
*/
public CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
FlagSource flagSource) {
this(coreCollector, cores, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), (ctx) -> Optional.empty() /*TODO*/,
flagSource);
}
public CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
SecretSharedKeySupplier secretSharedKeySupplier, FlagSource flagSource) {
this(coreCollector, cores, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), secretSharedKeySupplier,
flagSource);
}
CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
SecretSharedKeySupplier secretSharedKeySupplier, FlagSource flagSource) {
this.coreCollector = coreCollector;
this.cores = cores;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
this.reportCoresViaCfgFlag = Flags.REPORT_CORES_VIA_CFG.bindTo(flagSource);
this.coreEncryptionPublicKeyIdFlag = Flags.CORE_ENCRYPTION_PUBLIC_KEY_ID.bindTo(flagSource);
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier,
Optional<DockerImage> dockerImage, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(context, containerCrashPath, containerProcessingPath)
.ifPresent(path -> {
if (reportCoresViaCfgFlag.with(FetchVector.Dimension.NODE_TYPE, context.nodeType().name()).value()) {
processAndReportSingleCoreDump2(context, path, dockerImage);
} else {
processAndReportSingleCoredump(context, path, nodeAttributesSupplier);
}
});
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(NodeAgentContext context, ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(context, containerCrashPath, containerProcessingPath));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<ContainerPath> enqueueCoredump(NodeAgentContext context, ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
Predicate<String> isCoreDump = filename -> !HS_ERR_PATTERN.matcher(filename).matches();
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(attributes -> {
if (isReadyForProcessing(attributes)) {
return true;
} else {
if (isCoreDump.test(attributes.filename()))
context.log(logger, attributes.path() + " is still being written");
return false;
}
})
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> isCoreDump.test(toProcess.get(i).getFileName().toString()))
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = containerProcessingPath.resolve(coredumpIdSupplier.get());
new MakeDirectory(enqueuedDir).createParents().converge(context);
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
new FileMover(path, enqueuedDir.resolve(prefix + path.getFileName())).converge(context);
});
return Optional.of(enqueuedDir);
}
void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.of(corePublicKeyFlagValue(context))
.filter(k -> !k.isEmpty())
.map(KeyId::ofString)
.flatMap(secretSharedKeySupplier::create);
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!metadataPath.exists()) {
ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
metadata.putAll(nodeAttributesSupplier.get());
metadata.put("coredump_path", doneCoredumpsPath
.resolve(context.containerName().asString())
.resolve(coredumpDirectory.getFileName().toString())
.resolve(coredumpFile.getFileName().toString()).toString());
decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
if (decryptionToken.isPresent()) {
String metadataFields = metadataWithPatchedTokenValue(metadataPath, decryptionToken.get());
metadataPath.deleteIfExists();
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
}
private String metadataWithPatchedTokenValue(UnixPath metadataPath, String decryptionToken) throws JsonProcessingException {
var jsonRoot = objectMapper.readTree(metadataPath.readUtf8File());
if (jsonRoot.path("fields").isObject()) {
((ObjectNode)jsonRoot.get("fields")).put("decryption_token", decryptionToken);
}
return objectMapper.writeValueAsString(jsonRoot);
}
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
return sharedCoreKey
.map(key -> (OutputStream)new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(key)))
.orElse(wrappedStream);
}
/**
* Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
* the entire core dump processing directory to {@link
*/
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
new FileDeleter(coreFile).converge(context);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
new MakeDirectory(newCoredumpDirectory).createParents().converge(context);
new FileMover(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()))
.converge(context);
}
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
return (ContainerPath) FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
.and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value());
node.owner().ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
);
node.membership().ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.type().value())
.add("clusterid", membership.clusterId())
);
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
dimensionsBuilder.add("system", context.zone().getSystemName().value());
return dimensionsBuilder.build();
}
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
return clock.instant().minusSeconds(60).isAfter(fileAttributes.lastModifiedTime());
}
void processAndReportSingleCoreDump2(NodeAgentContext context, ContainerPath coreDumpDirectory,
Optional<DockerImage> dockerImage) {
CoreDumpMetadata metadata = gatherMetadata(context, coreDumpDirectory);
dockerImage.ifPresent(metadata::setDockerImage);
dockerImage.flatMap(DockerImage::tag).ifPresent(metadata::setVespaVersion);
dockerImage.ifPresent(metadata::setDockerImage);
Optional<SecretSharedKey> sharedCoreKey = Optional.of(corePublicKeyFlagValue(context))
.filter(k -> !k.isEmpty())
.map(KeyId::ofString)
.flatMap(secretSharedKeySupplier::create);
sharedCoreKey.map(key -> key.sealedSharedKey().toTokenString()).ifPresent(metadata::setDecryptionToken);
String coreDumpId = coreDumpDirectory.getFileName().toString();
cores.report(context.hostname(), coreDumpId, metadata);
context.log(logger, "Core dump reported: " + coreDumpId);
finishProcessing(context, coreDumpDirectory, sharedCoreKey);
}
private CoreDumpMetadata gatherMetadata(NodeAgentContext context, ContainerPath coreDumpDirectory) {
ContainerPath metadataPath = coreDumpDirectory.resolve(METADATA2_FILE_NAME);
Optional<ReportCoreDumpRequest> request = ReportCoreDumpRequest.load(metadataPath);
if (request.isPresent()) {
return request.map(requestInstance -> {
var metadata = new CoreDumpMetadata();
requestInstance.populateMetadata(metadata, FileSystems.getDefault());
return metadata;
})
.get();
}
ContainerPath coreDumpFile = findCoredumpFileInProcessingDirectory(coreDumpDirectory);
CoreDumpMetadata metadata = coreCollector.collect2(context, coreDumpFile);
metadata.setCpuMicrocodeVersion(getMicrocodeVersion())
.setKernelVersion(System.getProperty("os.version"))
.setCoreDumpPath(doneCoredumpsPath.resolve(context.containerName().asString())
.resolve(coreDumpDirectory.getFileName().toString())
.resolve(coreDumpFile.getFileName().toString()));
ReportCoreDumpRequest requestInstance = new ReportCoreDumpRequest();
requestInstance.fillFrom(metadata);
requestInstance.save(metadataPath);
context.log(logger, "Wrote " + metadataPath.pathOnHost());
return metadata;
}
private String getMicrocodeVersion() {
String output = uncheck(() -> Files.readAllLines(Paths.get("/proc/cpuinfo")).stream()
.filter(line -> line.startsWith("microcode"))
.findFirst()
.orElse("microcode : UNKNOWN"));
String[] results = output.split(":");
if (results.length != 2) {
throw ConvergenceException.ofError("Result from detect microcode command not as expected: " + output);
}
return results[1].trim();
}
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String METADATA2_FILE_NAME = "metadata2.json";
private static final String COMPRESSED_EXTENSION = ".zst";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final Cores cores;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final SecretSharedKeySupplier secretSharedKeySupplier;
private final BooleanFlag reportCoresViaCfgFlag;
private final StringFlag coreEncryptionPublicKeyIdFlag;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
*/
public CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
FlagSource flagSource) {
this(coreCollector, cores, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), (ctx) -> Optional.empty() /*TODO*/,
flagSource);
}
public CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
SecretSharedKeySupplier secretSharedKeySupplier, FlagSource flagSource) {
this(coreCollector, cores, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), secretSharedKeySupplier,
flagSource);
}
CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
SecretSharedKeySupplier secretSharedKeySupplier, FlagSource flagSource) {
this.coreCollector = coreCollector;
this.cores = cores;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
this.reportCoresViaCfgFlag = Flags.REPORT_CORES_VIA_CFG.bindTo(flagSource);
this.coreEncryptionPublicKeyIdFlag = Flags.CORE_ENCRYPTION_PUBLIC_KEY_ID.bindTo(flagSource);
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier,
Optional<DockerImage> dockerImage, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(context, containerCrashPath, containerProcessingPath)
.ifPresent(path -> {
if (reportCoresViaCfgFlag.with(FetchVector.Dimension.NODE_TYPE, context.nodeType().name()).value()) {
processAndReportSingleCoreDump2(context, path, dockerImage);
} else {
processAndReportSingleCoredump(context, path, nodeAttributesSupplier);
}
});
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(NodeAgentContext context, ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(context, containerCrashPath, containerProcessingPath));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<ContainerPath> enqueueCoredump(NodeAgentContext context, ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
Predicate<String> isCoreDump = filename -> !HS_ERR_PATTERN.matcher(filename).matches();
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(attributes -> {
if (isReadyForProcessing(attributes)) {
return true;
} else {
if (isCoreDump.test(attributes.filename()))
context.log(logger, attributes.path() + " is still being written");
return false;
}
})
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> isCoreDump.test(toProcess.get(i).getFileName().toString()))
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = containerProcessingPath.resolve(coredumpIdSupplier.get());
new MakeDirectory(enqueuedDir).createParents().converge(context);
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
new FileMover(path, enqueuedDir.resolve(prefix + path.getFileName())).converge(context);
});
return Optional.of(enqueuedDir);
}
void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.of(corePublicKeyFlagValue(context))
.filter(k -> !k.isEmpty())
.map(KeyId::ofString)
.flatMap(secretSharedKeySupplier::create);
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!metadataPath.exists()) {
ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
metadata.putAll(nodeAttributesSupplier.get());
metadata.put("coredump_path", doneCoredumpsPath
.resolve(context.containerName().asString())
.resolve(coredumpDirectory.getFileName().toString())
.resolve(coredumpFile.getFileName().toString()).toString());
decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
if (decryptionToken.isPresent()) {
String metadataFields = metadataWithPatchedTokenValue(metadataPath, decryptionToken.get());
metadataPath.deleteIfExists();
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
}
private String metadataWithPatchedTokenValue(UnixPath metadataPath, String decryptionToken) throws JsonProcessingException {
var jsonRoot = objectMapper.readTree(metadataPath.readUtf8File());
if (jsonRoot.path("fields").isObject()) {
((ObjectNode)jsonRoot.get("fields")).put("decryption_token", decryptionToken);
}
return objectMapper.writeValueAsString(jsonRoot);
}
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
return sharedCoreKey
.map(key -> (OutputStream)new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(key)))
.orElse(wrappedStream);
}
/**
* Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
* the entire core dump processing directory to {@link
*/
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
// Compressed extension, plus the encrypted extension when the core is also encrypted.
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
// Zstd-compress the core, optionally wrapping the output in an AES-GCM encrypting stream.
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
// The uncompressed core is no longer needed once the compressed copy exists.
new FileDeleter(coreFile).converge(context);
// Move the whole processing directory under the per-container "done" directory.
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
new MakeDirectory(newCoredumpDirectory).createParents().converge(context);
new FileMover(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()))
.converge(context);
}
/**
 * Returns the single raw core dump file (neither compressed nor encrypted) directly inside
 * the given processing directory.
 *
 * @throws IllegalStateException if no such file exists
 */
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProcessingDirectory) { // fixed "Proccessing" typo
    return (ContainerPath) FileFinder.files(coredumpProcessingDirectory)
            .match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
                                                           .and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
            .maxDepth(1)
            .stream()
            .map(FileFinder.FileAttributes::path)
            .findFirst()
            .orElseThrow(() -> new IllegalStateException(
                    "No coredump file found in processing directory " + coredumpProcessingDirectory));
}
/**
 * Samples two pretagged gauges: the number of core dumps waiting to be processed in the
 * container's crash directory, and the number already moved to the per-container "done" directory.
 */
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
// Unprocessed core dumps: skip hidden files, JVM hs_err files, already
// compressed/encrypted artifacts, and metadata files.
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
// Each processed core dump occupies one directory directly under the per-container done path.
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
/**
 * Builds the pretagged metric dimensions for the node of the given context:
 * host/flavor/state/zone always, plus owner, membership and parent host when present.
 */
private Dimensions generateDimensions(NodeAgentContext context) {
    NodeSpec node = context.node();
    var builder = new Dimensions.Builder()
            .add("host", node.hostname())
            .add("flavor", node.flavor())
            .add("state", node.state().toString())
            .add("zone", context.zone().getId().value());
    node.owner().ifPresent(owner -> builder
            .add("tenantName", owner.tenant().value())
            .add("applicationName", owner.application().value())
            .add("instanceName", owner.instance().value())
            .add("app", String.join(".", owner.application().value(), owner.instance().value()))
            .add("applicationId", owner.toFullString()));
    node.membership().ifPresent(membership -> builder
            .add("clustertype", membership.type().value())
            .add("clusterid", membership.clusterId()));
    node.parentHostname().ifPresent(parent -> builder.add("parentHostname", parent));
    return builder
            .add("orchestratorState", node.orchestratorStatus().asString())
            .add("system", context.zone().getSystemName().value())
            .build();
}
/** A core dump directory is ready for processing once it has been untouched for more than one minute. */
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
    Instant cutoff = clock.instant().minusSeconds(60);
    return fileAttributes.lastModifiedTime().isBefore(cutoff);
}
/**
 * Gathers metadata for the core dump in the given directory, reports it to the core dump
 * service, and finally compresses (and optionally encrypts) the dump and moves it out of the
 * processing directory.
 */
void processAndReportSingleCoreDump2(NodeAgentContext context, ContainerPath coreDumpDirectory,
                                     Optional<DockerImage> dockerImage) {
    CoreDumpMetadata metadata = gatherMetadata(context, coreDumpDirectory);
    // Note: setDockerImage was previously invoked twice with the same value; once suffices.
    dockerImage.ifPresent(metadata::setDockerImage);
    dockerImage.flatMap(DockerImage::tag).ifPresent(metadata::setVespaVersion);
    // Encrypt the core with a per-dump shared key when a core public key is configured for this node.
    Optional<SecretSharedKey> sharedCoreKey = Optional.of(corePublicKeyFlagValue(context))
            .filter(k -> !k.isEmpty())
            .map(KeyId::ofString)
            .flatMap(secretSharedKeySupplier::create);
    sharedCoreKey.map(key -> key.sealedSharedKey().toTokenString()).ifPresent(metadata::setDecryptionToken);
    String coreDumpId = coreDumpDirectory.getFileName().toString();
    cores.report(context.hostname(), coreDumpId, metadata);
    context.log(logger, "Core dump reported: " + coreDumpId);
    finishProcessing(context, coreDumpDirectory, sharedCoreKey);
}
/**
 * Returns the metadata for the core dump in the given directory: loaded from the saved metadata
 * file when present, otherwise collected, persisted to that file, and returned.
 */
private CoreDumpMetadata gatherMetadata(NodeAgentContext context, ContainerPath coreDumpDirectory) {
    ContainerPath metadataPath = coreDumpDirectory.resolve(METADATA2_FILE_NAME);
    Optional<ReportCoreDumpRequest> request = ReportCoreDumpRequest.load(metadataPath);
    if (request.isPresent()) {
        // Metadata was already gathered and saved by a previous (possibly interrupted) run.
        var metadata = new CoreDumpMetadata();
        request.get().populateMetadata(metadata, FileSystems.getDefault());
        return metadata;
    }

    ContainerPath coreDumpFile = findCoredumpFileInProcessingDirectory(coreDumpDirectory);
    CoreDumpMetadata metadata = coreCollector.collect2(context, coreDumpFile);
    metadata.setCpuMicrocodeVersion(getMicrocodeVersion())
            .setKernelVersion(System.getProperty("os.version"))
            .setCoreDumpPath(doneCoredumpsPath.resolve(context.containerName().asString())
                                              .resolve(coreDumpDirectory.getFileName().toString())
                                              .resolve(coreDumpFile.getFileName().toString()));
    // Persist next to the dump so the load() above can reuse it on a later invocation.
    var saved = new ReportCoreDumpRequest();
    saved.fillFrom(metadata);
    saved.save(metadataPath);
    context.log(logger, "Wrote " + metadataPath.pathOnHost());
    return metadata;
}
/**
 * Returns the CPU microcode version read from /proc/cpuinfo ("microcode : value"), or
 * "UNKNOWN" when no microcode line is present.
 *
 * @throws ConvergenceException if the line cannot be split on ':'
 */
private String getMicrocodeVersion() {
    String output = uncheck(() -> Files.readAllLines(Paths.get("/proc/cpuinfo")).stream()
            .filter(line -> line.startsWith("microcode"))
            .findFirst()
            .orElse("microcode : UNKNOWN"));
    // Limit 2: only split on the first ':' so a value that itself contains ':' is not rejected.
    String[] results = output.split(":", 2);
    if (results.length != 2) {
        throw ConvergenceException.ofError("Result from detect microcode command not as expected: " + output);
    }
    return results[1].trim();
}
} |
How do you figure that? Afaik this payload is autogenerated from com.yahoo.vespa.hosted.controller.api.application.v4.model.DeploymentData, which sets tags on line 655 in ApplicationController. | public static PrepareParams fromHttpRequest(HttpRequest request, TenantName tenant, Duration barrierTimeout) {
// Reads each prepare parameter from its request property; absent properties leave the
// corresponding Builder default in place.
// NOTE(review): assumes each setter tolerates a null/absent property — confirm e.g. Tags.fromString(null).
return new Builder().ignoreValidationErrors(request.getBooleanProperty(IGNORE_VALIDATION_PARAM_NAME))
.dryRun(request.getBooleanProperty(DRY_RUN_PARAM_NAME))
.verbose(request.getBooleanProperty(VERBOSE_PARAM_NAME))
.timeoutBudget(SessionHandler.getTimeoutBudget(request, barrierTimeout))
.applicationId(createApplicationId(request, tenant))
.tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME)))
.vespaVersion(request.getProperty(VESPA_VERSION_PARAM_NAME))
.containerEndpoints(request.getProperty(CONTAINER_ENDPOINTS_PARAM_NAME))
.endpointCertificateMetadata(request.getProperty(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME))
.dockerImageRepository(request.getProperty(DOCKER_IMAGE_REPOSITORY))
.athenzDomain(request.getProperty(ATHENZ_DOMAIN))
.quota(request.getProperty(QUOTA_PARAM_NAME))
.tenantSecretStores(request.getProperty(TENANT_SECRET_STORES_PARAM_NAME))
.force(request.getBooleanProperty(FORCE_PARAM_NAME))
.waitForResourcesInPrepare(request.getBooleanProperty(WAIT_FOR_RESOURCES_IN_PREPARE))
.build();
} | .tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME))) | public static PrepareParams fromHttpRequest(HttpRequest request, TenantName tenant, Duration barrierTimeout) {
// Reads each prepare parameter from its request property; absent properties leave the
// corresponding Builder default in place.
// NOTE(review): assumes each setter tolerates a null/absent property — confirm e.g. Tags.fromString(null).
return new Builder().ignoreValidationErrors(request.getBooleanProperty(IGNORE_VALIDATION_PARAM_NAME))
.dryRun(request.getBooleanProperty(DRY_RUN_PARAM_NAME))
.verbose(request.getBooleanProperty(VERBOSE_PARAM_NAME))
.timeoutBudget(SessionHandler.getTimeoutBudget(request, barrierTimeout))
.applicationId(createApplicationId(request, tenant))
.tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME)))
.vespaVersion(request.getProperty(VESPA_VERSION_PARAM_NAME))
.containerEndpoints(request.getProperty(CONTAINER_ENDPOINTS_PARAM_NAME))
.endpointCertificateMetadata(request.getProperty(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME))
.dockerImageRepository(request.getProperty(DOCKER_IMAGE_REPOSITORY))
.athenzDomain(request.getProperty(ATHENZ_DOMAIN))
.quota(request.getProperty(QUOTA_PARAM_NAME))
.tenantSecretStores(request.getProperty(TENANT_SECRET_STORES_PARAM_NAME))
.force(request.getBooleanProperty(FORCE_PARAM_NAME))
.waitForResourcesInPrepare(request.getBooleanProperty(WAIT_FOR_RESOURCES_IN_PREPARE))
.build();
} | class Builder {
// Prepare-parameter values; each default means "not specified".
private boolean ignoreValidationErrors = false;
private boolean dryRun = false;
private boolean verbose = false;
private boolean isBootstrap = false;
private boolean force = false;
private boolean waitForResourcesInPrepare = false;
private ApplicationId applicationId = null;
private Tags tags = Tags.empty();
// Default timeout budget: 60 seconds from now.
private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60));
private Optional<Version> vespaVersion = Optional.empty();
// null until set; containerEndpoints(String) maps a null argument to an empty list.
private List<ContainerEndpoint> containerEndpoints = null;
private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
private Optional<DockerImage> dockerImageRepository = Optional.empty();
private Optional<AthenzDomain> athenzDomain = Optional.empty();
private Optional<Quota> quota = Optional.empty();
private List<TenantSecretStore> tenantSecretStores = List.of();
private List<X509Certificate> operatorCertificates = List.of();
private Optional<CloudAccount> cloudAccount = Optional.empty();
public Builder() { }
public Builder applicationId(ApplicationId applicationId) {
this.applicationId = applicationId;
return this;
}
public Builder tags(Tags tags) {
this.tags = tags;
return this;
}
public Builder ignoreValidationErrors(boolean ignoreValidationErrors) {
this.ignoreValidationErrors = ignoreValidationErrors;
return this;
}
public Builder dryRun(boolean dryRun) {
this.dryRun = dryRun;
return this;
}
public Builder verbose(boolean verbose) {
this.verbose = verbose;
return this;
}
public Builder isBootstrap(boolean isBootstrap) {
this.isBootstrap = isBootstrap;
return this;
}
public Builder timeoutBudget(TimeoutBudget timeoutBudget) {
this.timeoutBudget = timeoutBudget;
return this;
}
/** Sets the Vespa version from its string form; null or empty means "not specified". */
public Builder vespaVersion(String vespaVersion) {
    this.vespaVersion = Optional.ofNullable(vespaVersion)
                                .filter(version -> !version.isEmpty())
                                .map(Version::fromString);
    return this;
}
public Builder vespaVersion(Version vespaVersion) {
this.vespaVersion = Optional.ofNullable(vespaVersion);
return this;
}
// The String overloads below accept the JSON (slime) wire form; null means "not given".
public Builder containerEndpoints(String serialized) {
this.containerEndpoints = (serialized == null)
? List.of()
: ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
return this;
}
public Builder containerEndpointList(List<ContainerEndpoint> endpoints) {
this.containerEndpoints = endpoints;
return this;
}
public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) {
this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata);
return this;
}
public Builder endpointCertificateMetadata(String serialized) {
this.endpointCertificateMetadata = (serialized == null)
? Optional.empty()
: Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
return this;
}
public Builder dockerImageRepository(String dockerImageRepository) {
this.dockerImageRepository = (dockerImageRepository == null)
? Optional.empty()
: Optional.of(DockerImage.fromString(dockerImageRepository));
return this;
}
public Builder dockerImageRepository(DockerImage dockerImageRepository) {
this.dockerImageRepository = Optional.ofNullable(dockerImageRepository);
return this;
}
public Builder athenzDomain(String athenzDomain) {
this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from);
return this;
}
public Builder athenzDomain(AthenzDomain athenzDomain) {
this.athenzDomain = Optional.ofNullable(athenzDomain);
return this;
}
public Builder quota(Quota quota) {
this.quota = Optional.ofNullable(quota);
return this;
}
public Builder quota(String serialized) {
this.quota = (serialized == null)
? Optional.empty()
: Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
return this;
}
public Builder tenantSecretStores(String serialized) {
List<TenantSecretStore> secretStores = (serialized == null)
? List.of()
: TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get());
return tenantSecretStores(secretStores);
}
public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) {
this.tenantSecretStores = tenantSecretStores;
return this;
}
public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) {
this.waitForResourcesInPrepare = waitForResourcesInPrepare;
return this;
}
public Builder force(boolean force) {
this.force = force;
return this;
}
public Builder operatorCertificates(List<X509Certificate> operatorCertificates) {
this.operatorCertificates = List.copyOf(operatorCertificates);
return this;
}
public Builder cloudAccount(CloudAccount cloudAccount) {
this.cloudAccount = Optional.ofNullable(cloudAccount);
return this;
}
/** Assembles the PrepareParams from the accumulated builder state. */
public PrepareParams build() {
return new PrepareParams(applicationId,
tags,
timeoutBudget,
ignoreValidationErrors,
dryRun,
verbose,
isBootstrap,
vespaVersion,
containerEndpoints,
endpointCertificateMetadata,
dockerImageRepository,
athenzDomain,
quota,
tenantSecretStores,
force,
waitForResourcesInPrepare,
operatorCertificates,
cloudAccount);
}
} | class Builder {
private boolean ignoreValidationErrors = false;
private boolean dryRun = false;
private boolean verbose = false;
private boolean isBootstrap = false;
private boolean force = false;
private boolean waitForResourcesInPrepare = false;
private ApplicationId applicationId = null;
private Tags tags = Tags.empty();
private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60));
private Optional<Version> vespaVersion = Optional.empty();
private List<ContainerEndpoint> containerEndpoints = null;
private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
private Optional<DockerImage> dockerImageRepository = Optional.empty();
private Optional<AthenzDomain> athenzDomain = Optional.empty();
private Optional<Quota> quota = Optional.empty();
private List<TenantSecretStore> tenantSecretStores = List.of();
private List<X509Certificate> operatorCertificates = List.of();
private Optional<CloudAccount> cloudAccount = Optional.empty();
public Builder() { }
public Builder applicationId(ApplicationId applicationId) {
this.applicationId = applicationId;
return this;
}
public Builder tags(Tags tags) {
this.tags = tags;
return this;
}
public Builder ignoreValidationErrors(boolean ignoreValidationErrors) {
this.ignoreValidationErrors = ignoreValidationErrors;
return this;
}
public Builder dryRun(boolean dryRun) {
this.dryRun = dryRun;
return this;
}
public Builder verbose(boolean verbose) {
this.verbose = verbose;
return this;
}
public Builder isBootstrap(boolean isBootstrap) {
this.isBootstrap = isBootstrap;
return this;
}
public Builder timeoutBudget(TimeoutBudget timeoutBudget) {
this.timeoutBudget = timeoutBudget;
return this;
}
public Builder vespaVersion(String vespaVersion) {
Optional<Version> version = Optional.empty();
if (vespaVersion != null && !vespaVersion.isEmpty()) {
version = Optional.of(Version.fromString(vespaVersion));
}
this.vespaVersion = version;
return this;
}
public Builder vespaVersion(Version vespaVersion) {
this.vespaVersion = Optional.ofNullable(vespaVersion);
return this;
}
public Builder containerEndpoints(String serialized) {
this.containerEndpoints = (serialized == null)
? List.of()
: ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
return this;
}
public Builder containerEndpointList(List<ContainerEndpoint> endpoints) {
this.containerEndpoints = endpoints;
return this;
}
public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) {
this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata);
return this;
}
public Builder endpointCertificateMetadata(String serialized) {
this.endpointCertificateMetadata = (serialized == null)
? Optional.empty()
: Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
return this;
}
public Builder dockerImageRepository(String dockerImageRepository) {
this.dockerImageRepository = (dockerImageRepository == null)
? Optional.empty()
: Optional.of(DockerImage.fromString(dockerImageRepository));
return this;
}
public Builder dockerImageRepository(DockerImage dockerImageRepository) {
this.dockerImageRepository = Optional.ofNullable(dockerImageRepository);
return this;
}
public Builder athenzDomain(String athenzDomain) {
this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from);
return this;
}
public Builder athenzDomain(AthenzDomain athenzDomain) {
this.athenzDomain = Optional.ofNullable(athenzDomain);
return this;
}
public Builder quota(Quota quota) {
this.quota = Optional.ofNullable(quota);
return this;
}
public Builder quota(String serialized) {
this.quota = (serialized == null)
? Optional.empty()
: Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
return this;
}
public Builder tenantSecretStores(String serialized) {
List<TenantSecretStore> secretStores = (serialized == null)
? List.of()
: TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get());
return tenantSecretStores(secretStores);
}
public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) {
this.tenantSecretStores = tenantSecretStores;
return this;
}
public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) {
this.waitForResourcesInPrepare = waitForResourcesInPrepare;
return this;
}
public Builder force(boolean force) {
this.force = force;
return this;
}
public Builder operatorCertificates(List<X509Certificate> operatorCertificates) {
this.operatorCertificates = List.copyOf(operatorCertificates);
return this;
}
public Builder cloudAccount(CloudAccount cloudAccount) {
this.cloudAccount = Optional.ofNullable(cloudAccount);
return this;
}
public PrepareParams build() {
return new PrepareParams(applicationId,
tags,
timeoutBudget,
ignoreValidationErrors,
dryRun,
verbose,
isBootstrap,
vespaVersion,
containerEndpoints,
endpointCertificateMetadata,
dockerImageRepository,
athenzDomain,
quota,
tenantSecretStores,
force,
waitForResourcesInPrepare,
operatorCertificates,
cloudAccount);
}
} |
It is for storing all relevant deployment meta data to our audit store. If a certain override includes a file which isn't included in the default path taken here, that file will not be stored. It's not critical. Perhaps we could instead run only the include processor, and not the override one? Would that include all possible variants? (We don't care about the generated XML, but all files read by the inclusion process will be loaded to the file cache, which is then flushed to the audit storage.) | private void preProcessAndPopulateCache() {
// Runs the services.xml pre-processor (with default instance, prod environment, default region
// and no tags) — not for the resulting XML, but because every file read during pre-processing
// is pulled into the zip file cache as a side effect (see ZipArchiveCache.get).
FileWrapper servicesXml = files.wrapper().wrap(Paths.get(servicesFile));
if (servicesXml.exists())
try {
new XmlPreProcessor(files.wrapper().wrap(Paths.get("./")),
new InputStreamReader(new ByteArrayInputStream(servicesXml.content()), UTF_8),
InstanceName.defaultName(),
Environment.prod,
RegionName.defaultName(),
Tags.empty())
.run();
}
catch (IllegalArgumentException e) {
// Rethrown as-is to avoid wrapping an IllegalArgumentException in another one below.
throw e;
}
catch (Exception e) {
// Any other pre-processing failure is surfaced as an invalid application package.
throw new IllegalArgumentException(e);
}
} | Tags.empty()) | private void preProcessAndPopulateCache() {
FileWrapper servicesXml = files.wrapper().wrap(Paths.get(servicesFile));
if (servicesXml.exists())
try {
new XmlPreProcessor(files.wrapper().wrap(Paths.get("./")),
new InputStreamReader(new ByteArrayInputStream(servicesXml.content()), UTF_8),
InstanceName.defaultName(),
Environment.prod,
RegionName.defaultName(),
Tags.empty())
.run();
}
catch (IllegalArgumentException e) {
throw e;
}
catch (Exception e) {
throw new IllegalArgumentException(e);
}
} | class ApplicationPackage {
// Well-known file names inside the application package zip.
private static final String trustedCertificatesFile = "security/clients.pem";
private static final String buildMetaFile = "build-meta.json";
static final String deploymentFile = "deployment.xml";
private static final String validationOverridesFile = "validation-overrides.xml";
static final String servicesFile = "services.xml";
// SHA-1 of the raw zip content; see hash().
private final String contentHash;
// Hash of deployable content only; see bundleHash() and calculateBundleHash().
private final String bundleHash;
private final byte[] zippedContent;
private final DeploymentSpec deploymentSpec;
private final ValidationOverrides validationOverrides;
// Cache of files extracted from the zip, also used as the meta data to audit (see metaDataZip).
private final ZipArchiveCache files;
private final Optional<Version> compileVersion;
private final Optional<Instant> buildTime;
private final Optional<Version> parentVersion;
private final List<X509Certificate> trustedCertificates;
/**
 * Creates an application package from its zipped content.
 * This <b>assigns ownership</b> of the given byte array to this class;
 * it must not be further changed by the caller.
 */
public ApplicationPackage(byte[] zippedContent) {
this(zippedContent, false);
}
/**
 * Creates an application package from its zipped content.
 * This <b>assigns ownership</b> of the given byte array to this class;
 * it must not be further changed by the caller.
 * If 'requireFiles' is true, files needed by deployment orchestration must be present.
 */
@SuppressWarnings("deprecation")
public ApplicationPackage(byte[] zippedContent, boolean requireFiles) {
this.zippedContent = Objects.requireNonNull(zippedContent, "The application package content cannot be null");
this.contentHash = Hashing.sha1().hashBytes(zippedContent).toString();
// Pre-populate the cache with the well-known files so they are parsed below.
this.files = new ZipArchiveCache(zippedContent, Set.of(deploymentFile, validationOverridesFile, servicesFile, buildMetaFile, trustedCertificatesFile));
Optional<DeploymentSpec> deploymentSpec = files.get(deploymentFile).map(bytes -> new String(bytes, UTF_8)).map(DeploymentSpec::fromXml);
if (requireFiles && deploymentSpec.isEmpty())
throw new IllegalArgumentException("Missing required file '" + deploymentFile + "'");
this.deploymentSpec = deploymentSpec.orElse(DeploymentSpec.empty);
this.validationOverrides = files.get(validationOverridesFile).map(bytes -> new String(bytes, UTF_8)).map(ValidationOverrides::fromXml).orElse(ValidationOverrides.empty);
Optional<Inspector> buildMetaObject = files.get(buildMetaFile).map(SlimeUtils::jsonToSlime).map(Slime::get);
this.compileVersion = buildMetaObject.flatMap(object -> parse(object, "compileVersion", field -> Version.fromString(field.asString())));
this.buildTime = buildMetaObject.flatMap(object -> parse(object, "buildTime", field -> Instant.ofEpochMilli(field.asLong())));
this.parentVersion = buildMetaObject.flatMap(object -> parse(object, "parentVersion", field -> Version.fromString(field.asString())));
this.trustedCertificates = files.get(trustedCertificatesFile).map(bytes -> X509CertificateUtils.certificateListFromPem(new String(bytes, UTF_8))).orElse(List.of());
this.bundleHash = calculateBundleHash(zippedContent);
// Loads all files referenced from services.xml into the cache — see that method for why.
preProcessAndPopulateCache();
}
/** Returns a copy of this with the given certificate appended. */
/** Returns a copy of this package whose trusted-certificates file also contains the given certificate. */
public ApplicationPackage withTrustedCertificate(X509Certificate certificate) {
    List<X509Certificate> updated = new ArrayList<>(trustedCertificates);
    updated.add(certificate);
    byte[] pemBytes = X509CertificateUtils.toPem(updated).getBytes(UTF_8);
    ByteArrayOutputStream out = new ByteArrayOutputStream(zippedContent.length + pemBytes.length);
    ZipEntries.transferAndWrite(out, new ByteArrayInputStream(zippedContent), trustedCertificatesFile, pemBytes);
    return new ApplicationPackage(out.toByteArray());
}
/** Returns a hash (SHA-1) of the zipped content of this package */
public String hash() { return contentHash; }
/** Hash of all files and settings that influence what is deployed to config servers. */
public String bundleHash() {
return bundleHash;
}
/** Returns the content of this package. The content <b>must not</b> be modified. */
public byte[] zippedContent() { return zippedContent; }
/**
 * Returns the deployment spec from the deployment.xml file of the package content.<br>
 * This is the DeploymentSpec.empty instance if this package does not contain a deployment.xml file.<br>
 * <em>NB: <strong>Always</strong> read deployment spec from the {@link Application}, for deployment orchestration.</em>
 */
public DeploymentSpec deploymentSpec() { return deploymentSpec; }
/**
 * Returns the validation overrides from the validation-overrides.xml file of the package content.
 * This is the ValidationOverrides.empty instance if this package does not contain a validation-overrides.xml file.
 */
public ValidationOverrides validationOverrides() { return validationOverrides; }
/** Returns the platform version which package was compiled against, if known. */
public Optional<Version> compileVersion() { return compileVersion; }
/** Returns the time this package was built, if known. */
public Optional<Instant> buildTime() { return buildTime; }
/** Returns the parent version used to compile the package, if known. */
public Optional<Version> parentVersion() { return parentVersion; }
/** Returns the list of certificates trusted by this application, or an empty list if no trust configured. */
public List<X509Certificate> trustedCertificates() {
return trustedCertificates;
}
/**
 * Parses the named field of the given build-meta object with the given mapper,
 * or returns empty if the field is missing or NIX.
 *
 * @throws IllegalArgumentException if the mapper fails on a present field
 */
private static <T> Optional<T> parse(Inspector buildMetaObject, String fieldName, Function<Inspector, T> mapper) {
    Inspector field = buildMetaObject.field(fieldName);
    if (field.valid() && field.type() != NIX) {
        try {
            return Optional.of(mapper.apply(field));
        }
        catch (RuntimeException e) {
            throw new IllegalArgumentException("Failed parsing \"" + fieldName + "\" in '" + buildMetaFile + "': " + Exceptions.toMessageString(e));
        }
    }
    return Optional.empty();
}
/** Creates a valid application package that will remove all application's deployments */
public static ApplicationPackage deploymentRemoval() {
// An empty deployment spec, plus overrides for every validation id so the removal is not blocked.
return new ApplicationPackage(filesZip(Map.of(validationOverridesFile, allValidationOverrides().xmlForm().getBytes(UTF_8),
deploymentFile, DeploymentSpec.empty.xmlForm().getBytes(UTF_8))));
}
/** Returns a zip containing meta data about deployments of this package by the given job. */
public byte[] metaDataZip() {
return cacheZip();
}
// Zips every file currently present in the extraction cache; known-absent entries are skipped.
private byte[] cacheZip() {
return filesZip(files.cache.entrySet().stream()
.filter(entry -> entry.getValue().isPresent())
.collect(toMap(entry -> entry.getKey().toString(),
entry -> entry.getValue().get())));
}
/** Returns a zip archive containing the given name → content entries. */
public static byte[] filesZip(Map<String, byte[]> files) {
    int contentSize = files.values().stream().mapToInt(bytes -> bytes.length).sum();
    try (ZipBuilder builder = new ZipBuilder(contentSize + 512)) {
        files.forEach(builder::add);
        builder.close();
        return builder.toByteArray();
    }
}
/** Returns validation overrides allowing every validation id, valid for 25 days from now. */
private static ValidationOverrides allValidationOverrides() {
    String until = DateTimeFormatter.ISO_LOCAL_DATE.format(Instant.now().plus(Duration.ofDays(25)).atZone(ZoneOffset.UTC));
    StringBuilder xml = new StringBuilder(1000).append("<validation-overrides version=\"1.0\">\n");
    for (ValidationId id : ValidationId.values())
        xml.append("\t<allow until=\"").append(until).append("\">").append(id.value()).append("</allow>\n");
    return ValidationOverrides.fromXml(xml.append("</validation-overrides>\n").toString());
}
// SHA-1 is deprecated in Guava; presumably kept here for stability of existing hashes — confirm before changing.
@SuppressWarnings("deprecation")
private String calculateBundleHash(byte[] zippedContent) {
// Exclude files that do not influence what is deployed to the config servers.
Predicate<String> entryMatcher = name -> ! name.endsWith(deploymentFile) && ! name.endsWith(buildMetaFile);
SortedMap<String, Long> crcByEntry = new TreeMap<>();
Options options = Options.standard().pathPredicate(entryMatcher);
ArchiveFile file;
try (ArchiveStreamReader reader = ArchiveStreamReader.ofZip(new ByteArrayInputStream(zippedContent), options)) {
OutputStream discard = OutputStream.nullOutputStream();
while ((file = reader.readNextTo(discard)) != null) {
crcByEntry.put(file.path().toString(), file.crc32().orElse(-1));
}
}
// The sorted map gives a deterministic funnel order, independent of zip entry order.
Funnel<SortedMap<String, Long>> funnel = (from, into) -> from.forEach((key, value) -> {
into.putBytes(key.getBytes());
into.putLong(value);
});
return Hashing.sha1().newHasher()
.putObject(crcByEntry, funnel)
.putInt(deploymentSpec.deployableHashCode())
.hash().toString();
}
/** Returns the SHA-1 hash of the given bytes, in hex string form. */
@SuppressWarnings("deprecation")
public static String calculateHash(byte[] bytes) {
    // hashBytes is documented as the shortcut for newHasher().putBytes(bytes).hash().
    return Hashing.sha1().hashBytes(bytes).toString();
}
/** Maps normalized paths to cached content read from a zip archive. */
private static class ZipArchiveCache {

    /** Max size of each extracted file */
    private static final int maxSize = 10 << 20;

    private final byte[] zip;
    // Cached entries; Optional.empty() records a known-absent path so it is not treated as new.
    private final Map<Path, Optional<byte[]>> cache;

    public ZipArchiveCache(byte[] zip, Collection<String> prePopulated) {
        this.zip = zip;
        this.cache = new ConcurrentSkipListMap<>();
        this.cache.putAll(read(prePopulated));
    }

    public Optional<byte[]> get(String path) {
        return get(Paths.get(path));
    }

    public Optional<byte[]> get(Path path) {
        // Bug fix: the previous form, cache.computeIfAbsent(key, read(...)::get), evaluated
        // read(...) eagerly, re-scanning the zip on every lookup even on a cache hit.
        // The lambda defers the read to actual cache misses.
        return cache.computeIfAbsent(path.normalize(),
                                     normalized -> read(List.of(normalized.toString())).get(normalized));
    }

    /** Returns a read-only file system view over the cached zip content, rooted at "./". */
    public FileSystemWrapper wrapper() {
        return FileSystemWrapper.ofFiles(Path.of("./"),
                                         path -> get(path).isPresent(),
                                         path -> get(path).orElseThrow(() -> new NoSuchFileException(path.toString())));
    }

    /** Reads the named entries from the zip; names that are not found map to Optional.empty(). */
    private Map<Path, Optional<byte[]>> read(Collection<String> names) {
        var entries = ZipEntries.from(zip,
                                      names::contains,
                                      maxSize,
                                      true) // TODO confirm: flag presumably controls whether entry content is retained
                                .asList().stream()
                                .collect(toMap(entry -> Paths.get(entry.name()).normalize(),
                                               ZipEntries.ZipEntryWithContent::content));
        // Record requested-but-missing names explicitly, so absence is cached as well.
        names.stream().map(Paths::get).forEach(path -> entries.putIfAbsent(path.normalize(), Optional.empty()));
        return entries;
    }

}
} | class ApplicationPackage {
private static final String trustedCertificatesFile = "security/clients.pem";
private static final String buildMetaFile = "build-meta.json";
static final String deploymentFile = "deployment.xml";
private static final String validationOverridesFile = "validation-overrides.xml";
static final String servicesFile = "services.xml";
private final String contentHash;
private final String bundleHash;
private final byte[] zippedContent;
private final DeploymentSpec deploymentSpec;
private final ValidationOverrides validationOverrides;
private final ZipArchiveCache files;
private final Optional<Version> compileVersion;
private final Optional<Instant> buildTime;
private final Optional<Version> parentVersion;
private final List<X509Certificate> trustedCertificates;
/**
* Creates an application package from its zipped content.
* This <b>assigns ownership</b> of the given byte array to this class;
* it must not be further changed by the caller.
*/
public ApplicationPackage(byte[] zippedContent) {
this(zippedContent, false);
}
/**
* Creates an application package from its zipped content.
* This <b>assigns ownership</b> of the given byte array to this class;
* it must not be further changed by the caller.
* If 'requireFiles' is true, files needed by deployment orchestration must be present.
*/
@SuppressWarnings("deprecation")
public ApplicationPackage(byte[] zippedContent, boolean requireFiles) {
this.zippedContent = Objects.requireNonNull(zippedContent, "The application package content cannot be null");
this.contentHash = Hashing.sha1().hashBytes(zippedContent).toString();
this.files = new ZipArchiveCache(zippedContent, Set.of(deploymentFile, validationOverridesFile, servicesFile, buildMetaFile, trustedCertificatesFile));
Optional<DeploymentSpec> deploymentSpec = files.get(deploymentFile).map(bytes -> new String(bytes, UTF_8)).map(DeploymentSpec::fromXml);
if (requireFiles && deploymentSpec.isEmpty())
throw new IllegalArgumentException("Missing required file '" + deploymentFile + "'");
this.deploymentSpec = deploymentSpec.orElse(DeploymentSpec.empty);
this.validationOverrides = files.get(validationOverridesFile).map(bytes -> new String(bytes, UTF_8)).map(ValidationOverrides::fromXml).orElse(ValidationOverrides.empty);
Optional<Inspector> buildMetaObject = files.get(buildMetaFile).map(SlimeUtils::jsonToSlime).map(Slime::get);
this.compileVersion = buildMetaObject.flatMap(object -> parse(object, "compileVersion", field -> Version.fromString(field.asString())));
this.buildTime = buildMetaObject.flatMap(object -> parse(object, "buildTime", field -> Instant.ofEpochMilli(field.asLong())));
this.parentVersion = buildMetaObject.flatMap(object -> parse(object, "parentVersion", field -> Version.fromString(field.asString())));
this.trustedCertificates = files.get(trustedCertificatesFile).map(bytes -> X509CertificateUtils.certificateListFromPem(new String(bytes, UTF_8))).orElse(List.of());
this.bundleHash = calculateBundleHash(zippedContent);
preProcessAndPopulateCache();
}
/** Returns a copy of this with the given certificate appended. */
public ApplicationPackage withTrustedCertificate(X509Certificate certificate) {
List<X509Certificate> trustedCertificates = new ArrayList<>(this.trustedCertificates);
trustedCertificates.add(certificate);
byte[] certificatesBytes = X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8);
ByteArrayOutputStream modified = new ByteArrayOutputStream(zippedContent.length + certificatesBytes.length);
ZipEntries.transferAndWrite(modified, new ByteArrayInputStream(zippedContent), trustedCertificatesFile, certificatesBytes);
return new ApplicationPackage(modified.toByteArray());
}
/** Returns a hash of the content of this package */
public String hash() { return contentHash; }
/** Hash of all files and settings that influence what is deployed to config servers. */
public String bundleHash() {
return bundleHash;
}
/** Returns the content of this package. The content <b>must not</b> be modified. */
public byte[] zippedContent() { return zippedContent; }
/**
* Returns the deployment spec from the deployment.xml file of the package content.<br>
* This is the DeploymentSpec.empty instance if this package does not contain a deployment.xml file.<br>
* <em>NB: <strong>Always</strong> read deployment spec from the {@link Application}, for deployment orchestration.</em>
*/
public DeploymentSpec deploymentSpec() { return deploymentSpec; }
/**
* Returns the validation overrides from the validation-overrides.xml file of the package content.
* This is the ValidationOverrides.empty instance if this package does not contain a validation-overrides.xml file.
*/
public ValidationOverrides validationOverrides() { return validationOverrides; }
/** Returns the platform version which package was compiled against, if known. */
public Optional<Version> compileVersion() { return compileVersion; }
/** Returns the time this package was built, if known. */
public Optional<Instant> buildTime() { return buildTime; }
/** Returns the parent version used to compile the package, if known. */
public Optional<Version> parentVersion() { return parentVersion; }
/** Returns the list of certificates trusted by this application, or an empty list if no trust configured. */
public List<X509Certificate> trustedCertificates() {
return trustedCertificates;
}
private static <Type> Optional<Type> parse(Inspector buildMetaObject, String fieldName, Function<Inspector, Type> mapper) {
Inspector field = buildMetaObject.field(fieldName);
if ( ! field.valid() || field.type() == NIX)
return Optional.empty();
try {
return Optional.of(mapper.apply(buildMetaObject.field(fieldName)));
}
catch (RuntimeException e) {
throw new IllegalArgumentException("Failed parsing \"" + fieldName + "\" in '" + buildMetaFile + "': " + Exceptions.toMessageString(e));
}
}
/** Creates a valid application package that will remove all application's deployments */
public static ApplicationPackage deploymentRemoval() {
return new ApplicationPackage(filesZip(Map.of(validationOverridesFile, allValidationOverrides().xmlForm().getBytes(UTF_8),
deploymentFile, DeploymentSpec.empty.xmlForm().getBytes(UTF_8))));
}
/** Returns a zip containing meta data about deployments of this package by the given job. */
public byte[] metaDataZip() {
return cacheZip();
}
private byte[] cacheZip() {
return filesZip(files.cache.entrySet().stream()
.filter(entry -> entry.getValue().isPresent())
.collect(toMap(entry -> entry.getKey().toString(),
entry -> entry.getValue().get())));
}
public static byte[] filesZip(Map<String, byte[]> files) {
try (ZipBuilder zipBuilder = new ZipBuilder(files.values().stream().mapToInt(bytes -> bytes.length).sum() + 512)) {
files.forEach(zipBuilder::add);
zipBuilder.close();
return zipBuilder.toByteArray();
}
}
private static ValidationOverrides allValidationOverrides() {
String until = DateTimeFormatter.ISO_LOCAL_DATE.format(Instant.now().plus(Duration.ofDays(25)).atZone(ZoneOffset.UTC));
StringBuilder validationOverridesContents = new StringBuilder(1000);
validationOverridesContents.append("<validation-overrides version=\"1.0\">\n");
for (ValidationId validationId: ValidationId.values())
validationOverridesContents.append("\t<allow until=\"").append(until).append("\">").append(validationId.value()).append("</allow>\n");
validationOverridesContents.append("</validation-overrides>\n");
return ValidationOverrides.fromXml(validationOverridesContents.toString());
}
@SuppressWarnings("deprecation")
private String calculateBundleHash(byte[] zippedContent) {
Predicate<String> entryMatcher = name -> ! name.endsWith(deploymentFile) && ! name.endsWith(buildMetaFile);
SortedMap<String, Long> crcByEntry = new TreeMap<>();
Options options = Options.standard().pathPredicate(entryMatcher);
ArchiveFile file;
try (ArchiveStreamReader reader = ArchiveStreamReader.ofZip(new ByteArrayInputStream(zippedContent), options)) {
OutputStream discard = OutputStream.nullOutputStream();
while ((file = reader.readNextTo(discard)) != null) {
crcByEntry.put(file.path().toString(), file.crc32().orElse(-1));
}
}
Funnel<SortedMap<String, Long>> funnel = (from, into) -> from.forEach((key, value) -> {
into.putBytes(key.getBytes());
into.putLong(value);
});
return Hashing.sha1().newHasher()
.putObject(crcByEntry, funnel)
.putInt(deploymentSpec.deployableHashCode())
.hash().toString();
}
@SuppressWarnings("deprecation")
public static String calculateHash(byte[] bytes) {
return Hashing.sha1().newHasher()
.putBytes(bytes)
.hash().toString();
}
/** Maps normalized paths to cached content read from a zip archive. */
private static class ZipArchiveCache {
/** Max size of each extracted file */
private static final int maxSize = 10 << 20;
private final byte[] zip;
private final Map<Path, Optional<byte[]>> cache;
public ZipArchiveCache(byte[] zip, Collection<String> prePopulated) {
this.zip = zip;
this.cache = new ConcurrentSkipListMap<>();
this.cache.putAll(read(prePopulated));
}
public Optional<byte[]> get(String path) {
return get(Paths.get(path));
}
public Optional<byte[]> get(Path path) {
return cache.computeIfAbsent(path.normalize(), read(List.of(path.normalize().toString()))::get);
}
public FileSystemWrapper wrapper() {
return FileSystemWrapper.ofFiles(Path.of("./"),
path -> get(path).isPresent(),
path -> get(path).orElseThrow(() -> new NoSuchFileException(path.toString())));
}
private Map<Path, Optional<byte[]>> read(Collection<String> names) {
var entries = ZipEntries.from(zip,
name -> names.contains(name),
maxSize,
true)
.asList().stream()
.collect(toMap(entry -> Paths.get(entry.name()).normalize(),
ZipEntries.ZipEntryWithContent::content));
names.stream().map(Paths::get).forEach(path -> entries.putIfAbsent(path.normalize(), Optional.empty()));
return entries;
}
}
} |
You don't need to fix this in this PR. | private void preProcessAndPopulateCache() {
FileWrapper servicesXml = files.wrapper().wrap(Paths.get(servicesFile));
if (servicesXml.exists())
try {
new XmlPreProcessor(files.wrapper().wrap(Paths.get("./")),
new InputStreamReader(new ByteArrayInputStream(servicesXml.content()), UTF_8),
InstanceName.defaultName(),
Environment.prod,
RegionName.defaultName(),
Tags.empty())
.run();
}
catch (IllegalArgumentException e) {
throw e;
}
catch (Exception e) {
throw new IllegalArgumentException(e);
}
} | Tags.empty()) | private void preProcessAndPopulateCache() {
FileWrapper servicesXml = files.wrapper().wrap(Paths.get(servicesFile));
if (servicesXml.exists())
try {
new XmlPreProcessor(files.wrapper().wrap(Paths.get("./")),
new InputStreamReader(new ByteArrayInputStream(servicesXml.content()), UTF_8),
InstanceName.defaultName(),
Environment.prod,
RegionName.defaultName(),
Tags.empty())
.run();
}
catch (IllegalArgumentException e) {
throw e;
}
catch (Exception e) {
throw new IllegalArgumentException(e);
}
} | class ApplicationPackage {
private static final String trustedCertificatesFile = "security/clients.pem";
private static final String buildMetaFile = "build-meta.json";
static final String deploymentFile = "deployment.xml";
private static final String validationOverridesFile = "validation-overrides.xml";
static final String servicesFile = "services.xml";
private final String contentHash;
private final String bundleHash;
private final byte[] zippedContent;
private final DeploymentSpec deploymentSpec;
private final ValidationOverrides validationOverrides;
private final ZipArchiveCache files;
private final Optional<Version> compileVersion;
private final Optional<Instant> buildTime;
private final Optional<Version> parentVersion;
private final List<X509Certificate> trustedCertificates;
/**
* Creates an application package from its zipped content.
* This <b>assigns ownership</b> of the given byte array to this class;
* it must not be further changed by the caller.
*/
public ApplicationPackage(byte[] zippedContent) {
this(zippedContent, false);
}
/**
* Creates an application package from its zipped content.
* This <b>assigns ownership</b> of the given byte array to this class;
* it must not be further changed by the caller.
* If 'requireFiles' is true, files needed by deployment orchestration must be present.
*/
@SuppressWarnings("deprecation")
public ApplicationPackage(byte[] zippedContent, boolean requireFiles) {
this.zippedContent = Objects.requireNonNull(zippedContent, "The application package content cannot be null");
this.contentHash = Hashing.sha1().hashBytes(zippedContent).toString();
this.files = new ZipArchiveCache(zippedContent, Set.of(deploymentFile, validationOverridesFile, servicesFile, buildMetaFile, trustedCertificatesFile));
Optional<DeploymentSpec> deploymentSpec = files.get(deploymentFile).map(bytes -> new String(bytes, UTF_8)).map(DeploymentSpec::fromXml);
if (requireFiles && deploymentSpec.isEmpty())
throw new IllegalArgumentException("Missing required file '" + deploymentFile + "'");
this.deploymentSpec = deploymentSpec.orElse(DeploymentSpec.empty);
this.validationOverrides = files.get(validationOverridesFile).map(bytes -> new String(bytes, UTF_8)).map(ValidationOverrides::fromXml).orElse(ValidationOverrides.empty);
Optional<Inspector> buildMetaObject = files.get(buildMetaFile).map(SlimeUtils::jsonToSlime).map(Slime::get);
this.compileVersion = buildMetaObject.flatMap(object -> parse(object, "compileVersion", field -> Version.fromString(field.asString())));
this.buildTime = buildMetaObject.flatMap(object -> parse(object, "buildTime", field -> Instant.ofEpochMilli(field.asLong())));
this.parentVersion = buildMetaObject.flatMap(object -> parse(object, "parentVersion", field -> Version.fromString(field.asString())));
this.trustedCertificates = files.get(trustedCertificatesFile).map(bytes -> X509CertificateUtils.certificateListFromPem(new String(bytes, UTF_8))).orElse(List.of());
this.bundleHash = calculateBundleHash(zippedContent);
preProcessAndPopulateCache();
}
/** Returns a copy of this with the given certificate appended. */
public ApplicationPackage withTrustedCertificate(X509Certificate certificate) {
List<X509Certificate> trustedCertificates = new ArrayList<>(this.trustedCertificates);
trustedCertificates.add(certificate);
byte[] certificatesBytes = X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8);
ByteArrayOutputStream modified = new ByteArrayOutputStream(zippedContent.length + certificatesBytes.length);
ZipEntries.transferAndWrite(modified, new ByteArrayInputStream(zippedContent), trustedCertificatesFile, certificatesBytes);
return new ApplicationPackage(modified.toByteArray());
}
/** Returns a hash of the content of this package */
public String hash() { return contentHash; }
/** Hash of all files and settings that influence what is deployed to config servers. */
public String bundleHash() {
return bundleHash;
}
/** Returns the content of this package. The content <b>must not</b> be modified. */
public byte[] zippedContent() { return zippedContent; }
/**
* Returns the deployment spec from the deployment.xml file of the package content.<br>
* This is the DeploymentSpec.empty instance if this package does not contain a deployment.xml file.<br>
* <em>NB: <strong>Always</strong> read deployment spec from the {@link Application}, for deployment orchestration.</em>
*/
public DeploymentSpec deploymentSpec() { return deploymentSpec; }
/**
* Returns the validation overrides from the validation-overrides.xml file of the package content.
* This is the ValidationOverrides.empty instance if this package does not contain a validation-overrides.xml file.
*/
public ValidationOverrides validationOverrides() { return validationOverrides; }
/** Returns the platform version which package was compiled against, if known. */
public Optional<Version> compileVersion() { return compileVersion; }
/** Returns the time this package was built, if known. */
public Optional<Instant> buildTime() { return buildTime; }
/** Returns the parent version used to compile the package, if known. */
public Optional<Version> parentVersion() { return parentVersion; }
/** Returns the list of certificates trusted by this application, or an empty list if no trust configured. */
public List<X509Certificate> trustedCertificates() {
return trustedCertificates;
}
private static <Type> Optional<Type> parse(Inspector buildMetaObject, String fieldName, Function<Inspector, Type> mapper) {
Inspector field = buildMetaObject.field(fieldName);
if ( ! field.valid() || field.type() == NIX)
return Optional.empty();
try {
return Optional.of(mapper.apply(buildMetaObject.field(fieldName)));
}
catch (RuntimeException e) {
throw new IllegalArgumentException("Failed parsing \"" + fieldName + "\" in '" + buildMetaFile + "': " + Exceptions.toMessageString(e));
}
}
/** Creates a valid application package that will remove all application's deployments */
public static ApplicationPackage deploymentRemoval() {
return new ApplicationPackage(filesZip(Map.of(validationOverridesFile, allValidationOverrides().xmlForm().getBytes(UTF_8),
deploymentFile, DeploymentSpec.empty.xmlForm().getBytes(UTF_8))));
}
/** Returns a zip containing meta data about deployments of this package by the given job. */
public byte[] metaDataZip() {
return cacheZip();
}
private byte[] cacheZip() {
return filesZip(files.cache.entrySet().stream()
.filter(entry -> entry.getValue().isPresent())
.collect(toMap(entry -> entry.getKey().toString(),
entry -> entry.getValue().get())));
}
public static byte[] filesZip(Map<String, byte[]> files) {
try (ZipBuilder zipBuilder = new ZipBuilder(files.values().stream().mapToInt(bytes -> bytes.length).sum() + 512)) {
files.forEach(zipBuilder::add);
zipBuilder.close();
return zipBuilder.toByteArray();
}
}
private static ValidationOverrides allValidationOverrides() {
String until = DateTimeFormatter.ISO_LOCAL_DATE.format(Instant.now().plus(Duration.ofDays(25)).atZone(ZoneOffset.UTC));
StringBuilder validationOverridesContents = new StringBuilder(1000);
validationOverridesContents.append("<validation-overrides version=\"1.0\">\n");
for (ValidationId validationId: ValidationId.values())
validationOverridesContents.append("\t<allow until=\"").append(until).append("\">").append(validationId.value()).append("</allow>\n");
validationOverridesContents.append("</validation-overrides>\n");
return ValidationOverrides.fromXml(validationOverridesContents.toString());
}
@SuppressWarnings("deprecation")
private String calculateBundleHash(byte[] zippedContent) {
Predicate<String> entryMatcher = name -> ! name.endsWith(deploymentFile) && ! name.endsWith(buildMetaFile);
SortedMap<String, Long> crcByEntry = new TreeMap<>();
Options options = Options.standard().pathPredicate(entryMatcher);
ArchiveFile file;
try (ArchiveStreamReader reader = ArchiveStreamReader.ofZip(new ByteArrayInputStream(zippedContent), options)) {
OutputStream discard = OutputStream.nullOutputStream();
while ((file = reader.readNextTo(discard)) != null) {
crcByEntry.put(file.path().toString(), file.crc32().orElse(-1));
}
}
Funnel<SortedMap<String, Long>> funnel = (from, into) -> from.forEach((key, value) -> {
into.putBytes(key.getBytes());
into.putLong(value);
});
return Hashing.sha1().newHasher()
.putObject(crcByEntry, funnel)
.putInt(deploymentSpec.deployableHashCode())
.hash().toString();
}
@SuppressWarnings("deprecation")
public static String calculateHash(byte[] bytes) {
return Hashing.sha1().newHasher()
.putBytes(bytes)
.hash().toString();
}
/** Maps normalized paths to cached content read from a zip archive. */
private static class ZipArchiveCache {
/** Max size of each extracted file */
private static final int maxSize = 10 << 20;
private final byte[] zip;
private final Map<Path, Optional<byte[]>> cache;
public ZipArchiveCache(byte[] zip, Collection<String> prePopulated) {
this.zip = zip;
this.cache = new ConcurrentSkipListMap<>();
this.cache.putAll(read(prePopulated));
}
public Optional<byte[]> get(String path) {
return get(Paths.get(path));
}
public Optional<byte[]> get(Path path) {
return cache.computeIfAbsent(path.normalize(), read(List.of(path.normalize().toString()))::get);
}
public FileSystemWrapper wrapper() {
return FileSystemWrapper.ofFiles(Path.of("./"),
path -> get(path).isPresent(),
path -> get(path).orElseThrow(() -> new NoSuchFileException(path.toString())));
}
private Map<Path, Optional<byte[]>> read(Collection<String> names) {
var entries = ZipEntries.from(zip,
name -> names.contains(name),
maxSize,
true)
.asList().stream()
.collect(toMap(entry -> Paths.get(entry.name()).normalize(),
ZipEntries.ZipEntryWithContent::content));
names.stream().map(Paths::get).forEach(path -> entries.putIfAbsent(path.normalize(), Optional.empty()));
return entries;
}
}
} | class ApplicationPackage {
private static final String trustedCertificatesFile = "security/clients.pem";
private static final String buildMetaFile = "build-meta.json";
static final String deploymentFile = "deployment.xml";
private static final String validationOverridesFile = "validation-overrides.xml";
static final String servicesFile = "services.xml";
private final String contentHash;
private final String bundleHash;
private final byte[] zippedContent;
private final DeploymentSpec deploymentSpec;
private final ValidationOverrides validationOverrides;
private final ZipArchiveCache files;
private final Optional<Version> compileVersion;
private final Optional<Instant> buildTime;
private final Optional<Version> parentVersion;
private final List<X509Certificate> trustedCertificates;
/**
* Creates an application package from its zipped content.
* This <b>assigns ownership</b> of the given byte array to this class;
* it must not be further changed by the caller.
*/
public ApplicationPackage(byte[] zippedContent) {
this(zippedContent, false);
}
/**
* Creates an application package from its zipped content.
* This <b>assigns ownership</b> of the given byte array to this class;
* it must not be further changed by the caller.
* If 'requireFiles' is true, files needed by deployment orchestration must be present.
*/
@SuppressWarnings("deprecation")
public ApplicationPackage(byte[] zippedContent, boolean requireFiles) {
this.zippedContent = Objects.requireNonNull(zippedContent, "The application package content cannot be null");
this.contentHash = Hashing.sha1().hashBytes(zippedContent).toString();
this.files = new ZipArchiveCache(zippedContent, Set.of(deploymentFile, validationOverridesFile, servicesFile, buildMetaFile, trustedCertificatesFile));
Optional<DeploymentSpec> deploymentSpec = files.get(deploymentFile).map(bytes -> new String(bytes, UTF_8)).map(DeploymentSpec::fromXml);
if (requireFiles && deploymentSpec.isEmpty())
throw new IllegalArgumentException("Missing required file '" + deploymentFile + "'");
this.deploymentSpec = deploymentSpec.orElse(DeploymentSpec.empty);
this.validationOverrides = files.get(validationOverridesFile).map(bytes -> new String(bytes, UTF_8)).map(ValidationOverrides::fromXml).orElse(ValidationOverrides.empty);
Optional<Inspector> buildMetaObject = files.get(buildMetaFile).map(SlimeUtils::jsonToSlime).map(Slime::get);
this.compileVersion = buildMetaObject.flatMap(object -> parse(object, "compileVersion", field -> Version.fromString(field.asString())));
this.buildTime = buildMetaObject.flatMap(object -> parse(object, "buildTime", field -> Instant.ofEpochMilli(field.asLong())));
this.parentVersion = buildMetaObject.flatMap(object -> parse(object, "parentVersion", field -> Version.fromString(field.asString())));
this.trustedCertificates = files.get(trustedCertificatesFile).map(bytes -> X509CertificateUtils.certificateListFromPem(new String(bytes, UTF_8))).orElse(List.of());
this.bundleHash = calculateBundleHash(zippedContent);
preProcessAndPopulateCache();
}
/** Returns a copy of this with the given certificate appended. */
public ApplicationPackage withTrustedCertificate(X509Certificate certificate) {
List<X509Certificate> trustedCertificates = new ArrayList<>(this.trustedCertificates);
trustedCertificates.add(certificate);
byte[] certificatesBytes = X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8);
ByteArrayOutputStream modified = new ByteArrayOutputStream(zippedContent.length + certificatesBytes.length);
ZipEntries.transferAndWrite(modified, new ByteArrayInputStream(zippedContent), trustedCertificatesFile, certificatesBytes);
return new ApplicationPackage(modified.toByteArray());
}
/** Returns a hash of the content of this package */
public String hash() { return contentHash; }
/** Hash of all files and settings that influence what is deployed to config servers. */
public String bundleHash() {
return bundleHash;
}
/** Returns the content of this package. The content <b>must not</b> be modified. */
public byte[] zippedContent() { return zippedContent; }
/**
* Returns the deployment spec from the deployment.xml file of the package content.<br>
* This is the DeploymentSpec.empty instance if this package does not contain a deployment.xml file.<br>
* <em>NB: <strong>Always</strong> read deployment spec from the {@link Application}, for deployment orchestration.</em>
*/
public DeploymentSpec deploymentSpec() { return deploymentSpec; }
/**
* Returns the validation overrides from the validation-overrides.xml file of the package content.
* This is the ValidationOverrides.empty instance if this package does not contain a validation-overrides.xml file.
*/
public ValidationOverrides validationOverrides() { return validationOverrides; }
/** Returns the platform version which package was compiled against, if known. */
public Optional<Version> compileVersion() { return compileVersion; }
/** Returns the time this package was built, if known. */
public Optional<Instant> buildTime() { return buildTime; }
/** Returns the parent version used to compile the package, if known. */
public Optional<Version> parentVersion() { return parentVersion; }
/** Returns the list of certificates trusted by this application, or an empty list if no trust configured. */
public List<X509Certificate> trustedCertificates() {
return trustedCertificates;
}
private static <Type> Optional<Type> parse(Inspector buildMetaObject, String fieldName, Function<Inspector, Type> mapper) {
Inspector field = buildMetaObject.field(fieldName);
if ( ! field.valid() || field.type() == NIX)
return Optional.empty();
try {
return Optional.of(mapper.apply(buildMetaObject.field(fieldName)));
}
catch (RuntimeException e) {
throw new IllegalArgumentException("Failed parsing \"" + fieldName + "\" in '" + buildMetaFile + "': " + Exceptions.toMessageString(e));
}
}
/** Creates a valid application package that will remove all application's deployments */
public static ApplicationPackage deploymentRemoval() {
return new ApplicationPackage(filesZip(Map.of(validationOverridesFile, allValidationOverrides().xmlForm().getBytes(UTF_8),
deploymentFile, DeploymentSpec.empty.xmlForm().getBytes(UTF_8))));
}
/** Returns a zip containing meta data about deployments of this package by the given job. */
public byte[] metaDataZip() {
return cacheZip();
}
private byte[] cacheZip() {
return filesZip(files.cache.entrySet().stream()
.filter(entry -> entry.getValue().isPresent())
.collect(toMap(entry -> entry.getKey().toString(),
entry -> entry.getValue().get())));
}
public static byte[] filesZip(Map<String, byte[]> files) {
try (ZipBuilder zipBuilder = new ZipBuilder(files.values().stream().mapToInt(bytes -> bytes.length).sum() + 512)) {
files.forEach(zipBuilder::add);
zipBuilder.close();
return zipBuilder.toByteArray();
}
}
private static ValidationOverrides allValidationOverrides() {
String until = DateTimeFormatter.ISO_LOCAL_DATE.format(Instant.now().plus(Duration.ofDays(25)).atZone(ZoneOffset.UTC));
StringBuilder validationOverridesContents = new StringBuilder(1000);
validationOverridesContents.append("<validation-overrides version=\"1.0\">\n");
for (ValidationId validationId: ValidationId.values())
validationOverridesContents.append("\t<allow until=\"").append(until).append("\">").append(validationId.value()).append("</allow>\n");
validationOverridesContents.append("</validation-overrides>\n");
return ValidationOverrides.fromXml(validationOverridesContents.toString());
}
@SuppressWarnings("deprecation")
private String calculateBundleHash(byte[] zippedContent) {
Predicate<String> entryMatcher = name -> ! name.endsWith(deploymentFile) && ! name.endsWith(buildMetaFile);
SortedMap<String, Long> crcByEntry = new TreeMap<>();
Options options = Options.standard().pathPredicate(entryMatcher);
ArchiveFile file;
try (ArchiveStreamReader reader = ArchiveStreamReader.ofZip(new ByteArrayInputStream(zippedContent), options)) {
OutputStream discard = OutputStream.nullOutputStream();
while ((file = reader.readNextTo(discard)) != null) {
crcByEntry.put(file.path().toString(), file.crc32().orElse(-1));
}
}
Funnel<SortedMap<String, Long>> funnel = (from, into) -> from.forEach((key, value) -> {
into.putBytes(key.getBytes());
into.putLong(value);
});
return Hashing.sha1().newHasher()
.putObject(crcByEntry, funnel)
.putInt(deploymentSpec.deployableHashCode())
.hash().toString();
}
@SuppressWarnings("deprecation")
public static String calculateHash(byte[] bytes) {
return Hashing.sha1().newHasher()
.putBytes(bytes)
.hash().toString();
}
/** Maps normalized paths to cached content read from a zip archive. */
private static class ZipArchiveCache {
/** Max size of each extracted file */
private static final int maxSize = 10 << 20;
private final byte[] zip;
private final Map<Path, Optional<byte[]>> cache;
public ZipArchiveCache(byte[] zip, Collection<String> prePopulated) {
this.zip = zip;
this.cache = new ConcurrentSkipListMap<>();
this.cache.putAll(read(prePopulated));
}
public Optional<byte[]> get(String path) {
return get(Paths.get(path));
}
public Optional<byte[]> get(Path path) {
return cache.computeIfAbsent(path.normalize(), read(List.of(path.normalize().toString()))::get);
}
public FileSystemWrapper wrapper() {
return FileSystemWrapper.ofFiles(Path.of("./"),
path -> get(path).isPresent(),
path -> get(path).orElseThrow(() -> new NoSuchFileException(path.toString())));
}
private Map<Path, Optional<byte[]>> read(Collection<String> names) {
var entries = ZipEntries.from(zip,
name -> names.contains(name),
maxSize,
true)
.asList().stream()
.collect(toMap(entry -> Paths.get(entry.name()).normalize(),
ZipEntries.ZipEntryWithContent::content));
names.stream().map(Paths::get).forEach(path -> entries.putIfAbsent(path.normalize(), Optional.empty()));
return entries;
}
}
} |
The `DeploymentData` has the tags, but they're not read anywhere. The implementation of the `ConfigServer` needs to do that. | public static PrepareParams fromHttpRequest(HttpRequest request, TenantName tenant, Duration barrierTimeout) {
return new Builder().ignoreValidationErrors(request.getBooleanProperty(IGNORE_VALIDATION_PARAM_NAME))
.dryRun(request.getBooleanProperty(DRY_RUN_PARAM_NAME))
.verbose(request.getBooleanProperty(VERBOSE_PARAM_NAME))
.timeoutBudget(SessionHandler.getTimeoutBudget(request, barrierTimeout))
.applicationId(createApplicationId(request, tenant))
.tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME)))
.vespaVersion(request.getProperty(VESPA_VERSION_PARAM_NAME))
.containerEndpoints(request.getProperty(CONTAINER_ENDPOINTS_PARAM_NAME))
.endpointCertificateMetadata(request.getProperty(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME))
.dockerImageRepository(request.getProperty(DOCKER_IMAGE_REPOSITORY))
.athenzDomain(request.getProperty(ATHENZ_DOMAIN))
.quota(request.getProperty(QUOTA_PARAM_NAME))
.tenantSecretStores(request.getProperty(TENANT_SECRET_STORES_PARAM_NAME))
.force(request.getBooleanProperty(FORCE_PARAM_NAME))
.waitForResourcesInPrepare(request.getBooleanProperty(WAIT_FOR_RESOURCES_IN_PREPARE))
.build();
} | .tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME))) | public static PrepareParams fromHttpRequest(HttpRequest request, TenantName tenant, Duration barrierTimeout) {
return new Builder().ignoreValidationErrors(request.getBooleanProperty(IGNORE_VALIDATION_PARAM_NAME))
.dryRun(request.getBooleanProperty(DRY_RUN_PARAM_NAME))
.verbose(request.getBooleanProperty(VERBOSE_PARAM_NAME))
.timeoutBudget(SessionHandler.getTimeoutBudget(request, barrierTimeout))
.applicationId(createApplicationId(request, tenant))
.tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME)))
.vespaVersion(request.getProperty(VESPA_VERSION_PARAM_NAME))
.containerEndpoints(request.getProperty(CONTAINER_ENDPOINTS_PARAM_NAME))
.endpointCertificateMetadata(request.getProperty(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME))
.dockerImageRepository(request.getProperty(DOCKER_IMAGE_REPOSITORY))
.athenzDomain(request.getProperty(ATHENZ_DOMAIN))
.quota(request.getProperty(QUOTA_PARAM_NAME))
.tenantSecretStores(request.getProperty(TENANT_SECRET_STORES_PARAM_NAME))
.force(request.getBooleanProperty(FORCE_PARAM_NAME))
.waitForResourcesInPrepare(request.getBooleanProperty(WAIT_FOR_RESOURCES_IN_PREPARE))
.build();
} | class Builder {
private boolean ignoreValidationErrors = false;
private boolean dryRun = false;
private boolean verbose = false;
private boolean isBootstrap = false;
private boolean force = false;
private boolean waitForResourcesInPrepare = false;
private ApplicationId applicationId = null;
private Tags tags = Tags.empty();
private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60));
private Optional<Version> vespaVersion = Optional.empty();
private List<ContainerEndpoint> containerEndpoints = null;
private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
private Optional<DockerImage> dockerImageRepository = Optional.empty();
private Optional<AthenzDomain> athenzDomain = Optional.empty();
private Optional<Quota> quota = Optional.empty();
private List<TenantSecretStore> tenantSecretStores = List.of();
private List<X509Certificate> operatorCertificates = List.of();
private Optional<CloudAccount> cloudAccount = Optional.empty();
public Builder() { }
public Builder applicationId(ApplicationId applicationId) {
this.applicationId = applicationId;
return this;
}
public Builder tags(Tags tags) {
this.tags = tags;
return this;
}
public Builder ignoreValidationErrors(boolean ignoreValidationErrors) {
this.ignoreValidationErrors = ignoreValidationErrors;
return this;
}
public Builder dryRun(boolean dryRun) {
this.dryRun = dryRun;
return this;
}
public Builder verbose(boolean verbose) {
this.verbose = verbose;
return this;
}
public Builder isBootstrap(boolean isBootstrap) {
this.isBootstrap = isBootstrap;
return this;
}
public Builder timeoutBudget(TimeoutBudget timeoutBudget) {
this.timeoutBudget = timeoutBudget;
return this;
}
public Builder vespaVersion(String vespaVersion) {
Optional<Version> version = Optional.empty();
if (vespaVersion != null && !vespaVersion.isEmpty()) {
version = Optional.of(Version.fromString(vespaVersion));
}
this.vespaVersion = version;
return this;
}
public Builder vespaVersion(Version vespaVersion) {
this.vespaVersion = Optional.ofNullable(vespaVersion);
return this;
}
public Builder containerEndpoints(String serialized) {
this.containerEndpoints = (serialized == null)
? List.of()
: ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
return this;
}
public Builder containerEndpointList(List<ContainerEndpoint> endpoints) {
this.containerEndpoints = endpoints;
return this;
}
public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) {
this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata);
return this;
}
public Builder endpointCertificateMetadata(String serialized) {
this.endpointCertificateMetadata = (serialized == null)
? Optional.empty()
: Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
return this;
}
public Builder dockerImageRepository(String dockerImageRepository) {
this.dockerImageRepository = (dockerImageRepository == null)
? Optional.empty()
: Optional.of(DockerImage.fromString(dockerImageRepository));
return this;
}
public Builder dockerImageRepository(DockerImage dockerImageRepository) {
this.dockerImageRepository = Optional.ofNullable(dockerImageRepository);
return this;
}
public Builder athenzDomain(String athenzDomain) {
this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from);
return this;
}
public Builder athenzDomain(AthenzDomain athenzDomain) {
this.athenzDomain = Optional.ofNullable(athenzDomain);
return this;
}
public Builder quota(Quota quota) {
this.quota = Optional.ofNullable(quota);
return this;
}
public Builder quota(String serialized) {
this.quota = (serialized == null)
? Optional.empty()
: Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
return this;
}
public Builder tenantSecretStores(String serialized) {
List<TenantSecretStore> secretStores = (serialized == null)
? List.of()
: TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get());
return tenantSecretStores(secretStores);
}
public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) {
this.tenantSecretStores = tenantSecretStores;
return this;
}
public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) {
this.waitForResourcesInPrepare = waitForResourcesInPrepare;
return this;
}
public Builder force(boolean force) {
this.force = force;
return this;
}
public Builder operatorCertificates(List<X509Certificate> operatorCertificates) {
this.operatorCertificates = List.copyOf(operatorCertificates);
return this;
}
public Builder cloudAccount(CloudAccount cloudAccount) {
this.cloudAccount = Optional.ofNullable(cloudAccount);
return this;
}
public PrepareParams build() {
return new PrepareParams(applicationId,
tags,
timeoutBudget,
ignoreValidationErrors,
dryRun,
verbose,
isBootstrap,
vespaVersion,
containerEndpoints,
endpointCertificateMetadata,
dockerImageRepository,
athenzDomain,
quota,
tenantSecretStores,
force,
waitForResourcesInPrepare,
operatorCertificates,
cloudAccount);
}
} | class Builder {
private boolean ignoreValidationErrors = false;
private boolean dryRun = false;
private boolean verbose = false;
private boolean isBootstrap = false;
private boolean force = false;
private boolean waitForResourcesInPrepare = false;
private ApplicationId applicationId = null;
private Tags tags = Tags.empty();
private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60));
private Optional<Version> vespaVersion = Optional.empty();
private List<ContainerEndpoint> containerEndpoints = null;
private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
private Optional<DockerImage> dockerImageRepository = Optional.empty();
private Optional<AthenzDomain> athenzDomain = Optional.empty();
private Optional<Quota> quota = Optional.empty();
private List<TenantSecretStore> tenantSecretStores = List.of();
private List<X509Certificate> operatorCertificates = List.of();
private Optional<CloudAccount> cloudAccount = Optional.empty();
public Builder() { }
public Builder applicationId(ApplicationId applicationId) {
this.applicationId = applicationId;
return this;
}
public Builder tags(Tags tags) {
this.tags = tags;
return this;
}
public Builder ignoreValidationErrors(boolean ignoreValidationErrors) {
this.ignoreValidationErrors = ignoreValidationErrors;
return this;
}
public Builder dryRun(boolean dryRun) {
this.dryRun = dryRun;
return this;
}
public Builder verbose(boolean verbose) {
this.verbose = verbose;
return this;
}
public Builder isBootstrap(boolean isBootstrap) {
this.isBootstrap = isBootstrap;
return this;
}
public Builder timeoutBudget(TimeoutBudget timeoutBudget) {
this.timeoutBudget = timeoutBudget;
return this;
}
public Builder vespaVersion(String vespaVersion) {
Optional<Version> version = Optional.empty();
if (vespaVersion != null && !vespaVersion.isEmpty()) {
version = Optional.of(Version.fromString(vespaVersion));
}
this.vespaVersion = version;
return this;
}
public Builder vespaVersion(Version vespaVersion) {
this.vespaVersion = Optional.ofNullable(vespaVersion);
return this;
}
public Builder containerEndpoints(String serialized) {
this.containerEndpoints = (serialized == null)
? List.of()
: ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
return this;
}
public Builder containerEndpointList(List<ContainerEndpoint> endpoints) {
this.containerEndpoints = endpoints;
return this;
}
public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) {
this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata);
return this;
}
public Builder endpointCertificateMetadata(String serialized) {
this.endpointCertificateMetadata = (serialized == null)
? Optional.empty()
: Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
return this;
}
public Builder dockerImageRepository(String dockerImageRepository) {
this.dockerImageRepository = (dockerImageRepository == null)
? Optional.empty()
: Optional.of(DockerImage.fromString(dockerImageRepository));
return this;
}
public Builder dockerImageRepository(DockerImage dockerImageRepository) {
this.dockerImageRepository = Optional.ofNullable(dockerImageRepository);
return this;
}
public Builder athenzDomain(String athenzDomain) {
this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from);
return this;
}
public Builder athenzDomain(AthenzDomain athenzDomain) {
this.athenzDomain = Optional.ofNullable(athenzDomain);
return this;
}
public Builder quota(Quota quota) {
this.quota = Optional.ofNullable(quota);
return this;
}
public Builder quota(String serialized) {
this.quota = (serialized == null)
? Optional.empty()
: Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
return this;
}
public Builder tenantSecretStores(String serialized) {
List<TenantSecretStore> secretStores = (serialized == null)
? List.of()
: TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get());
return tenantSecretStores(secretStores);
}
public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) {
this.tenantSecretStores = tenantSecretStores;
return this;
}
public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) {
this.waitForResourcesInPrepare = waitForResourcesInPrepare;
return this;
}
public Builder force(boolean force) {
this.force = force;
return this;
}
public Builder operatorCertificates(List<X509Certificate> operatorCertificates) {
this.operatorCertificates = List.copyOf(operatorCertificates);
return this;
}
public Builder cloudAccount(CloudAccount cloudAccount) {
this.cloudAccount = Optional.ofNullable(cloudAccount);
return this;
}
public PrepareParams build() {
return new PrepareParams(applicationId,
tags,
timeoutBudget,
ignoreValidationErrors,
dryRun,
verbose,
isBootstrap,
vespaVersion,
containerEndpoints,
endpointCertificateMetadata,
dockerImageRepository,
athenzDomain,
quota,
tenantSecretStores,
force,
waitForResourcesInPrepare,
operatorCertificates,
cloudAccount);
}
} |
just as a temp variable? not used in other place? and how about allViewExpandStmtListStr instead of allViewStmtSuffix, it may be a little confused? | public CacheAnalyzer(ConnectContext context, StatementBase parsedStmt, List<ScanNode> scanNodes) {
this.context = context;
this.parsedStmt = parsedStmt;
this.scanNodes = scanNodes;
allViewStmtSuffix = new StringBuilder();
allViewStmtSet = new HashSet<>();
checkCacheConfig();
} | allViewStmtSuffix = new StringBuilder(); | public CacheAnalyzer(ConnectContext context, StatementBase parsedStmt, List<ScanNode> scanNodes) {
this.context = context;
this.parsedStmt = parsedStmt;
this.scanNodes = scanNodes;
allViewStmtSet = new HashSet<>();
checkCacheConfig();
} | class CacheAnalyzer {
private static final Logger LOG = LogManager.getLogger(CacheAnalyzer.class);
/**
* NoNeed : disable config or variable, not query, not scan table etc.
*/
public enum CacheMode {
NoNeed,
None,
TTL,
Sql,
Partition
}
private ConnectContext context;
private boolean enableSqlCache = false;
private boolean enablePartitionCache = false;
private TUniqueId queryId;
private CacheMode cacheMode;
private CacheTable latestTable;
private StatementBase parsedStmt;
private SelectStmt selectStmt;
private List<ScanNode> scanNodes;
private OlapTable olapTable;
private RangePartitionInfo partitionInfo;
private Column partColumn;
private CompoundPredicate partitionPredicate;
private Cache cache;
private StringBuilder allViewStmtSuffix;
private Set<String> allViewStmtSet;
public Cache getCache() {
return cache;
}
public CacheAnalyzer(ConnectContext context, StatementBase parsedStmt, Planner planner) {
this.context = context;
this.queryId = context.queryId();
this.parsedStmt = parsedStmt;
scanNodes = planner.getScanNodes();
latestTable = new CacheTable();
allViewStmtSuffix = new StringBuilder();
allViewStmtSet = new HashSet<>();
checkCacheConfig();
}
private void checkCacheConfig() {
if (Config.cache_enable_sql_mode) {
if (context.getSessionVariable().isEnableSqlCache()) {
enableSqlCache = true;
}
}
if (Config.cache_enable_partition_mode) {
if (context.getSessionVariable().isEnablePartitionCache()) {
enablePartitionCache = true;
}
}
}
public CacheMode getCacheMode() {
return cacheMode;
}
public class CacheTable implements Comparable<CacheTable> {
public OlapTable olapTable;
public long latestPartitionId;
public long latestVersion;
public long latestTime;
public CacheTable() {
olapTable = null;
latestPartitionId = 0;
latestVersion = 0;
latestTime = 0;
}
@Override
public int compareTo(CacheTable table) {
return (int) (table.latestTime - this.latestTime);
}
public void Debug() {
LOG.debug("table {}, partition id {}, ver {}, time {}", olapTable.getName(), latestPartitionId, latestVersion, latestTime);
}
}
public boolean enableCache() {
return enableSqlCache || enablePartitionCache;
}
public boolean enableSqlCache() {
return enableSqlCache;
}
public boolean enablePartitionCache() {
return enablePartitionCache;
}
/**
* Check cache mode with SQL and table
* 1、Only Olap table
* 2、The update time of the table is before Config.last_version_interval_time
* 2、PartitionType is PartitionType.RANGE, and partition key has only one column
* 4、Partition key must be included in the group by clause
* 5、Where clause must contain only one partition key predicate
* CacheMode.Sql
* xxx FROM user_profile, updated before Config.last_version_interval_time
* CacheMode.Partition, partition by event_date, only the partition of today will be updated.
* SELECT xxx FROM app_event WHERE event_date >= 20191201 AND event_date <= 20191207 GROUP BY event_date
* SELECT xxx FROM app_event INNER JOIN user_Profile ON app_event.user_id = user_profile.user_id xxx
* SELECT xxx FROM app_event INNER JOIN user_profile ON xxx INNER JOIN site_channel ON xxx
*/
public void checkCacheMode(long now) {
cacheMode = innerCheckCacheMode(now);
}
private CacheMode innerCheckCacheMode(long now) {
if (!enableCache()) {
LOG.debug("cache is disabled. queryid {}", DebugUtil.printId(queryId));
return CacheMode.NoNeed;
}
if (!(parsedStmt instanceof SelectStmt) || scanNodes.size() == 0) {
LOG.debug("not a select stmt or no scan node. queryid {}", DebugUtil.printId(queryId));
return CacheMode.NoNeed;
}
MetricRepo.COUNTER_QUERY_TABLE.increase(1L);
this.selectStmt = (SelectStmt) parsedStmt;
List<CacheTable> tblTimeList = Lists.newArrayList();
for (int i = 0; i < scanNodes.size(); i++) {
ScanNode node = scanNodes.get(i);
if (!(node instanceof OlapScanNode)) {
LOG.debug("query contains non-olap table. queryid {}", DebugUtil.printId(queryId));
return CacheMode.None;
}
OlapScanNode oNode = (OlapScanNode) node;
OlapTable oTable = oNode.getOlapTable();
CacheTable cTable = getLastUpdateTime(oTable);
tblTimeList.add(cTable);
}
MetricRepo.COUNTER_QUERY_OLAP_TABLE.increase(1L);
Collections.sort(tblTimeList);
latestTable = tblTimeList.get(0);
latestTable.Debug();
addAllViewStmt(selectStmt);
for (String stmt : allViewStmtSet) {
allViewStmtSuffix.append("_").append(stmt);
}
if (now == 0) {
now = nowtime();
}
if (enableSqlCache() &&
(now - latestTable.latestTime) >= Config.cache_last_version_interval_second * 1000) {
LOG.debug("TIME:{},{},{}", now, latestTable.latestTime, Config.cache_last_version_interval_second*1000);
cache = new SqlCache(this.queryId, this.selectStmt);
((SqlCache) cache).setCacheInfo(this.latestTable, allViewStmtSuffix.toString());
MetricRepo.COUNTER_CACHE_MODE_SQL.increase(1L);
return CacheMode.Sql;
}
if (!enablePartitionCache()) {
LOG.debug("partition query cache is disabled. queryid {}", DebugUtil.printId(queryId));
return CacheMode.None;
}
for (int i = 1; i < tblTimeList.size(); i++) {
if ((now - tblTimeList.get(i).latestTime) < Config.cache_last_version_interval_second * 1000) {
LOG.debug("the time of other tables is newer than {} s, queryid {}",
Config.cache_last_version_interval_second, DebugUtil.printId(queryId));
return CacheMode.None;
}
}
olapTable = latestTable.olapTable;
if (olapTable.getPartitionInfo().getType() != PartitionType.RANGE) {
LOG.debug("the partition of OlapTable not RANGE type, queryid {}", DebugUtil.printId(queryId));
return CacheMode.None;
}
partitionInfo = (RangePartitionInfo) olapTable.getPartitionInfo();
List<Column> columns = partitionInfo.getPartitionColumns();
if (columns.size() != 1) {
LOG.debug("more than one partition column, queryid {}", columns.size(), DebugUtil.printId(queryId));
return CacheMode.None;
}
partColumn = columns.get(0);
if (!checkGroupByPartitionKey(this.selectStmt, partColumn)) {
LOG.debug("group by columns does not contains all partition column, queryid {}", DebugUtil.printId(queryId));
return CacheMode.None;
}
List<CompoundPredicate> compoundPredicates = Lists.newArrayList();
getPartitionKeyFromSelectStmt(this.selectStmt, partColumn, compoundPredicates);
if (compoundPredicates.size() != 1) {
LOG.debug("empty or more than one predicates contain partition column, queryid {}", DebugUtil.printId(queryId));
return CacheMode.None;
}
partitionPredicate = compoundPredicates.get(0);
cache = new PartitionCache(this.queryId, this.selectStmt);
((PartitionCache) cache).setCacheInfo(this.latestTable, this.partitionInfo, this.partColumn,
this.partitionPredicate, allViewStmtSuffix.toString());
MetricRepo.COUNTER_CACHE_MODE_PARTITION.increase(1L);
return CacheMode.Partition;
}
public InternalService.PFetchCacheResult getCacheData() {
cacheMode = innerCheckCacheMode(0);
if (cacheMode == CacheMode.NoNeed) {
return null;
}
if (cacheMode == CacheMode.None) {
return null;
}
Status status = new Status();
InternalService.PFetchCacheResult cacheResult = cache.getCacheData(status);
if (status.ok() && cacheResult != null && cacheResult.getStatus() == InternalService.PCacheStatus.CACHE_OK) {
int rowCount = 0;
int dataSize = 0;
for (InternalService.PCacheValue value : cacheResult.getValuesList()) {
rowCount += value.getRowsCount();
dataSize += value.getDataSize();
}
LOG.debug("hit cache, mode {}, queryid {}, all count {}, value count {}, row count {}, data size {}",
cacheMode, DebugUtil.printId(queryId),
cacheResult.getAllCount(), cacheResult.getValuesCount(),
rowCount, dataSize);
} else {
LOG.debug("miss cache, mode {}, queryid {}, code {}, msg {}", cacheMode,
DebugUtil.printId(queryId), status.getErrorCode(), status.getErrorMsg());
cacheResult = null;
}
return cacheResult;
}
public long nowtime() {
return System.currentTimeMillis();
}
private void getPartitionKeyFromSelectStmt(SelectStmt stmt, Column partColumn,
List<CompoundPredicate> compoundPredicates) {
getPartitionKeyFromWhereClause(stmt.getWhereClause(), partColumn, compoundPredicates);
List<TableRef> tableRefs = stmt.getTableRefs();
for (TableRef tblRef : tableRefs) {
if (tblRef instanceof InlineViewRef) {
InlineViewRef viewRef = (InlineViewRef) tblRef;
QueryStmt queryStmt = viewRef.getViewStmt();
if (queryStmt instanceof SelectStmt) {
getPartitionKeyFromSelectStmt((SelectStmt) queryStmt, partColumn, compoundPredicates);
}
}
}
}
/**
* Only support case 1
* 1.key >= a and key <= b
* 2.key = a or key = b
* 3.key in(a,b,c)
*/
private void getPartitionKeyFromWhereClause(Expr expr, Column partColumn,
List<CompoundPredicate> compoundPredicates) {
if (expr == null) {
return;
}
if (expr instanceof CompoundPredicate) {
CompoundPredicate cp = (CompoundPredicate) expr;
if (cp.getOp() == CompoundPredicate.Operator.AND) {
if (cp.getChildren().size() == 2 && cp.getChild(0) instanceof BinaryPredicate &&
cp.getChild(1) instanceof BinaryPredicate) {
BinaryPredicate leftPre = (BinaryPredicate) cp.getChild(0);
BinaryPredicate rightPre = (BinaryPredicate) cp.getChild(1);
String leftColumn = getColumnName(leftPre);
String rightColumn = getColumnName(rightPre);
if (leftColumn.equalsIgnoreCase(partColumn.getName()) &&
rightColumn.equalsIgnoreCase(partColumn.getName())) {
compoundPredicates.add(cp);
}
}
}
for (Expr subExpr : expr.getChildren()) {
getPartitionKeyFromWhereClause(subExpr, partColumn, compoundPredicates);
}
}
}
private String getColumnName(BinaryPredicate predicate) {
SlotRef slot = null;
if (predicate.getChild(0) instanceof SlotRef) {
slot = (SlotRef) predicate.getChild(0);
} else if (predicate.getChild(0) instanceof CastExpr) {
CastExpr expr = (CastExpr) predicate.getChild(0);
if (expr.getChild(0) instanceof SlotRef) {
slot = (SlotRef) expr.getChild(0);
}
}
if (slot != null) {
return slot.getColumnName();
}
return "";
}
/**
* Check the selectStmt and tableRefs always group by partition key
* 1. At least one group by
* 2. group by must contain partition key
*/
private boolean checkGroupByPartitionKey(SelectStmt stmt, Column partColumn) {
List<AggregateInfo> aggInfoList = Lists.newArrayList();
getAggInfoList(stmt, aggInfoList);
int groupbyCount = 0;
for (AggregateInfo aggInfo : aggInfoList) {
/*
Support COUNT(DISTINCT xxx) now,next version will remove the code
if (aggInfo.isDistinctAgg()) {
return false;
}*/
ArrayList<Expr> groupExprs = aggInfo.getGroupingExprs();
if (groupExprs == null) {
continue;
}
groupbyCount += 1;
boolean matched = false;
for (Expr groupExpr : groupExprs) {
SlotRef slot = (SlotRef) groupExpr;
if (partColumn.getName().equals(slot.getColumnName())) {
matched = true;
break;
}
}
if (!matched) {
return false;
}
}
return groupbyCount > 0 ? true : false;
}
private void getAggInfoList(SelectStmt stmt, List<AggregateInfo> aggInfoList) {
AggregateInfo aggInfo = stmt.getAggInfo();
if (aggInfo != null) {
aggInfoList.add(aggInfo);
}
List<TableRef> tableRefs = stmt.getTableRefs();
for (TableRef tblRef : tableRefs) {
if (tblRef instanceof InlineViewRef) {
InlineViewRef viewRef = (InlineViewRef) tblRef;
QueryStmt queryStmt = viewRef.getViewStmt();
if (queryStmt instanceof SelectStmt) {
getAggInfoList((SelectStmt) queryStmt, aggInfoList);
}
}
}
}
private CacheTable getLastUpdateTime(OlapTable olapTable) {
CacheTable table = new CacheTable();
table.olapTable = olapTable;
for (Partition partition : olapTable.getPartitions()) {
if (partition.getVisibleVersionTime() >= table.latestTime) {
table.latestPartitionId = partition.getId();
table.latestTime = partition.getVisibleVersionTime();
table.latestVersion = partition.getVisibleVersion();
}
}
return table;
}
private void addAllViewStmt(List<TableRef> tblRefs) {
for (TableRef tblRef : tblRefs) {
if (tblRef instanceof InlineViewRef) {
InlineViewRef inlineViewRef = (InlineViewRef) tblRef;
if (inlineViewRef.isLocalView()) {
Collection<View> views = inlineViewRef.getAnalyzer().getLocalViews().values();
for (View view : views) {
addAllViewStmt(view.getQueryStmt());
}
} else {
addAllViewStmt(inlineViewRef.getViewStmt());
allViewStmtSet.add(inlineViewRef.getView().getInlineViewDef());
}
}
}
}
private void addAllViewStmt(QueryStmt queryStmt) {
if (queryStmt instanceof SelectStmt) {
addAllViewStmt(((SelectStmt) queryStmt).getTableRefs());
} else if (queryStmt instanceof SetOperationStmt) {
for (SetOperationStmt.SetOperand operand : ((SetOperationStmt) queryStmt).getOperands()) {
addAllViewStmt(((SelectStmt) operand.getQueryStmt()).getTableRefs());
}
}
}
public Cache.HitRange getHitRange() {
if (cacheMode == CacheMode.None) {
return Cache.HitRange.None;
}
return cache.getHitRange();
}
public SelectStmt getRewriteStmt() {
if (cacheMode != CacheMode.Partition) {
return null;
}
return cache.getRewriteStmt();
}
public void copyRowBatch(RowBatch rowBatch) {
if (cacheMode == CacheMode.None || cacheMode == CacheMode.NoNeed) {
return;
}
cache.copyRowBatch(rowBatch);
}
public void updateCache() {
if (cacheMode == CacheMode.None || cacheMode == CacheMode.NoNeed) {
return;
}
cache.updateCache();
}
} | class CacheAnalyzer {
private static final Logger LOG = LogManager.getLogger(CacheAnalyzer.class);
/**
* NoNeed : disable config or variable, not query, not scan table etc.
*/
public enum CacheMode {
NoNeed,
None,
TTL,
Sql,
Partition
}
private ConnectContext context;
private boolean enableSqlCache = false;
private boolean enablePartitionCache = false;
private TUniqueId queryId;
private CacheMode cacheMode;
private CacheTable latestTable;
private StatementBase parsedStmt;
private SelectStmt selectStmt;
private List<ScanNode> scanNodes;
private OlapTable olapTable;
private RangePartitionInfo partitionInfo;
private Column partColumn;
private CompoundPredicate partitionPredicate;
private Cache cache;
private Set<String> allViewStmtSet;
public Cache getCache() {
return cache;
}
public CacheAnalyzer(ConnectContext context, StatementBase parsedStmt, Planner planner) {
this.context = context;
this.queryId = context.queryId();
this.parsedStmt = parsedStmt;
scanNodes = planner.getScanNodes();
latestTable = new CacheTable();
allViewStmtSet = new HashSet<>();
checkCacheConfig();
}
private void checkCacheConfig() {
if (Config.cache_enable_sql_mode) {
if (context.getSessionVariable().isEnableSqlCache()) {
enableSqlCache = true;
}
}
if (Config.cache_enable_partition_mode) {
if (context.getSessionVariable().isEnablePartitionCache()) {
enablePartitionCache = true;
}
}
}
public CacheMode getCacheMode() {
return cacheMode;
}
public class CacheTable implements Comparable<CacheTable> {
public OlapTable olapTable;
public long latestPartitionId;
public long latestVersion;
public long latestTime;
public CacheTable() {
olapTable = null;
latestPartitionId = 0;
latestVersion = 0;
latestTime = 0;
}
@Override
public int compareTo(CacheTable table) {
return (int) (table.latestTime - this.latestTime);
}
public void Debug() {
LOG.debug("table {}, partition id {}, ver {}, time {}", olapTable.getName(), latestPartitionId, latestVersion, latestTime);
}
}
public boolean enableCache() {
return enableSqlCache || enablePartitionCache;
}
public boolean enableSqlCache() {
return enableSqlCache;
}
public boolean enablePartitionCache() {
return enablePartitionCache;
}
/**
* Check cache mode with SQL and table
* 1、Only Olap table
* 2、The update time of the table is before Config.last_version_interval_time
* 2、PartitionType is PartitionType.RANGE, and partition key has only one column
* 4、Partition key must be included in the group by clause
* 5、Where clause must contain only one partition key predicate
* CacheMode.Sql
* xxx FROM user_profile, updated before Config.last_version_interval_time
* CacheMode.Partition, partition by event_date, only the partition of today will be updated.
* SELECT xxx FROM app_event WHERE event_date >= 20191201 AND event_date <= 20191207 GROUP BY event_date
* SELECT xxx FROM app_event INNER JOIN user_Profile ON app_event.user_id = user_profile.user_id xxx
* SELECT xxx FROM app_event INNER JOIN user_profile ON xxx INNER JOIN site_channel ON xxx
*/
public void checkCacheMode(long now) {
cacheMode = innerCheckCacheMode(now);
}
private CacheMode innerCheckCacheMode(long now) {
if (!enableCache()) {
LOG.debug("cache is disabled. queryid {}", DebugUtil.printId(queryId));
return CacheMode.NoNeed;
}
if (!(parsedStmt instanceof SelectStmt) || scanNodes.size() == 0) {
LOG.debug("not a select stmt or no scan node. queryid {}", DebugUtil.printId(queryId));
return CacheMode.NoNeed;
}
MetricRepo.COUNTER_QUERY_TABLE.increase(1L);
this.selectStmt = (SelectStmt) parsedStmt;
List<CacheTable> tblTimeList = Lists.newArrayList();
for (int i = 0; i < scanNodes.size(); i++) {
ScanNode node = scanNodes.get(i);
if (!(node instanceof OlapScanNode)) {
LOG.debug("query contains non-olap table. queryid {}", DebugUtil.printId(queryId));
return CacheMode.None;
}
OlapScanNode oNode = (OlapScanNode) node;
OlapTable oTable = oNode.getOlapTable();
CacheTable cTable = getLastUpdateTime(oTable);
tblTimeList.add(cTable);
}
MetricRepo.COUNTER_QUERY_OLAP_TABLE.increase(1L);
Collections.sort(tblTimeList);
latestTable = tblTimeList.get(0);
latestTable.Debug();
addAllViewStmt(selectStmt);
String allViewExpandStmtListStr = StringUtils.join(allViewStmtSet, ",");
if (now == 0) {
now = nowtime();
}
if (enableSqlCache() &&
(now - latestTable.latestTime) >= Config.cache_last_version_interval_second * 1000) {
LOG.debug("TIME:{},{},{}", now, latestTable.latestTime, Config.cache_last_version_interval_second*1000);
cache = new SqlCache(this.queryId, this.selectStmt);
((SqlCache) cache).setCacheInfo(this.latestTable, allViewExpandStmtListStr);
MetricRepo.COUNTER_CACHE_MODE_SQL.increase(1L);
return CacheMode.Sql;
}
if (!enablePartitionCache()) {
LOG.debug("partition query cache is disabled. queryid {}", DebugUtil.printId(queryId));
return CacheMode.None;
}
for (int i = 1; i < tblTimeList.size(); i++) {
if ((now - tblTimeList.get(i).latestTime) < Config.cache_last_version_interval_second * 1000) {
LOG.debug("the time of other tables is newer than {} s, queryid {}",
Config.cache_last_version_interval_second, DebugUtil.printId(queryId));
return CacheMode.None;
}
}
olapTable = latestTable.olapTable;
if (olapTable.getPartitionInfo().getType() != PartitionType.RANGE) {
LOG.debug("the partition of OlapTable not RANGE type, queryid {}", DebugUtil.printId(queryId));
return CacheMode.None;
}
partitionInfo = (RangePartitionInfo) olapTable.getPartitionInfo();
List<Column> columns = partitionInfo.getPartitionColumns();
if (columns.size() != 1) {
LOG.debug("more than one partition column, queryid {}", columns.size(), DebugUtil.printId(queryId));
return CacheMode.None;
}
partColumn = columns.get(0);
if (!checkGroupByPartitionKey(this.selectStmt, partColumn)) {
LOG.debug("group by columns does not contains all partition column, queryid {}", DebugUtil.printId(queryId));
return CacheMode.None;
}
List<CompoundPredicate> compoundPredicates = Lists.newArrayList();
getPartitionKeyFromSelectStmt(this.selectStmt, partColumn, compoundPredicates);
if (compoundPredicates.size() != 1) {
LOG.debug("empty or more than one predicates contain partition column, queryid {}", DebugUtil.printId(queryId));
return CacheMode.None;
}
partitionPredicate = compoundPredicates.get(0);
cache = new PartitionCache(this.queryId, this.selectStmt);
((PartitionCache) cache).setCacheInfo(this.latestTable, this.partitionInfo, this.partColumn,
this.partitionPredicate, allViewExpandStmtListStr);
MetricRepo.COUNTER_CACHE_MODE_PARTITION.increase(1L);
return CacheMode.Partition;
}
public InternalService.PFetchCacheResult getCacheData() {
cacheMode = innerCheckCacheMode(0);
if (cacheMode == CacheMode.NoNeed) {
return null;
}
if (cacheMode == CacheMode.None) {
return null;
}
Status status = new Status();
InternalService.PFetchCacheResult cacheResult = cache.getCacheData(status);
if (status.ok() && cacheResult != null && cacheResult.getStatus() == InternalService.PCacheStatus.CACHE_OK) {
int rowCount = 0;
int dataSize = 0;
for (InternalService.PCacheValue value : cacheResult.getValuesList()) {
rowCount += value.getRowsCount();
dataSize += value.getDataSize();
}
LOG.debug("hit cache, mode {}, queryid {}, all count {}, value count {}, row count {}, data size {}",
cacheMode, DebugUtil.printId(queryId),
cacheResult.getAllCount(), cacheResult.getValuesCount(),
rowCount, dataSize);
} else {
LOG.debug("miss cache, mode {}, queryid {}, code {}, msg {}", cacheMode,
DebugUtil.printId(queryId), status.getErrorCode(), status.getErrorMsg());
cacheResult = null;
}
return cacheResult;
}
public long nowtime() {
return System.currentTimeMillis();
}
private void getPartitionKeyFromSelectStmt(SelectStmt stmt, Column partColumn,
List<CompoundPredicate> compoundPredicates) {
getPartitionKeyFromWhereClause(stmt.getWhereClause(), partColumn, compoundPredicates);
List<TableRef> tableRefs = stmt.getTableRefs();
for (TableRef tblRef : tableRefs) {
if (tblRef instanceof InlineViewRef) {
InlineViewRef viewRef = (InlineViewRef) tblRef;
QueryStmt queryStmt = viewRef.getViewStmt();
if (queryStmt instanceof SelectStmt) {
getPartitionKeyFromSelectStmt((SelectStmt) queryStmt, partColumn, compoundPredicates);
}
}
}
}
/**
* Only support case 1
* 1.key >= a and key <= b
* 2.key = a or key = b
* 3.key in(a,b,c)
*/
private void getPartitionKeyFromWhereClause(Expr expr, Column partColumn,
List<CompoundPredicate> compoundPredicates) {
if (expr == null) {
return;
}
if (expr instanceof CompoundPredicate) {
CompoundPredicate cp = (CompoundPredicate) expr;
if (cp.getOp() == CompoundPredicate.Operator.AND) {
if (cp.getChildren().size() == 2 && cp.getChild(0) instanceof BinaryPredicate &&
cp.getChild(1) instanceof BinaryPredicate) {
BinaryPredicate leftPre = (BinaryPredicate) cp.getChild(0);
BinaryPredicate rightPre = (BinaryPredicate) cp.getChild(1);
String leftColumn = getColumnName(leftPre);
String rightColumn = getColumnName(rightPre);
if (leftColumn.equalsIgnoreCase(partColumn.getName()) &&
rightColumn.equalsIgnoreCase(partColumn.getName())) {
compoundPredicates.add(cp);
}
}
}
for (Expr subExpr : expr.getChildren()) {
getPartitionKeyFromWhereClause(subExpr, partColumn, compoundPredicates);
}
}
}
/**
 * Extracts the column name from the left child of a binary predicate, looking
 * through a single CAST if present. Returns "" when the left child is neither
 * a slot nor a cast of a slot.
 */
private String getColumnName(BinaryPredicate predicate) {
    Expr left = predicate.getChild(0);
    if (left instanceof CastExpr) {
        left = ((CastExpr) left).getChild(0);
    }
    if (left instanceof SlotRef) {
        return ((SlotRef) left).getColumnName();
    }
    return "";
}
/**
 * Checks that the select statement (and every inline view it references)
 * groups by the partition key:
 * 1. there is at least one GROUP BY clause, and
 * 2. every GROUP BY clause contains the partition key column.
 */
/**
 * Returns true when this statement (including all nested inline views) groups
 * by the partition key: at least one aggregation has grouping expressions, and
 * every grouping-expression list contains the partition column as a plain slot.
 */
private boolean checkGroupByPartitionKey(SelectStmt stmt, Column partColumn) {
    List<AggregateInfo> aggInfoList = Lists.newArrayList();
    getAggInfoList(stmt, aggInfoList);
    int groupbyCount = 0;
    for (AggregateInfo aggInfo : aggInfoList) {
        // COUNT(DISTINCT xxx) is supported now; the distinct-agg rejection that
        // used to live here (commented out) has been removed.
        ArrayList<Expr> groupExprs = aggInfo.getGroupingExprs();
        if (groupExprs == null) {
            continue;
        }
        groupbyCount += 1;
        boolean matched = false;
        for (Expr groupExpr : groupExprs) {
            // A grouping expression may be an arbitrary expression (e.g. a
            // function call); only a plain SlotRef can match the partition key.
            // The previous unconditional (SlotRef) cast threw
            // ClassCastException for such expressions.
            if (!(groupExpr instanceof SlotRef)) {
                continue;
            }
            if (partColumn.getName().equals(((SlotRef) groupExpr).getColumnName())) {
                matched = true;
                break;
            }
        }
        if (!matched) {
            return false;
        }
    }
    return groupbyCount > 0;
}
/** Accumulates the AggregateInfo of this statement and of every inline-view sub-select. */
private void getAggInfoList(SelectStmt stmt, List<AggregateInfo> aggInfoList) {
    AggregateInfo ownAggInfo = stmt.getAggInfo();
    if (ownAggInfo != null) {
        aggInfoList.add(ownAggInfo);
    }
    for (TableRef ref : stmt.getTableRefs()) {
        if (ref instanceof InlineViewRef) {
            QueryStmt nested = ((InlineViewRef) ref).getViewStmt();
            if (nested instanceof SelectStmt) {
                getAggInfoList((SelectStmt) nested, aggInfoList);
            }
        }
    }
}
/**
 * Builds a CacheTable for the given OLAP table, recording the partition with the
 * most recent visible-version time (">=" means a later partition wins on ties).
 */
private CacheTable getLastUpdateTime(OlapTable olapTable) {
    CacheTable result = new CacheTable();
    result.olapTable = olapTable;
    for (Partition candidate : olapTable.getPartitions()) {
        long visibleTime = candidate.getVisibleVersionTime();
        if (visibleTime >= result.latestTime) {
            result.latestPartitionId = candidate.getId();
            result.latestTime = visibleTime;
            result.latestVersion = candidate.getVisibleVersion();
        }
    }
    return result;
}
/**
 * Walks the table refs and records the view definitions they use: local
 * (WITH-clause) views are expanded recursively through their analyzer, while
 * catalog views are recursed into and their inline definition is collected.
 */
private void addAllViewStmt(List<TableRef> tblRefs) {
    for (TableRef ref : tblRefs) {
        if (!(ref instanceof InlineViewRef)) {
            continue;
        }
        InlineViewRef inlineView = (InlineViewRef) ref;
        if (inlineView.isLocalView()) {
            for (View localView : inlineView.getAnalyzer().getLocalViews().values()) {
                addAllViewStmt(localView.getQueryStmt());
            }
        } else {
            addAllViewStmt(inlineView.getViewStmt());
            allViewStmtSet.add(inlineView.getView().getInlineViewDef());
        }
    }
}
/**
 * Recursively collects view definitions reachable from the given query: for a
 * SELECT its table refs are scanned; for a set operation (UNION etc.) each
 * operand's query is processed in turn.
 */
private void addAllViewStmt(QueryStmt queryStmt) {
    if (queryStmt instanceof SelectStmt) {
        addAllViewStmt(((SelectStmt) queryStmt).getTableRefs());
    } else if (queryStmt instanceof SetOperationStmt) {
        for (SetOperationStmt.SetOperand operand : ((SetOperationStmt) queryStmt).getOperands()) {
            // An operand's query may itself be a set operation, so recurse via
            // this dispatching overload instead of casting straight to
            // SelectStmt (the old unconditional cast threw ClassCastException
            // on nested set operations). For SelectStmt operands the behavior
            // is unchanged.
            addAllViewStmt(operand.getQueryStmt());
        }
    }
}
/** Returns the cache hit range, or Cache.HitRange.None when caching is disabled. */
public Cache.HitRange getHitRange() {
    return cacheMode == CacheMode.None ? Cache.HitRange.None : cache.getHitRange();
}
/** Returns the rewritten statement for partition-cache mode, or null for any other mode. */
public SelectStmt getRewriteStmt() {
    return cacheMode == CacheMode.Partition ? cache.getRewriteStmt() : null;
}
/** Copies the row batch into the cache unless caching is disabled or unnecessary. */
public void copyRowBatch(RowBatch rowBatch) {
    boolean cachingInactive = cacheMode == CacheMode.None || cacheMode == CacheMode.NoNeed;
    if (!cachingInactive) {
        cache.copyRowBatch(rowBatch);
    }
}
/** Pushes the accumulated result rows into the cache unless caching is disabled or unnecessary. */
public void updateCache() {
    boolean cachingInactive = cacheMode == CacheMode.None || cacheMode == CacheMode.NoNeed;
    if (!cachingInactive) {
        cache.updateCache();
    }
}
} |
Ah, never mind, the include processor is run first. | private void preProcessAndPopulateCache() {
FileWrapper servicesXml = files.wrapper().wrap(Paths.get(servicesFile));
if (servicesXml.exists())
try {
new XmlPreProcessor(files.wrapper().wrap(Paths.get("./")),
new InputStreamReader(new ByteArrayInputStream(servicesXml.content()), UTF_8),
InstanceName.defaultName(),
Environment.prod,
RegionName.defaultName(),
Tags.empty())
.run();
}
catch (IllegalArgumentException e) {
throw e;
}
catch (Exception e) {
throw new IllegalArgumentException(e);
}
} | Tags.empty()) | private void preProcessAndPopulateCache() {
FileWrapper servicesXml = files.wrapper().wrap(Paths.get(servicesFile));
if (servicesXml.exists())
try {
new XmlPreProcessor(files.wrapper().wrap(Paths.get("./")),
new InputStreamReader(new ByteArrayInputStream(servicesXml.content()), UTF_8),
InstanceName.defaultName(),
Environment.prod,
RegionName.defaultName(),
Tags.empty())
.run();
}
catch (IllegalArgumentException e) {
throw e;
}
catch (Exception e) {
throw new IllegalArgumentException(e);
}
} | class ApplicationPackage {
private static final String trustedCertificatesFile = "security/clients.pem";
private static final String buildMetaFile = "build-meta.json";
static final String deploymentFile = "deployment.xml";
private static final String validationOverridesFile = "validation-overrides.xml";
static final String servicesFile = "services.xml";
private final String contentHash;
private final String bundleHash;
private final byte[] zippedContent;
private final DeploymentSpec deploymentSpec;
private final ValidationOverrides validationOverrides;
private final ZipArchiveCache files;
private final Optional<Version> compileVersion;
private final Optional<Instant> buildTime;
private final Optional<Version> parentVersion;
private final List<X509Certificate> trustedCertificates;
/**
* Creates an application package from its zipped content.
* This <b>assigns ownership</b> of the given byte array to this class;
* it must not be further changed by the caller.
*/
public ApplicationPackage(byte[] zippedContent) {
this(zippedContent, false);
}
/**
* Creates an application package from its zipped content.
* This <b>assigns ownership</b> of the given byte array to this class;
* it must not be further changed by the caller.
* If 'requireFiles' is true, files needed by deployment orchestration must be present.
*/
@SuppressWarnings("deprecation")
public ApplicationPackage(byte[] zippedContent, boolean requireFiles) {
this.zippedContent = Objects.requireNonNull(zippedContent, "The application package content cannot be null");
this.contentHash = Hashing.sha1().hashBytes(zippedContent).toString();
this.files = new ZipArchiveCache(zippedContent, Set.of(deploymentFile, validationOverridesFile, servicesFile, buildMetaFile, trustedCertificatesFile));
Optional<DeploymentSpec> deploymentSpec = files.get(deploymentFile).map(bytes -> new String(bytes, UTF_8)).map(DeploymentSpec::fromXml);
if (requireFiles && deploymentSpec.isEmpty())
throw new IllegalArgumentException("Missing required file '" + deploymentFile + "'");
this.deploymentSpec = deploymentSpec.orElse(DeploymentSpec.empty);
this.validationOverrides = files.get(validationOverridesFile).map(bytes -> new String(bytes, UTF_8)).map(ValidationOverrides::fromXml).orElse(ValidationOverrides.empty);
Optional<Inspector> buildMetaObject = files.get(buildMetaFile).map(SlimeUtils::jsonToSlime).map(Slime::get);
this.compileVersion = buildMetaObject.flatMap(object -> parse(object, "compileVersion", field -> Version.fromString(field.asString())));
this.buildTime = buildMetaObject.flatMap(object -> parse(object, "buildTime", field -> Instant.ofEpochMilli(field.asLong())));
this.parentVersion = buildMetaObject.flatMap(object -> parse(object, "parentVersion", field -> Version.fromString(field.asString())));
this.trustedCertificates = files.get(trustedCertificatesFile).map(bytes -> X509CertificateUtils.certificateListFromPem(new String(bytes, UTF_8))).orElse(List.of());
this.bundleHash = calculateBundleHash(zippedContent);
preProcessAndPopulateCache();
}
/** Returns a copy of this package whose trusted-certificates file additionally contains the given certificate. */
public ApplicationPackage withTrustedCertificate(X509Certificate certificate) {
    List<X509Certificate> updated = new ArrayList<>(this.trustedCertificates);
    updated.add(certificate);
    byte[] pemBytes = X509CertificateUtils.toPem(updated).getBytes(UTF_8);
    ByteArrayOutputStream rezipped = new ByteArrayOutputStream(zippedContent.length + pemBytes.length);
    ZipEntries.transferAndWrite(rezipped, new ByteArrayInputStream(zippedContent), trustedCertificatesFile, pemBytes);
    return new ApplicationPackage(rezipped.toByteArray());
}
/** Returns a hash of the content of this package */
public String hash() { return contentHash; }
/** Hash of all files and settings that influence what is deployed to config servers. */
public String bundleHash() {
return bundleHash;
}
/** Returns the content of this package. The content <b>must not</b> be modified. */
public byte[] zippedContent() { return zippedContent; }
/**
* Returns the deployment spec from the deployment.xml file of the package content.<br>
* This is the DeploymentSpec.empty instance if this package does not contain a deployment.xml file.<br>
* <em>NB: <strong>Always</strong> read deployment spec from the {@link Application}, for deployment orchestration.</em>
*/
public DeploymentSpec deploymentSpec() { return deploymentSpec; }
/**
* Returns the validation overrides from the validation-overrides.xml file of the package content.
* This is the ValidationOverrides.empty instance if this package does not contain a validation-overrides.xml file.
*/
public ValidationOverrides validationOverrides() { return validationOverrides; }
/** Returns the platform version which package was compiled against, if known. */
public Optional<Version> compileVersion() { return compileVersion; }
/** Returns the time this package was built, if known. */
public Optional<Instant> buildTime() { return buildTime; }
/** Returns the parent version used to compile the package, if known. */
public Optional<Version> parentVersion() { return parentVersion; }
/** Returns the list of certificates trusted by this application, or an empty list if no trust configured. */
public List<X509Certificate> trustedCertificates() {
return trustedCertificates;
}
/**
 * Reads and converts an optional field from the build-meta object.
 *
 * @return empty when the field is missing or NIX, otherwise the mapped value
 * @throws IllegalArgumentException when the mapper rejects the field's content
 */
private static <Type> Optional<Type> parse(Inspector buildMetaObject, String fieldName, Function<Inspector, Type> mapper) {
    Inspector field = buildMetaObject.field(fieldName);
    if ( ! field.valid() || field.type() == NIX)
        return Optional.empty();
    try {
        // Reuse the already-fetched field instead of looking it up a second time.
        return Optional.of(mapper.apply(field));
    }
    catch (RuntimeException e) {
        throw new IllegalArgumentException("Failed parsing \"" + fieldName + "\" in '" + buildMetaFile + "': " + Exceptions.toMessageString(e));
    }
}
/** Creates a valid application package that will remove all application's deployments */
public static ApplicationPackage deploymentRemoval() {
return new ApplicationPackage(filesZip(Map.of(validationOverridesFile, allValidationOverrides().xmlForm().getBytes(UTF_8),
deploymentFile, DeploymentSpec.empty.xmlForm().getBytes(UTF_8))));
}
/** Returns a zip containing meta data about deployments of this package by the given job. */
public byte[] metaDataZip() {
return cacheZip();
}
private byte[] cacheZip() {
return filesZip(files.cache.entrySet().stream()
.filter(entry -> entry.getValue().isPresent())
.collect(toMap(entry -> entry.getKey().toString(),
entry -> entry.getValue().get())));
}
/** Zips the given name-to-content map into a single archive and returns its bytes. */
public static byte[] filesZip(Map<String, byte[]> files) {
    int contentSize = files.values().stream().mapToInt(bytes -> bytes.length).sum();
    try (ZipBuilder zipBuilder = new ZipBuilder(contentSize + 512)) {
        files.forEach(zipBuilder::add);
        zipBuilder.close();
        return zipBuilder.toByteArray();
    }
}
/** Builds a validation-overrides document that allows every known validation id for the next 25 days. */
private static ValidationOverrides allValidationOverrides() {
    String until = DateTimeFormatter.ISO_LOCAL_DATE.format(Instant.now().plus(Duration.ofDays(25)).atZone(ZoneOffset.UTC));
    StringBuilder xml = new StringBuilder(1000);
    xml.append("<validation-overrides version=\"1.0\">\n");
    for (ValidationId validationId : ValidationId.values()) {
        xml.append("\t<allow until=\"").append(until).append("\">")
           .append(validationId.value()).append("</allow>\n");
    }
    xml.append("</validation-overrides>\n");
    return ValidationOverrides.fromXml(xml.toString());
}
/**
 * Hashes everything that influences what is deployed to the config servers:
 * the CRC32 of every zip entry except deployment.xml and build-meta.json,
 * combined with the deployable parts of the deployment spec.
 */
@SuppressWarnings("deprecation") // SHA-1 is deprecated in the hashing library; changing the algorithm would change all existing hashes
private String calculateBundleHash(byte[] zippedContent) {
    // Exclude the two files that may differ between otherwise identical bundles.
    Predicate<String> entryMatcher = name -> ! name.endsWith(deploymentFile) && ! name.endsWith(buildMetaFile);
    SortedMap<String, Long> crcByEntry = new TreeMap<>(); // sorted keys make the hash independent of archive order
    Options options = Options.standard().pathPredicate(entryMatcher);
    ArchiveFile file;
    try (ArchiveStreamReader reader = ArchiveStreamReader.ofZip(new ByteArrayInputStream(zippedContent), options)) {
        OutputStream discard = OutputStream.nullOutputStream(); // entry contents are not needed, only their CRCs
        while ((file = reader.readNextTo(discard)) != null) {
            crcByEntry.put(file.path().toString(), file.crc32().orElse(-1)); // -1 marks an entry without a CRC
        }
    }
    Funnel<SortedMap<String, Long>> funnel = (from, into) -> from.forEach((key, value) -> {
        into.putBytes(key.getBytes());
        into.putLong(value);
    });
    return Hashing.sha1().newHasher()
                  .putObject(crcByEntry, funnel)
                  .putInt(deploymentSpec.deployableHashCode())
                  .hash().toString();
}
/** Returns the SHA-1 hex digest of the given bytes. */
@SuppressWarnings("deprecation")
public static String calculateHash(byte[] bytes) {
    return Hashing.sha1().newHasher().putBytes(bytes).hash().toString();
}
/** Maps normalized paths to cached content read from a zip archive. */
private static class ZipArchiveCache {

    /** Max size of each extracted file */
    private static final int maxSize = 10 << 20;

    private final byte[] zip;
    private final Map<Path, Optional<byte[]>> cache;

    public ZipArchiveCache(byte[] zip, Collection<String> prePopulated) {
        this.zip = zip;
        this.cache = new ConcurrentSkipListMap<>();
        this.cache.putAll(read(prePopulated));
    }

    public Optional<byte[]> get(String path) {
        return get(Paths.get(path));
    }

    public Optional<byte[]> get(Path path) {
        // Scan the zip only when the entry is not already cached. The previous
        // version evaluated read(...) eagerly while constructing a bound
        // method reference, re-reading the archive even on cache hits.
        return cache.computeIfAbsent(path.normalize(),
                                     normalized -> read(List.of(normalized.toString())).get(normalized));
    }

    public FileSystemWrapper wrapper() {
        return FileSystemWrapper.ofFiles(Path.of("./"),
                                         path -> get(path).isPresent(),
                                         path -> get(path).orElseThrow(() -> new NoSuchFileException(path.toString())));
    }

    /** Reads the named entries from the zip; requested names not present in the archive map to Optional.empty(). */
    private Map<Path, Optional<byte[]>> read(Collection<String> names) {
        var entries = ZipEntries.from(zip,
                                      names::contains,
                                      maxSize,
                                      true)
                                .asList().stream()
                                .collect(toMap(entry -> Paths.get(entry.name()).normalize(),
                                               ZipEntries.ZipEntryWithContent::content));
        names.stream().map(Paths::get).forEach(path -> entries.putIfAbsent(path.normalize(), Optional.empty()));
        return entries;
    }

}
} | class ApplicationPackage {
private static final String trustedCertificatesFile = "security/clients.pem";
private static final String buildMetaFile = "build-meta.json";
static final String deploymentFile = "deployment.xml";
private static final String validationOverridesFile = "validation-overrides.xml";
static final String servicesFile = "services.xml";
private final String contentHash;
private final String bundleHash;
private final byte[] zippedContent;
private final DeploymentSpec deploymentSpec;
private final ValidationOverrides validationOverrides;
private final ZipArchiveCache files;
private final Optional<Version> compileVersion;
private final Optional<Instant> buildTime;
private final Optional<Version> parentVersion;
private final List<X509Certificate> trustedCertificates;
/**
* Creates an application package from its zipped content.
* This <b>assigns ownership</b> of the given byte array to this class;
* it must not be further changed by the caller.
*/
public ApplicationPackage(byte[] zippedContent) {
this(zippedContent, false);
}
/**
* Creates an application package from its zipped content.
* This <b>assigns ownership</b> of the given byte array to this class;
* it must not be further changed by the caller.
* If 'requireFiles' is true, files needed by deployment orchestration must be present.
*/
@SuppressWarnings("deprecation")
public ApplicationPackage(byte[] zippedContent, boolean requireFiles) {
this.zippedContent = Objects.requireNonNull(zippedContent, "The application package content cannot be null");
this.contentHash = Hashing.sha1().hashBytes(zippedContent).toString();
this.files = new ZipArchiveCache(zippedContent, Set.of(deploymentFile, validationOverridesFile, servicesFile, buildMetaFile, trustedCertificatesFile));
Optional<DeploymentSpec> deploymentSpec = files.get(deploymentFile).map(bytes -> new String(bytes, UTF_8)).map(DeploymentSpec::fromXml);
if (requireFiles && deploymentSpec.isEmpty())
throw new IllegalArgumentException("Missing required file '" + deploymentFile + "'");
this.deploymentSpec = deploymentSpec.orElse(DeploymentSpec.empty);
this.validationOverrides = files.get(validationOverridesFile).map(bytes -> new String(bytes, UTF_8)).map(ValidationOverrides::fromXml).orElse(ValidationOverrides.empty);
Optional<Inspector> buildMetaObject = files.get(buildMetaFile).map(SlimeUtils::jsonToSlime).map(Slime::get);
this.compileVersion = buildMetaObject.flatMap(object -> parse(object, "compileVersion", field -> Version.fromString(field.asString())));
this.buildTime = buildMetaObject.flatMap(object -> parse(object, "buildTime", field -> Instant.ofEpochMilli(field.asLong())));
this.parentVersion = buildMetaObject.flatMap(object -> parse(object, "parentVersion", field -> Version.fromString(field.asString())));
this.trustedCertificates = files.get(trustedCertificatesFile).map(bytes -> X509CertificateUtils.certificateListFromPem(new String(bytes, UTF_8))).orElse(List.of());
this.bundleHash = calculateBundleHash(zippedContent);
preProcessAndPopulateCache();
}
/** Returns a copy of this with the given certificate appended. */
public ApplicationPackage withTrustedCertificate(X509Certificate certificate) {
List<X509Certificate> trustedCertificates = new ArrayList<>(this.trustedCertificates);
trustedCertificates.add(certificate);
byte[] certificatesBytes = X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8);
ByteArrayOutputStream modified = new ByteArrayOutputStream(zippedContent.length + certificatesBytes.length);
ZipEntries.transferAndWrite(modified, new ByteArrayInputStream(zippedContent), trustedCertificatesFile, certificatesBytes);
return new ApplicationPackage(modified.toByteArray());
}
/** Returns a hash of the content of this package */
public String hash() { return contentHash; }
/** Hash of all files and settings that influence what is deployed to config servers. */
public String bundleHash() {
return bundleHash;
}
/** Returns the content of this package. The content <b>must not</b> be modified. */
public byte[] zippedContent() { return zippedContent; }
/**
* Returns the deployment spec from the deployment.xml file of the package content.<br>
* This is the DeploymentSpec.empty instance if this package does not contain a deployment.xml file.<br>
* <em>NB: <strong>Always</strong> read deployment spec from the {@link Application}, for deployment orchestration.</em>
*/
public DeploymentSpec deploymentSpec() { return deploymentSpec; }
/**
* Returns the validation overrides from the validation-overrides.xml file of the package content.
* This is the ValidationOverrides.empty instance if this package does not contain a validation-overrides.xml file.
*/
public ValidationOverrides validationOverrides() { return validationOverrides; }
/** Returns the platform version which package was compiled against, if known. */
public Optional<Version> compileVersion() { return compileVersion; }
/** Returns the time this package was built, if known. */
public Optional<Instant> buildTime() { return buildTime; }
/** Returns the parent version used to compile the package, if known. */
public Optional<Version> parentVersion() { return parentVersion; }
/** Returns the list of certificates trusted by this application, or an empty list if no trust configured. */
public List<X509Certificate> trustedCertificates() {
return trustedCertificates;
}
private static <Type> Optional<Type> parse(Inspector buildMetaObject, String fieldName, Function<Inspector, Type> mapper) {
Inspector field = buildMetaObject.field(fieldName);
if ( ! field.valid() || field.type() == NIX)
return Optional.empty();
try {
return Optional.of(mapper.apply(buildMetaObject.field(fieldName)));
}
catch (RuntimeException e) {
throw new IllegalArgumentException("Failed parsing \"" + fieldName + "\" in '" + buildMetaFile + "': " + Exceptions.toMessageString(e));
}
}
/** Creates a valid application package that will remove all application's deployments */
public static ApplicationPackage deploymentRemoval() {
return new ApplicationPackage(filesZip(Map.of(validationOverridesFile, allValidationOverrides().xmlForm().getBytes(UTF_8),
deploymentFile, DeploymentSpec.empty.xmlForm().getBytes(UTF_8))));
}
/** Returns a zip containing meta data about deployments of this package by the given job. */
public byte[] metaDataZip() {
return cacheZip();
}
private byte[] cacheZip() {
return filesZip(files.cache.entrySet().stream()
.filter(entry -> entry.getValue().isPresent())
.collect(toMap(entry -> entry.getKey().toString(),
entry -> entry.getValue().get())));
}
public static byte[] filesZip(Map<String, byte[]> files) {
try (ZipBuilder zipBuilder = new ZipBuilder(files.values().stream().mapToInt(bytes -> bytes.length).sum() + 512)) {
files.forEach(zipBuilder::add);
zipBuilder.close();
return zipBuilder.toByteArray();
}
}
private static ValidationOverrides allValidationOverrides() {
String until = DateTimeFormatter.ISO_LOCAL_DATE.format(Instant.now().plus(Duration.ofDays(25)).atZone(ZoneOffset.UTC));
StringBuilder validationOverridesContents = new StringBuilder(1000);
validationOverridesContents.append("<validation-overrides version=\"1.0\">\n");
for (ValidationId validationId: ValidationId.values())
validationOverridesContents.append("\t<allow until=\"").append(until).append("\">").append(validationId.value()).append("</allow>\n");
validationOverridesContents.append("</validation-overrides>\n");
return ValidationOverrides.fromXml(validationOverridesContents.toString());
}
@SuppressWarnings("deprecation")
private String calculateBundleHash(byte[] zippedContent) {
Predicate<String> entryMatcher = name -> ! name.endsWith(deploymentFile) && ! name.endsWith(buildMetaFile);
SortedMap<String, Long> crcByEntry = new TreeMap<>();
Options options = Options.standard().pathPredicate(entryMatcher);
ArchiveFile file;
try (ArchiveStreamReader reader = ArchiveStreamReader.ofZip(new ByteArrayInputStream(zippedContent), options)) {
OutputStream discard = OutputStream.nullOutputStream();
while ((file = reader.readNextTo(discard)) != null) {
crcByEntry.put(file.path().toString(), file.crc32().orElse(-1));
}
}
Funnel<SortedMap<String, Long>> funnel = (from, into) -> from.forEach((key, value) -> {
into.putBytes(key.getBytes());
into.putLong(value);
});
return Hashing.sha1().newHasher()
.putObject(crcByEntry, funnel)
.putInt(deploymentSpec.deployableHashCode())
.hash().toString();
}
@SuppressWarnings("deprecation")
public static String calculateHash(byte[] bytes) {
return Hashing.sha1().newHasher()
.putBytes(bytes)
.hash().toString();
}
/** Maps normalized paths to cached content read from a zip archive. */
private static class ZipArchiveCache {
/** Max size of each extracted file */
private static final int maxSize = 10 << 20;
private final byte[] zip;
private final Map<Path, Optional<byte[]>> cache;
public ZipArchiveCache(byte[] zip, Collection<String> prePopulated) {
this.zip = zip;
this.cache = new ConcurrentSkipListMap<>();
this.cache.putAll(read(prePopulated));
}
public Optional<byte[]> get(String path) {
return get(Paths.get(path));
}
public Optional<byte[]> get(Path path) {
return cache.computeIfAbsent(path.normalize(), read(List.of(path.normalize().toString()))::get);
}
public FileSystemWrapper wrapper() {
return FileSystemWrapper.ofFiles(Path.of("./"),
path -> get(path).isPresent(),
path -> get(path).orElseThrow(() -> new NoSuchFileException(path.toString())));
}
private Map<Path, Optional<byte[]>> read(Collection<String> names) {
var entries = ZipEntries.from(zip,
name -> names.contains(name),
maxSize,
true)
.asList().stream()
.collect(toMap(entry -> Paths.get(entry.name()).normalize(),
ZipEntries.ZipEntryWithContent::content));
names.stream().map(Paths::get).forEach(path -> entries.putIfAbsent(path.normalize(), Optional.empty()));
return entries;
}
}
} |
Sorry, this wasn't the relevant one. The `fromJson` is it. I'll make a PR for you :) | public static PrepareParams fromHttpRequest(HttpRequest request, TenantName tenant, Duration barrierTimeout) {
return new Builder().ignoreValidationErrors(request.getBooleanProperty(IGNORE_VALIDATION_PARAM_NAME))
.dryRun(request.getBooleanProperty(DRY_RUN_PARAM_NAME))
.verbose(request.getBooleanProperty(VERBOSE_PARAM_NAME))
.timeoutBudget(SessionHandler.getTimeoutBudget(request, barrierTimeout))
.applicationId(createApplicationId(request, tenant))
.tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME)))
.vespaVersion(request.getProperty(VESPA_VERSION_PARAM_NAME))
.containerEndpoints(request.getProperty(CONTAINER_ENDPOINTS_PARAM_NAME))
.endpointCertificateMetadata(request.getProperty(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME))
.dockerImageRepository(request.getProperty(DOCKER_IMAGE_REPOSITORY))
.athenzDomain(request.getProperty(ATHENZ_DOMAIN))
.quota(request.getProperty(QUOTA_PARAM_NAME))
.tenantSecretStores(request.getProperty(TENANT_SECRET_STORES_PARAM_NAME))
.force(request.getBooleanProperty(FORCE_PARAM_NAME))
.waitForResourcesInPrepare(request.getBooleanProperty(WAIT_FOR_RESOURCES_IN_PREPARE))
.build();
} | .tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME))) | public static PrepareParams fromHttpRequest(HttpRequest request, TenantName tenant, Duration barrierTimeout) {
return new Builder().ignoreValidationErrors(request.getBooleanProperty(IGNORE_VALIDATION_PARAM_NAME))
.dryRun(request.getBooleanProperty(DRY_RUN_PARAM_NAME))
.verbose(request.getBooleanProperty(VERBOSE_PARAM_NAME))
.timeoutBudget(SessionHandler.getTimeoutBudget(request, barrierTimeout))
.applicationId(createApplicationId(request, tenant))
.tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME)))
.vespaVersion(request.getProperty(VESPA_VERSION_PARAM_NAME))
.containerEndpoints(request.getProperty(CONTAINER_ENDPOINTS_PARAM_NAME))
.endpointCertificateMetadata(request.getProperty(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME))
.dockerImageRepository(request.getProperty(DOCKER_IMAGE_REPOSITORY))
.athenzDomain(request.getProperty(ATHENZ_DOMAIN))
.quota(request.getProperty(QUOTA_PARAM_NAME))
.tenantSecretStores(request.getProperty(TENANT_SECRET_STORES_PARAM_NAME))
.force(request.getBooleanProperty(FORCE_PARAM_NAME))
.waitForResourcesInPrepare(request.getBooleanProperty(WAIT_FOR_RESOURCES_IN_PREPARE))
.build();
} | class Builder {
private boolean ignoreValidationErrors = false;
private boolean dryRun = false;
private boolean verbose = false;
private boolean isBootstrap = false;
private boolean force = false;
private boolean waitForResourcesInPrepare = false;
private ApplicationId applicationId = null;
private Tags tags = Tags.empty();
private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60));
private Optional<Version> vespaVersion = Optional.empty();
private List<ContainerEndpoint> containerEndpoints = null;
private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
private Optional<DockerImage> dockerImageRepository = Optional.empty();
private Optional<AthenzDomain> athenzDomain = Optional.empty();
private Optional<Quota> quota = Optional.empty();
private List<TenantSecretStore> tenantSecretStores = List.of();
private List<X509Certificate> operatorCertificates = List.of();
private Optional<CloudAccount> cloudAccount = Optional.empty();
public Builder() { }
public Builder applicationId(ApplicationId applicationId) {
this.applicationId = applicationId;
return this;
}
public Builder tags(Tags tags) {
this.tags = tags;
return this;
}
public Builder ignoreValidationErrors(boolean ignoreValidationErrors) {
this.ignoreValidationErrors = ignoreValidationErrors;
return this;
}
public Builder dryRun(boolean dryRun) {
this.dryRun = dryRun;
return this;
}
public Builder verbose(boolean verbose) {
this.verbose = verbose;
return this;
}
public Builder isBootstrap(boolean isBootstrap) {
this.isBootstrap = isBootstrap;
return this;
}
public Builder timeoutBudget(TimeoutBudget timeoutBudget) {
this.timeoutBudget = timeoutBudget;
return this;
}
/** Sets the Vespa version from its string form; null or empty means no version. */
public Builder vespaVersion(String vespaVersion) {
    this.vespaVersion = Optional.ofNullable(vespaVersion)
                                .filter(version -> !version.isEmpty())
                                .map(Version::fromString);
    return this;
}
public Builder vespaVersion(Version vespaVersion) {
this.vespaVersion = Optional.ofNullable(vespaVersion);
return this;
}
/** Sets the container endpoints from their serialized JSON form; null means an empty list. */
public Builder containerEndpoints(String serialized) {
    if (serialized == null) {
        this.containerEndpoints = List.of();
    } else {
        this.containerEndpoints = ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
    }
    return this;
}
public Builder containerEndpointList(List<ContainerEndpoint> endpoints) {
this.containerEndpoints = endpoints;
return this;
}
public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) {
this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata);
return this;
}
public Builder endpointCertificateMetadata(String serialized) {
this.endpointCertificateMetadata = (serialized == null)
? Optional.empty()
: Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
return this;
}
public Builder dockerImageRepository(String dockerImageRepository) {
this.dockerImageRepository = (dockerImageRepository == null)
? Optional.empty()
: Optional.of(DockerImage.fromString(dockerImageRepository));
return this;
}
public Builder dockerImageRepository(DockerImage dockerImageRepository) {
this.dockerImageRepository = Optional.ofNullable(dockerImageRepository);
return this;
}
public Builder athenzDomain(String athenzDomain) {
this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from);
return this;
}
public Builder athenzDomain(AthenzDomain athenzDomain) {
this.athenzDomain = Optional.ofNullable(athenzDomain);
return this;
}
public Builder quota(Quota quota) {
this.quota = Optional.ofNullable(quota);
return this;
}
public Builder quota(String serialized) {
this.quota = (serialized == null)
? Optional.empty()
: Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
return this;
}
public Builder tenantSecretStores(String serialized) {
List<TenantSecretStore> secretStores = (serialized == null)
? List.of()
: TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get());
return tenantSecretStores(secretStores);
}
public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) {
this.tenantSecretStores = tenantSecretStores;
return this;
}
public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) {
this.waitForResourcesInPrepare = waitForResourcesInPrepare;
return this;
}
public Builder force(boolean force) {
this.force = force;
return this;
}
public Builder operatorCertificates(List<X509Certificate> operatorCertificates) {
this.operatorCertificates = List.copyOf(operatorCertificates);
return this;
}
public Builder cloudAccount(CloudAccount cloudAccount) {
this.cloudAccount = Optional.ofNullable(cloudAccount);
return this;
}
public PrepareParams build() {
return new PrepareParams(applicationId,
tags,
timeoutBudget,
ignoreValidationErrors,
dryRun,
verbose,
isBootstrap,
vespaVersion,
containerEndpoints,
endpointCertificateMetadata,
dockerImageRepository,
athenzDomain,
quota,
tenantSecretStores,
force,
waitForResourcesInPrepare,
operatorCertificates,
cloudAccount);
}
} | class Builder {
// Mutable collector of prepare parameters; every setter returns this for chaining,
// and fields keep the defaults below when a setter is never called.
private boolean ignoreValidationErrors = false;
private boolean dryRun = false;
private boolean verbose = false;
private boolean isBootstrap = false;
private boolean force = false;
private boolean waitForResourcesInPrepare = false;
private ApplicationId applicationId = null;
private Tags tags = Tags.empty();
private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60));
private Optional<Version> vespaVersion = Optional.empty();
private List<ContainerEndpoint> containerEndpoints = null;
private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
private Optional<DockerImage> dockerImageRepository = Optional.empty();
private Optional<AthenzDomain> athenzDomain = Optional.empty();
private Optional<Quota> quota = Optional.empty();
private List<TenantSecretStore> tenantSecretStores = List.of();
private List<X509Certificate> operatorCertificates = List.of();
private Optional<CloudAccount> cloudAccount = Optional.empty();

public Builder() { }

public Builder applicationId(ApplicationId applicationId) { this.applicationId = applicationId; return this; }

public Builder tags(Tags tags) { this.tags = tags; return this; }

public Builder ignoreValidationErrors(boolean ignoreValidationErrors) { this.ignoreValidationErrors = ignoreValidationErrors; return this; }

public Builder dryRun(boolean dryRun) { this.dryRun = dryRun; return this; }

public Builder verbose(boolean verbose) { this.verbose = verbose; return this; }

public Builder isBootstrap(boolean isBootstrap) { this.isBootstrap = isBootstrap; return this; }

public Builder timeoutBudget(TimeoutBudget timeoutBudget) { this.timeoutBudget = timeoutBudget; return this; }

/** Accepts a version string; null or empty means "unspecified". */
public Builder vespaVersion(String vespaVersion) {
    this.vespaVersion = Optional.ofNullable(vespaVersion)
                                .filter(s -> ! s.isEmpty())
                                .map(Version::fromString);
    return this;
}

public Builder vespaVersion(Version vespaVersion) { this.vespaVersion = Optional.ofNullable(vespaVersion); return this; }

/** Deserializes the JSON endpoint list; null means "no endpoints". */
public Builder containerEndpoints(String serialized) {
    if (serialized == null)
        this.containerEndpoints = List.of();
    else
        this.containerEndpoints = ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
    return this;
}

public Builder containerEndpointList(List<ContainerEndpoint> endpoints) { this.containerEndpoints = endpoints; return this; }

public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) { this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata); return this; }

/** Deserializes JSON endpoint-certificate metadata; null means "absent". */
public Builder endpointCertificateMetadata(String serialized) {
    this.endpointCertificateMetadata = Optional.ofNullable(serialized)
                                               .map(s -> EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(s).get()));
    return this;
}

public Builder dockerImageRepository(String dockerImageRepository) {
    this.dockerImageRepository = Optional.ofNullable(dockerImageRepository).map(DockerImage::fromString);
    return this;
}

public Builder dockerImageRepository(DockerImage dockerImageRepository) { this.dockerImageRepository = Optional.ofNullable(dockerImageRepository); return this; }

public Builder athenzDomain(String athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from); return this; }

public Builder athenzDomain(AthenzDomain athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain); return this; }

public Builder quota(Quota quota) { this.quota = Optional.ofNullable(quota); return this; }

/** Deserializes a JSON quota; null means "no quota". */
public Builder quota(String serialized) {
    this.quota = Optional.ofNullable(serialized)
                         .map(s -> Quota.fromSlime(SlimeUtils.jsonToSlime(s).get()));
    return this;
}

/** Deserializes the JSON secret-store list; null means "none". */
public Builder tenantSecretStores(String serialized) {
    List<TenantSecretStore> secretStores = (serialized == null)
            ? List.of()
            : TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get());
    return tenantSecretStores(secretStores);
}

public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) { this.tenantSecretStores = tenantSecretStores; return this; }

public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) { this.waitForResourcesInPrepare = waitForResourcesInPrepare; return this; }

public Builder force(boolean force) { this.force = force; return this; }

public Builder operatorCertificates(List<X509Certificate> operatorCertificates) { this.operatorCertificates = List.copyOf(operatorCertificates); return this; }

public Builder cloudAccount(CloudAccount cloudAccount) { this.cloudAccount = Optional.ofNullable(cloudAccount); return this; }

/** Creates the immutable PrepareParams from the collected values. */
public PrepareParams build() {
    return new PrepareParams(applicationId, tags, timeoutBudget, ignoreValidationErrors, dryRun,
                             verbose, isBootstrap, vespaVersion, containerEndpoints,
                             endpointCertificateMetadata, dockerImageRepository, athenzDomain,
                             quota, tenantSecretStores, force, waitForResourcesInPrepare,
                             operatorCertificates, cloudAccount);
}
} |
The fromJson does read the tags already? | public static PrepareParams fromHttpRequest(HttpRequest request, TenantName tenant, Duration barrierTimeout) {
// Builds PrepareParams from the request's query parameters; parameters not present
// fall back to the Builder defaults. NOTE(review): presumably getBooleanProperty
// yields false and getProperty yields null for an absent parameter — confirm.
// isBootstrap, operatorCertificates and cloudAccount are never set from the request.
return new Builder().ignoreValidationErrors(request.getBooleanProperty(IGNORE_VALIDATION_PARAM_NAME))
.dryRun(request.getBooleanProperty(DRY_RUN_PARAM_NAME))
.verbose(request.getBooleanProperty(VERBOSE_PARAM_NAME))
.timeoutBudget(SessionHandler.getTimeoutBudget(request, barrierTimeout))
.applicationId(createApplicationId(request, tenant))
.tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME)))
.vespaVersion(request.getProperty(VESPA_VERSION_PARAM_NAME))
.containerEndpoints(request.getProperty(CONTAINER_ENDPOINTS_PARAM_NAME))
.endpointCertificateMetadata(request.getProperty(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME))
.dockerImageRepository(request.getProperty(DOCKER_IMAGE_REPOSITORY))
.athenzDomain(request.getProperty(ATHENZ_DOMAIN))
.quota(request.getProperty(QUOTA_PARAM_NAME))
.tenantSecretStores(request.getProperty(TENANT_SECRET_STORES_PARAM_NAME))
.force(request.getBooleanProperty(FORCE_PARAM_NAME))
.waitForResourcesInPrepare(request.getBooleanProperty(WAIT_FOR_RESOURCES_IN_PREPARE))
.build();
} | .tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME))) | public static PrepareParams fromHttpRequest(HttpRequest request, TenantName tenant, Duration barrierTimeout) {
// Builds PrepareParams from the request's query parameters; parameters not present
// fall back to the Builder defaults. NOTE(review): presumably getBooleanProperty
// yields false and getProperty yields null for an absent parameter — confirm.
// isBootstrap, operatorCertificates and cloudAccount are never set from the request.
return new Builder().ignoreValidationErrors(request.getBooleanProperty(IGNORE_VALIDATION_PARAM_NAME))
.dryRun(request.getBooleanProperty(DRY_RUN_PARAM_NAME))
.verbose(request.getBooleanProperty(VERBOSE_PARAM_NAME))
.timeoutBudget(SessionHandler.getTimeoutBudget(request, barrierTimeout))
.applicationId(createApplicationId(request, tenant))
.tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME)))
.vespaVersion(request.getProperty(VESPA_VERSION_PARAM_NAME))
.containerEndpoints(request.getProperty(CONTAINER_ENDPOINTS_PARAM_NAME))
.endpointCertificateMetadata(request.getProperty(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME))
.dockerImageRepository(request.getProperty(DOCKER_IMAGE_REPOSITORY))
.athenzDomain(request.getProperty(ATHENZ_DOMAIN))
.quota(request.getProperty(QUOTA_PARAM_NAME))
.tenantSecretStores(request.getProperty(TENANT_SECRET_STORES_PARAM_NAME))
.force(request.getBooleanProperty(FORCE_PARAM_NAME))
.waitForResourcesInPrepare(request.getBooleanProperty(WAIT_FOR_RESOURCES_IN_PREPARE))
.build();
} | class Builder {
// Mutable collector of prepare parameters; every setter returns this for chaining,
// and fields keep the defaults below when a setter is never called.
private boolean ignoreValidationErrors = false;
private boolean dryRun = false;
private boolean verbose = false;
private boolean isBootstrap = false;
private boolean force = false;
private boolean waitForResourcesInPrepare = false;
private ApplicationId applicationId = null;
private Tags tags = Tags.empty();
private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60));
private Optional<Version> vespaVersion = Optional.empty();
private List<ContainerEndpoint> containerEndpoints = null;
private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
private Optional<DockerImage> dockerImageRepository = Optional.empty();
private Optional<AthenzDomain> athenzDomain = Optional.empty();
private Optional<Quota> quota = Optional.empty();
private List<TenantSecretStore> tenantSecretStores = List.of();
private List<X509Certificate> operatorCertificates = List.of();
private Optional<CloudAccount> cloudAccount = Optional.empty();

public Builder() { }

public Builder applicationId(ApplicationId applicationId) {
    this.applicationId = applicationId;
    return this;
}

public Builder tags(Tags tags) {
    this.tags = tags;
    return this;
}

public Builder ignoreValidationErrors(boolean ignoreValidationErrors) {
    this.ignoreValidationErrors = ignoreValidationErrors;
    return this;
}

public Builder dryRun(boolean dryRun) {
    this.dryRun = dryRun;
    return this;
}

public Builder verbose(boolean verbose) {
    this.verbose = verbose;
    return this;
}

public Builder isBootstrap(boolean isBootstrap) {
    this.isBootstrap = isBootstrap;
    return this;
}

public Builder timeoutBudget(TimeoutBudget timeoutBudget) {
    this.timeoutBudget = timeoutBudget;
    return this;
}

/** Accepts a version string; null or empty means "unspecified". */
public Builder vespaVersion(String vespaVersion) {
    Optional<Version> version = Optional.empty();
    if (vespaVersion != null && !vespaVersion.isEmpty()) {
        version = Optional.of(Version.fromString(vespaVersion));
    }
    this.vespaVersion = version;
    return this;
}

public Builder vespaVersion(Version vespaVersion) {
    this.vespaVersion = Optional.ofNullable(vespaVersion);
    return this;
}

/** Deserializes the JSON endpoint list; null means "no endpoints". */
public Builder containerEndpoints(String serialized) {
    this.containerEndpoints = (serialized == null)
            ? List.of()
            : ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
    return this;
}

public Builder containerEndpointList(List<ContainerEndpoint> endpoints) {
    this.containerEndpoints = endpoints;
    return this;
}

public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) {
    this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata);
    return this;
}

/** Deserializes JSON endpoint-certificate metadata; null means "absent". */
public Builder endpointCertificateMetadata(String serialized) {
    this.endpointCertificateMetadata = (serialized == null)
            ? Optional.empty()
            : Optional.of(EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
    return this;
}

public Builder dockerImageRepository(String dockerImageRepository) {
    this.dockerImageRepository = (dockerImageRepository == null)
            ? Optional.empty()
            : Optional.of(DockerImage.fromString(dockerImageRepository));
    return this;
}

public Builder dockerImageRepository(DockerImage dockerImageRepository) {
    this.dockerImageRepository = Optional.ofNullable(dockerImageRepository);
    return this;
}

public Builder athenzDomain(String athenzDomain) {
    this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from);
    return this;
}

public Builder athenzDomain(AthenzDomain athenzDomain) {
    this.athenzDomain = Optional.ofNullable(athenzDomain);
    return this;
}

public Builder quota(Quota quota) {
    this.quota = Optional.ofNullable(quota);
    return this;
}

/** Deserializes a JSON quota; null means "no quota". */
public Builder quota(String serialized) {
    this.quota = (serialized == null)
            ? Optional.empty()
            : Optional.of(Quota.fromSlime(SlimeUtils.jsonToSlime(serialized).get()));
    return this;
}

/** Deserializes the JSON secret-store list; null means "none". */
public Builder tenantSecretStores(String serialized) {
    List<TenantSecretStore> secretStores = (serialized == null)
            ? List.of()
            : TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get());
    return tenantSecretStores(secretStores);
}

public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) {
    // Defensive copy for consistency with operatorCertificates below; previously the
    // caller's (possibly mutable) list was stored directly. List.copyOf rejects null —
    // the only visible caller (the String overload above) always passes a list.
    this.tenantSecretStores = List.copyOf(tenantSecretStores);
    return this;
}

public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) {
    this.waitForResourcesInPrepare = waitForResourcesInPrepare;
    return this;
}

public Builder force(boolean force) {
    this.force = force;
    return this;
}

public Builder operatorCertificates(List<X509Certificate> operatorCertificates) {
    this.operatorCertificates = List.copyOf(operatorCertificates);
    return this;
}

public Builder cloudAccount(CloudAccount cloudAccount) {
    this.cloudAccount = Optional.ofNullable(cloudAccount);
    return this;
}

/** Creates the immutable PrepareParams from the collected values. */
public PrepareParams build() {
    return new PrepareParams(applicationId,
                             tags,
                             timeoutBudget,
                             ignoreValidationErrors,
                             dryRun,
                             verbose,
                             isBootstrap,
                             vespaVersion,
                             containerEndpoints,
                             endpointCertificateMetadata,
                             dockerImageRepository,
                             athenzDomain,
                             quota,
                             tenantSecretStores,
                             force,
                             waitForResourcesInPrepare,
                             operatorCertificates,
                             cloudAccount);
}
} | class Builder {
// Mutable collector of prepare parameters; every setter returns this for chaining,
// and fields keep the defaults below when a setter is never called.
private boolean ignoreValidationErrors = false;
private boolean dryRun = false;
private boolean verbose = false;
private boolean isBootstrap = false;
private boolean force = false;
private boolean waitForResourcesInPrepare = false;
private ApplicationId applicationId = null;
private Tags tags = Tags.empty();
private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60));
private Optional<Version> vespaVersion = Optional.empty();
private List<ContainerEndpoint> containerEndpoints = null;
private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
private Optional<DockerImage> dockerImageRepository = Optional.empty();
private Optional<AthenzDomain> athenzDomain = Optional.empty();
private Optional<Quota> quota = Optional.empty();
private List<TenantSecretStore> tenantSecretStores = List.of();
private List<X509Certificate> operatorCertificates = List.of();
private Optional<CloudAccount> cloudAccount = Optional.empty();

public Builder() { }

public Builder applicationId(ApplicationId applicationId) { this.applicationId = applicationId; return this; }

public Builder tags(Tags tags) { this.tags = tags; return this; }

public Builder ignoreValidationErrors(boolean ignoreValidationErrors) { this.ignoreValidationErrors = ignoreValidationErrors; return this; }

public Builder dryRun(boolean dryRun) { this.dryRun = dryRun; return this; }

public Builder verbose(boolean verbose) { this.verbose = verbose; return this; }

public Builder isBootstrap(boolean isBootstrap) { this.isBootstrap = isBootstrap; return this; }

public Builder timeoutBudget(TimeoutBudget timeoutBudget) { this.timeoutBudget = timeoutBudget; return this; }

/** Accepts a version string; null or empty means "unspecified". */
public Builder vespaVersion(String vespaVersion) {
    this.vespaVersion = Optional.ofNullable(vespaVersion)
                                .filter(s -> ! s.isEmpty())
                                .map(Version::fromString);
    return this;
}

public Builder vespaVersion(Version vespaVersion) { this.vespaVersion = Optional.ofNullable(vespaVersion); return this; }

/** Deserializes the JSON endpoint list; null means "no endpoints". */
public Builder containerEndpoints(String serialized) {
    if (serialized == null)
        this.containerEndpoints = List.of();
    else
        this.containerEndpoints = ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
    return this;
}

public Builder containerEndpointList(List<ContainerEndpoint> endpoints) { this.containerEndpoints = endpoints; return this; }

public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) { this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata); return this; }

/** Deserializes JSON endpoint-certificate metadata; null means "absent". */
public Builder endpointCertificateMetadata(String serialized) {
    this.endpointCertificateMetadata = Optional.ofNullable(serialized)
                                               .map(s -> EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(s).get()));
    return this;
}

public Builder dockerImageRepository(String dockerImageRepository) {
    this.dockerImageRepository = Optional.ofNullable(dockerImageRepository).map(DockerImage::fromString);
    return this;
}

public Builder dockerImageRepository(DockerImage dockerImageRepository) { this.dockerImageRepository = Optional.ofNullable(dockerImageRepository); return this; }

public Builder athenzDomain(String athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from); return this; }

public Builder athenzDomain(AthenzDomain athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain); return this; }

public Builder quota(Quota quota) { this.quota = Optional.ofNullable(quota); return this; }

/** Deserializes a JSON quota; null means "no quota". */
public Builder quota(String serialized) {
    this.quota = Optional.ofNullable(serialized)
                         .map(s -> Quota.fromSlime(SlimeUtils.jsonToSlime(s).get()));
    return this;
}

/** Deserializes the JSON secret-store list; null means "none". */
public Builder tenantSecretStores(String serialized) {
    List<TenantSecretStore> secretStores = (serialized == null)
            ? List.of()
            : TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get());
    return tenantSecretStores(secretStores);
}

public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) { this.tenantSecretStores = tenantSecretStores; return this; }

public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) { this.waitForResourcesInPrepare = waitForResourcesInPrepare; return this; }

public Builder force(boolean force) { this.force = force; return this; }

public Builder operatorCertificates(List<X509Certificate> operatorCertificates) { this.operatorCertificates = List.copyOf(operatorCertificates); return this; }

public Builder cloudAccount(CloudAccount cloudAccount) { this.cloudAccount = Optional.ofNullable(cloudAccount); return this; }

/** Creates the immutable PrepareParams from the collected values. */
public PrepareParams build() {
    return new PrepareParams(applicationId, tags, timeoutBudget, ignoreValidationErrors, dryRun,
                             verbose, isBootstrap, vespaVersion, containerEndpoints,
                             endpointCertificateMetadata, dockerImageRepository, athenzDomain,
                             quota, tenantSecretStores, force, waitForResourcesInPrepare,
                             operatorCertificates, cloudAccount);
}
} |
Yes, but they're not written anywhere. | public static PrepareParams fromHttpRequest(HttpRequest request, TenantName tenant, Duration barrierTimeout) {
// Builds PrepareParams from the request's query parameters; parameters not present
// fall back to the Builder defaults. NOTE(review): presumably getBooleanProperty
// yields false and getProperty yields null for an absent parameter — confirm.
// isBootstrap, operatorCertificates and cloudAccount are never set from the request.
return new Builder().ignoreValidationErrors(request.getBooleanProperty(IGNORE_VALIDATION_PARAM_NAME))
.dryRun(request.getBooleanProperty(DRY_RUN_PARAM_NAME))
.verbose(request.getBooleanProperty(VERBOSE_PARAM_NAME))
.timeoutBudget(SessionHandler.getTimeoutBudget(request, barrierTimeout))
.applicationId(createApplicationId(request, tenant))
.tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME)))
.vespaVersion(request.getProperty(VESPA_VERSION_PARAM_NAME))
.containerEndpoints(request.getProperty(CONTAINER_ENDPOINTS_PARAM_NAME))
.endpointCertificateMetadata(request.getProperty(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME))
.dockerImageRepository(request.getProperty(DOCKER_IMAGE_REPOSITORY))
.athenzDomain(request.getProperty(ATHENZ_DOMAIN))
.quota(request.getProperty(QUOTA_PARAM_NAME))
.tenantSecretStores(request.getProperty(TENANT_SECRET_STORES_PARAM_NAME))
.force(request.getBooleanProperty(FORCE_PARAM_NAME))
.waitForResourcesInPrepare(request.getBooleanProperty(WAIT_FOR_RESOURCES_IN_PREPARE))
.build();
} | .tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME))) | public static PrepareParams fromHttpRequest(HttpRequest request, TenantName tenant, Duration barrierTimeout) {
// Builds PrepareParams from the request's query parameters; parameters not present
// fall back to the Builder defaults. NOTE(review): presumably getBooleanProperty
// yields false and getProperty yields null for an absent parameter — confirm.
// isBootstrap, operatorCertificates and cloudAccount are never set from the request.
return new Builder().ignoreValidationErrors(request.getBooleanProperty(IGNORE_VALIDATION_PARAM_NAME))
.dryRun(request.getBooleanProperty(DRY_RUN_PARAM_NAME))
.verbose(request.getBooleanProperty(VERBOSE_PARAM_NAME))
.timeoutBudget(SessionHandler.getTimeoutBudget(request, barrierTimeout))
.applicationId(createApplicationId(request, tenant))
.tags(Tags.fromString(request.getProperty(TAGS_PARAM_NAME)))
.vespaVersion(request.getProperty(VESPA_VERSION_PARAM_NAME))
.containerEndpoints(request.getProperty(CONTAINER_ENDPOINTS_PARAM_NAME))
.endpointCertificateMetadata(request.getProperty(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME))
.dockerImageRepository(request.getProperty(DOCKER_IMAGE_REPOSITORY))
.athenzDomain(request.getProperty(ATHENZ_DOMAIN))
.quota(request.getProperty(QUOTA_PARAM_NAME))
.tenantSecretStores(request.getProperty(TENANT_SECRET_STORES_PARAM_NAME))
.force(request.getBooleanProperty(FORCE_PARAM_NAME))
.waitForResourcesInPrepare(request.getBooleanProperty(WAIT_FOR_RESOURCES_IN_PREPARE))
.build();
} | class Builder {
// Mutable collector of prepare parameters; every setter returns this for chaining,
// and fields keep the defaults below when a setter is never called.
private boolean ignoreValidationErrors = false;
private boolean dryRun = false;
private boolean verbose = false;
private boolean isBootstrap = false;
private boolean force = false;
private boolean waitForResourcesInPrepare = false;
private ApplicationId applicationId = null;
private Tags tags = Tags.empty();
private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60));
private Optional<Version> vespaVersion = Optional.empty();
private List<ContainerEndpoint> containerEndpoints = null;
private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
private Optional<DockerImage> dockerImageRepository = Optional.empty();
private Optional<AthenzDomain> athenzDomain = Optional.empty();
private Optional<Quota> quota = Optional.empty();
private List<TenantSecretStore> tenantSecretStores = List.of();
private List<X509Certificate> operatorCertificates = List.of();
private Optional<CloudAccount> cloudAccount = Optional.empty();

public Builder() { }

public Builder applicationId(ApplicationId applicationId) { this.applicationId = applicationId; return this; }

public Builder tags(Tags tags) { this.tags = tags; return this; }

public Builder ignoreValidationErrors(boolean ignoreValidationErrors) { this.ignoreValidationErrors = ignoreValidationErrors; return this; }

public Builder dryRun(boolean dryRun) { this.dryRun = dryRun; return this; }

public Builder verbose(boolean verbose) { this.verbose = verbose; return this; }

public Builder isBootstrap(boolean isBootstrap) { this.isBootstrap = isBootstrap; return this; }

public Builder timeoutBudget(TimeoutBudget timeoutBudget) { this.timeoutBudget = timeoutBudget; return this; }

/** Accepts a version string; null or empty means "unspecified". */
public Builder vespaVersion(String vespaVersion) {
    this.vespaVersion = Optional.ofNullable(vespaVersion)
                                .filter(s -> ! s.isEmpty())
                                .map(Version::fromString);
    return this;
}

public Builder vespaVersion(Version vespaVersion) { this.vespaVersion = Optional.ofNullable(vespaVersion); return this; }

/** Deserializes the JSON endpoint list; null means "no endpoints". */
public Builder containerEndpoints(String serialized) {
    if (serialized == null)
        this.containerEndpoints = List.of();
    else
        this.containerEndpoints = ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
    return this;
}

public Builder containerEndpointList(List<ContainerEndpoint> endpoints) { this.containerEndpoints = endpoints; return this; }

public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) { this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata); return this; }

/** Deserializes JSON endpoint-certificate metadata; null means "absent". */
public Builder endpointCertificateMetadata(String serialized) {
    this.endpointCertificateMetadata = Optional.ofNullable(serialized)
                                               .map(s -> EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(s).get()));
    return this;
}

public Builder dockerImageRepository(String dockerImageRepository) {
    this.dockerImageRepository = Optional.ofNullable(dockerImageRepository).map(DockerImage::fromString);
    return this;
}

public Builder dockerImageRepository(DockerImage dockerImageRepository) { this.dockerImageRepository = Optional.ofNullable(dockerImageRepository); return this; }

public Builder athenzDomain(String athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from); return this; }

public Builder athenzDomain(AthenzDomain athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain); return this; }

public Builder quota(Quota quota) { this.quota = Optional.ofNullable(quota); return this; }

/** Deserializes a JSON quota; null means "no quota". */
public Builder quota(String serialized) {
    this.quota = Optional.ofNullable(serialized)
                         .map(s -> Quota.fromSlime(SlimeUtils.jsonToSlime(s).get()));
    return this;
}

/** Deserializes the JSON secret-store list; null means "none". */
public Builder tenantSecretStores(String serialized) {
    List<TenantSecretStore> secretStores = (serialized == null)
            ? List.of()
            : TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get());
    return tenantSecretStores(secretStores);
}

public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) { this.tenantSecretStores = tenantSecretStores; return this; }

public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) { this.waitForResourcesInPrepare = waitForResourcesInPrepare; return this; }

public Builder force(boolean force) { this.force = force; return this; }

public Builder operatorCertificates(List<X509Certificate> operatorCertificates) { this.operatorCertificates = List.copyOf(operatorCertificates); return this; }

public Builder cloudAccount(CloudAccount cloudAccount) { this.cloudAccount = Optional.ofNullable(cloudAccount); return this; }

/** Creates the immutable PrepareParams from the collected values. */
public PrepareParams build() {
    return new PrepareParams(applicationId, tags, timeoutBudget, ignoreValidationErrors, dryRun,
                             verbose, isBootstrap, vespaVersion, containerEndpoints,
                             endpointCertificateMetadata, dockerImageRepository, athenzDomain,
                             quota, tenantSecretStores, force, waitForResourcesInPrepare,
                             operatorCertificates, cloudAccount);
}
} | class Builder {
// Mutable collector of prepare parameters; every setter returns this for chaining,
// and fields keep the defaults below when a setter is never called.
private boolean ignoreValidationErrors = false;
private boolean dryRun = false;
private boolean verbose = false;
private boolean isBootstrap = false;
private boolean force = false;
private boolean waitForResourcesInPrepare = false;
private ApplicationId applicationId = null;
private Tags tags = Tags.empty();
private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(60));
private Optional<Version> vespaVersion = Optional.empty();
private List<ContainerEndpoint> containerEndpoints = null;
private Optional<EndpointCertificateMetadata> endpointCertificateMetadata = Optional.empty();
private Optional<DockerImage> dockerImageRepository = Optional.empty();
private Optional<AthenzDomain> athenzDomain = Optional.empty();
private Optional<Quota> quota = Optional.empty();
private List<TenantSecretStore> tenantSecretStores = List.of();
private List<X509Certificate> operatorCertificates = List.of();
private Optional<CloudAccount> cloudAccount = Optional.empty();

public Builder() { }

public Builder applicationId(ApplicationId applicationId) { this.applicationId = applicationId; return this; }

public Builder tags(Tags tags) { this.tags = tags; return this; }

public Builder ignoreValidationErrors(boolean ignoreValidationErrors) { this.ignoreValidationErrors = ignoreValidationErrors; return this; }

public Builder dryRun(boolean dryRun) { this.dryRun = dryRun; return this; }

public Builder verbose(boolean verbose) { this.verbose = verbose; return this; }

public Builder isBootstrap(boolean isBootstrap) { this.isBootstrap = isBootstrap; return this; }

public Builder timeoutBudget(TimeoutBudget timeoutBudget) { this.timeoutBudget = timeoutBudget; return this; }

/** Accepts a version string; null or empty means "unspecified". */
public Builder vespaVersion(String vespaVersion) {
    this.vespaVersion = Optional.ofNullable(vespaVersion)
                                .filter(s -> ! s.isEmpty())
                                .map(Version::fromString);
    return this;
}

public Builder vespaVersion(Version vespaVersion) { this.vespaVersion = Optional.ofNullable(vespaVersion); return this; }

/** Deserializes the JSON endpoint list; null means "no endpoints". */
public Builder containerEndpoints(String serialized) {
    if (serialized == null)
        this.containerEndpoints = List.of();
    else
        this.containerEndpoints = ContainerEndpointSerializer.endpointListFromSlime(SlimeUtils.jsonToSlime(serialized));
    return this;
}

public Builder containerEndpointList(List<ContainerEndpoint> endpoints) { this.containerEndpoints = endpoints; return this; }

public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) { this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata); return this; }

/** Deserializes JSON endpoint-certificate metadata; null means "absent". */
public Builder endpointCertificateMetadata(String serialized) {
    this.endpointCertificateMetadata = Optional.ofNullable(serialized)
                                               .map(s -> EndpointCertificateMetadataSerializer.fromSlime(SlimeUtils.jsonToSlime(s).get()));
    return this;
}

public Builder dockerImageRepository(String dockerImageRepository) {
    this.dockerImageRepository = Optional.ofNullable(dockerImageRepository).map(DockerImage::fromString);
    return this;
}

public Builder dockerImageRepository(DockerImage dockerImageRepository) { this.dockerImageRepository = Optional.ofNullable(dockerImageRepository); return this; }

public Builder athenzDomain(String athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain).map(AthenzDomain::from); return this; }

public Builder athenzDomain(AthenzDomain athenzDomain) { this.athenzDomain = Optional.ofNullable(athenzDomain); return this; }

public Builder quota(Quota quota) { this.quota = Optional.ofNullable(quota); return this; }

/** Deserializes a JSON quota; null means "no quota". */
public Builder quota(String serialized) {
    this.quota = Optional.ofNullable(serialized)
                         .map(s -> Quota.fromSlime(SlimeUtils.jsonToSlime(s).get()));
    return this;
}

/** Deserializes the JSON secret-store list; null means "none". */
public Builder tenantSecretStores(String serialized) {
    List<TenantSecretStore> secretStores = (serialized == null)
            ? List.of()
            : TenantSecretStoreSerializer.listFromSlime(SlimeUtils.jsonToSlime(serialized).get());
    return tenantSecretStores(secretStores);
}

public Builder tenantSecretStores(List<TenantSecretStore> tenantSecretStores) { this.tenantSecretStores = tenantSecretStores; return this; }

public Builder waitForResourcesInPrepare(boolean waitForResourcesInPrepare) { this.waitForResourcesInPrepare = waitForResourcesInPrepare; return this; }

public Builder force(boolean force) { this.force = force; return this; }

public Builder operatorCertificates(List<X509Certificate> operatorCertificates) { this.operatorCertificates = List.copyOf(operatorCertificates); return this; }

public Builder cloudAccount(CloudAccount cloudAccount) { this.cloudAccount = Optional.ofNullable(cloudAccount); return this; }

/** Creates the immutable PrepareParams from the collected values. */
public PrepareParams build() {
    return new PrepareParams(applicationId, tags, timeoutBudget, ignoreValidationErrors, dryRun,
                             verbose, isBootstrap, vespaVersion, containerEndpoints,
                             endpointCertificateMetadata, dockerImageRepository, athenzDomain,
                             quota, tenantSecretStores, force, waitForResourcesInPrepare,
                             operatorCertificates, cloudAccount);
}
} |
How about this? ```java assertEquals(new Tags(Set.of("tag1", "tag2")), Tags.fromString(" tag1 tag2 ")); ``` | public void testEmpty() {
// Null, empty and whitespace-only input must all parse to the empty tag set.
// NOTE(review): per the review thread the " " case currently fails with a duplicate
// "" element — confirm Tags.fromString filters blank tokens before collecting.
assertEquals(Tags.empty(), Tags.fromString(null));
assertEquals(Tags.empty(), Tags.fromString(""));
assertEquals(Tags.empty(), Tags.fromString(" "));
} | } | public void testEmpty() {
// Null, empty and whitespace-only input must all parse to the empty tag set.
// NOTE(review): per the review thread the " " case currently fails with a duplicate
// "" element — confirm Tags.fromString filters blank tokens before collecting.
assertEquals(Tags.empty(), Tags.fromString(null));
assertEquals(Tags.empty(), Tags.fromString(""));
assertEquals(Tags.empty(), Tags.fromString(" "));
} | class TagsTest {
// NOTE(review): the original had two consecutive @Test annotations here (a leftover
// from a removed test); duplicate annotations do not compile, so one was dropped.
@Test
public void testSerialization() {
    // Tags must survive a round trip through both string representations.
    Tags tags = new Tags(Set.of("a", "tag2", "3"));
    assertEquals(tags, Tags.fromString(tags.toString()));
    assertEquals(tags, Tags.fromString(tags.asString()));
}

@Test
public void testContains() {
    // Membership of individual tags, and subset containment in both directions.
    Tags tags = new Tags(Set.of("a", "tag2", "3"));
    assertTrue(tags.contains("a"));
    assertTrue(tags.contains("tag2"));
    assertTrue(tags.contains("3"));
    assertFalse(tags.contains("other"));
    Tags subTags = new Tags(Set.of("a", "3"));
    assertTrue(tags.containsAll(subTags));
    assertFalse(subTags.containsAll(tags));
}

@Test
public void testIntersects() {
    // Intersection on a shared element is symmetric.
    Tags tags1 = new Tags(Set.of("a", "tag2", "3"));
    Tags tags2 = new Tags(Set.of("a", "tag3"));
    assertTrue(tags1.intersects(tags2));
    assertTrue(tags2.intersects(tags1));
}
} | class TagsTest {
// NOTE(review): dropped a stray duplicate @Test annotation left over from a removed
// test; duplicate annotations do not compile.
@Test
public void testDeserialization() {
    // Leading, trailing and separating whitespace is ignored when parsing tags.
    assertEquals(new Tags(Set.of("tag1", "tag2")), Tags.fromString(" tag1 tag2 "));
}

@Test
public void testSerialization() {
    // Tags must survive a round trip through both string representations.
    Tags tags = new Tags(Set.of("a", "tag2", "3"));
    assertEquals(tags, Tags.fromString(tags.toString()));
    assertEquals(tags, Tags.fromString(tags.asString()));
}

@Test
public void testContains() {
    // Membership of individual tags, and subset containment in both directions.
    Tags tags = new Tags(Set.of("a", "tag2", "3"));
    assertTrue(tags.contains("a"));
    assertTrue(tags.contains("tag2"));
    assertTrue(tags.contains("3"));
    assertFalse(tags.contains("other"));
    Tags subTags = new Tags(Set.of("a", "3"));
    assertTrue(tags.containsAll(subTags));
    assertFalse(subTags.containsAll(tags));
}

@Test
public void testIntersects() {
    // Intersection on a shared element is symmetric.
    Tags tags1 = new Tags(Set.of("a", "tag2", "3"));
    Tags tags2 = new Tags(Set.of("a", "tag3"));
    assertTrue(tags1.intersects(tags2));
    assertTrue(tags2.intersects(tags1));
}
} |
This currently fails with duplicate element (`""`). | public void testEmpty() {
assertEquals(Tags.empty(), Tags.fromString(null));
assertEquals(Tags.empty(), Tags.fromString(""));
assertEquals(Tags.empty(), Tags.fromString(" "));
} | } | public void testEmpty() {
assertEquals(Tags.empty(), Tags.fromString(null));
assertEquals(Tags.empty(), Tags.fromString(""));
assertEquals(Tags.empty(), Tags.fromString(" "));
} | class TagsTest {
@Test
@Test
public void testSerialization() {
Tags tags = new Tags(Set.of("a", "tag2", "3"));
assertEquals(tags, Tags.fromString(tags.toString()));
assertEquals(tags, Tags.fromString(tags.asString()));
}
@Test
public void testContains() {
Tags tags = new Tags(Set.of("a", "tag2", "3"));
assertTrue(tags.contains("a"));
assertTrue(tags.contains("tag2"));
assertTrue(tags.contains("3"));
assertFalse(tags.contains("other"));
Tags subTags = new Tags(Set.of("a", "3"));
assertTrue(tags.containsAll(subTags));
assertFalse(subTags.containsAll(tags));
}
@Test
public void testIntersects() {
Tags tags1 = new Tags(Set.of("a", "tag2", "3"));
Tags tags2 = new Tags(Set.of("a", "tag3"));
assertTrue(tags1.intersects(tags2));
assertTrue(tags2.intersects(tags1));
}
} | class TagsTest {
@Test
@Test
public void testDeserialization() {
assertEquals(new Tags(Set.of("tag1", "tag2")), Tags.fromString(" tag1 tag2 "));
}
@Test
public void testSerialization() {
Tags tags = new Tags(Set.of("a", "tag2", "3"));
assertEquals(tags, Tags.fromString(tags.toString()));
assertEquals(tags, Tags.fromString(tags.asString()));
}
@Test
public void testContains() {
Tags tags = new Tags(Set.of("a", "tag2", "3"));
assertTrue(tags.contains("a"));
assertTrue(tags.contains("tag2"));
assertTrue(tags.contains("3"));
assertFalse(tags.contains("other"));
Tags subTags = new Tags(Set.of("a", "3"));
assertTrue(tags.containsAll(subTags));
assertFalse(subTags.containsAll(tags));
}
@Test
public void testIntersects() {
Tags tags1 = new Tags(Set.of("a", "tag2", "3"));
Tags tags2 = new Tags(Set.of("a", "tag3"));
assertTrue(tags1.intersects(tags2));
assertTrue(tags2.intersects(tags1));
}
} |
```suggestion } @Test public void testDeserialization() { assertEquals(new Tags(Set.of("tag1", "tag2")), Tags.fromString(" tag1 tag2 ")); } ``` | public void testEmpty() {
assertEquals(Tags.empty(), Tags.fromString(null));
assertEquals(Tags.empty(), Tags.fromString(""));
assertEquals(Tags.empty(), Tags.fromString(" "));
} | } | public void testEmpty() {
assertEquals(Tags.empty(), Tags.fromString(null));
assertEquals(Tags.empty(), Tags.fromString(""));
assertEquals(Tags.empty(), Tags.fromString(" "));
} | class TagsTest {
@Test
@Test
public void testSerialization() {
Tags tags = new Tags(Set.of("a", "tag2", "3"));
assertEquals(tags, Tags.fromString(tags.toString()));
assertEquals(tags, Tags.fromString(tags.asString()));
}
@Test
public void testContains() {
Tags tags = new Tags(Set.of("a", "tag2", "3"));
assertTrue(tags.contains("a"));
assertTrue(tags.contains("tag2"));
assertTrue(tags.contains("3"));
assertFalse(tags.contains("other"));
Tags subTags = new Tags(Set.of("a", "3"));
assertTrue(tags.containsAll(subTags));
assertFalse(subTags.containsAll(tags));
}
@Test
public void testIntersects() {
Tags tags1 = new Tags(Set.of("a", "tag2", "3"));
Tags tags2 = new Tags(Set.of("a", "tag3"));
assertTrue(tags1.intersects(tags2));
assertTrue(tags2.intersects(tags1));
}
} | class TagsTest {
@Test
@Test
public void testDeserialization() {
assertEquals(new Tags(Set.of("tag1", "tag2")), Tags.fromString(" tag1 tag2 "));
}
@Test
public void testSerialization() {
Tags tags = new Tags(Set.of("a", "tag2", "3"));
assertEquals(tags, Tags.fromString(tags.toString()));
assertEquals(tags, Tags.fromString(tags.asString()));
}
@Test
public void testContains() {
Tags tags = new Tags(Set.of("a", "tag2", "3"));
assertTrue(tags.contains("a"));
assertTrue(tags.contains("tag2"));
assertTrue(tags.contains("3"));
assertFalse(tags.contains("other"));
Tags subTags = new Tags(Set.of("a", "3"));
assertTrue(tags.containsAll(subTags));
assertFalse(subTags.containsAll(tags));
}
@Test
public void testIntersects() {
Tags tags1 = new Tags(Set.of("a", "tag2", "3"));
Tags tags2 = new Tags(Set.of("a", "tag3"));
assertTrue(tags1.intersects(tags2));
assertTrue(tags2.intersects(tags1));
}
} |
```suggestion return new Tags(Set.of(tagsString.trim().split(" +"))); ``` Kill empty strings | public static Tags fromString(String tagsString) {
if (tagsString == null || tagsString.isBlank()) return empty();
return new Tags(Set.of(tagsString.trim().split(" ")));
} | return new Tags(Set.of(tagsString.trim().split(" "))); | public static Tags fromString(String tagsString) {
if (tagsString == null || tagsString.isBlank()) return empty();
return new Tags(Set.of(tagsString.trim().split(" +")));
} | class Tags {
private final Set<String> tags;
public Tags(Set<String> tags) {
this.tags = Set.copyOf(tags);
}
public boolean contains(String tag) {
return tags.contains(tag);
}
public boolean intersects(Tags other) {
return this.tags.stream().anyMatch(other::contains);
}
public boolean isEmpty() { return tags.isEmpty(); }
public boolean containsAll(Tags other) { return tags.containsAll(other.tags); }
/** Returns this as a space-separated string which can be used to recreate this by calling fromString(). */
public String asString() { return String.join(" ", tags); }
@Override
public String toString() {
return asString();
}
@Override
public boolean equals(Object other) {
if (other == this) return true;
if (other == null || other.getClass() != getClass()) return false;
return tags.equals(((Tags)other).tags);
}
@Override
public int hashCode() {
return tags.hashCode();
}
public static Tags empty() { return new Tags(Set.of()); }
/**
* Creates this from a space-separated string or null. */
} | class Tags {
private final Set<String> tags;
public Tags(Set<String> tags) {
this.tags = Set.copyOf(tags);
}
public boolean contains(String tag) {
return tags.contains(tag);
}
public boolean intersects(Tags other) {
return this.tags.stream().anyMatch(other::contains);
}
public boolean isEmpty() { return tags.isEmpty(); }
public boolean containsAll(Tags other) { return tags.containsAll(other.tags); }
/** Returns this as a space-separated string which can be used to recreate this by calling fromString(). */
public String asString() {
return tags.stream().sorted().collect(Collectors.joining(" "));
}
@Override
public String toString() {
return asString();
}
@Override
public boolean equals(Object other) {
if (other == this) return true;
if (other == null || other.getClass() != getClass()) return false;
return tags.equals(((Tags)other).tags);
}
@Override
public int hashCode() {
return tags.hashCode();
}
public static Tags empty() { return new Tags(Set.of()); }
/**
* Creates this from a space-separated string or null. */
} |
That exception is caught and re-thrown several places, and being an unchecked exception, I'm not sure I want to mess around with that. | public HttpResponse handle(HttpRequest request) {
log.log(Level.FINE, () -> request.getMethod() + " " + request.getUri().toString());
try {
return switch (request.getMethod()) {
case POST -> handlePOST(request);
case GET -> handleGET(request);
case PUT -> handlePUT(request);
case DELETE -> handleDELETE(request);
default -> createErrorResponse(request.getMethod());
};
} catch (NotFoundException | com.yahoo.vespa.config.server.NotFoundException e) {
return HttpErrorResponse.notFoundError(getMessage(e, request));
} catch (ActivationConflictException e) {
return HttpErrorResponse.conflictWhenActivating(getMessage(e, request));
} catch (InvalidApplicationException e) {
return HttpErrorResponse.invalidApplicationPackage(getMessage(e, request));
} catch (IllegalArgumentException | UnsupportedOperationException e) {
return HttpErrorResponse.badRequest(getMessage(e, request));
} catch (NodeAllocationException e) {
return e.retryable() ? HttpErrorResponse.nodeAllocationFailure(getMessage(e, request))
: HttpErrorResponse.invalidApplicationPackage(getMessage(e, request));
} catch (InternalServerException e) {
return HttpErrorResponse.internalServerError(getMessage(e, request));
} catch (UnknownVespaVersionException e) {
return HttpErrorResponse.unknownVespaVersion(getMessage(e, request));
} catch (RequestTimeoutException e) {
return HttpErrorResponse.requestTimeout(getMessage(e, request));
} catch (ApplicationLockException e) {
return HttpErrorResponse.applicationLockFailure(getMessage(e, request));
} catch (ParentHostUnavailableException e) {
return HttpErrorResponse.parentHostNotReady(getMessage(e, request));
} catch (CertificateNotReadyException e) {
return HttpErrorResponse.certificateNotReady(getMessage(e, request));
} catch (ConfigNotConvergedException e) {
return HttpErrorResponse.configNotConverged(getMessage(e, request));
} catch (LoadBalancerServiceException e) {
return HttpErrorResponse.loadBalancerNotReady(getMessage(e, request));
} catch (ReindexingStatusException e) {
return HttpErrorResponse.reindexingStatusUnavailable(getMessage(e, request));
} catch (Exception e) {
log.log(Level.WARNING, "Unexpected exception handling a config server request", e);
return HttpErrorResponse.internalServerError(getMessage(e, request));
}
} | return e.retryable() ? HttpErrorResponse.nodeAllocationFailure(getMessage(e, request)) | public HttpResponse handle(HttpRequest request) {
log.log(Level.FINE, () -> request.getMethod() + " " + request.getUri().toString());
try {
return switch (request.getMethod()) {
case POST -> handlePOST(request);
case GET -> handleGET(request);
case PUT -> handlePUT(request);
case DELETE -> handleDELETE(request);
default -> createErrorResponse(request.getMethod());
};
} catch (NotFoundException | com.yahoo.vespa.config.server.NotFoundException e) {
return HttpErrorResponse.notFoundError(getMessage(e, request));
} catch (ActivationConflictException e) {
return HttpErrorResponse.conflictWhenActivating(getMessage(e, request));
} catch (InvalidApplicationException e) {
return HttpErrorResponse.invalidApplicationPackage(getMessage(e, request));
} catch (IllegalArgumentException | UnsupportedOperationException e) {
return HttpErrorResponse.badRequest(getMessage(e, request));
} catch (NodeAllocationException e) {
return e.retryable() ? HttpErrorResponse.nodeAllocationFailure(getMessage(e, request))
: HttpErrorResponse.invalidApplicationPackage(getMessage(e, request));
} catch (InternalServerException e) {
return HttpErrorResponse.internalServerError(getMessage(e, request));
} catch (UnknownVespaVersionException e) {
return HttpErrorResponse.unknownVespaVersion(getMessage(e, request));
} catch (RequestTimeoutException e) {
return HttpErrorResponse.requestTimeout(getMessage(e, request));
} catch (ApplicationLockException e) {
return HttpErrorResponse.applicationLockFailure(getMessage(e, request));
} catch (ParentHostUnavailableException e) {
return HttpErrorResponse.parentHostNotReady(getMessage(e, request));
} catch (CertificateNotReadyException e) {
return HttpErrorResponse.certificateNotReady(getMessage(e, request));
} catch (ConfigNotConvergedException e) {
return HttpErrorResponse.configNotConverged(getMessage(e, request));
} catch (LoadBalancerServiceException e) {
return HttpErrorResponse.loadBalancerNotReady(getMessage(e, request));
} catch (ReindexingStatusException e) {
return HttpErrorResponse.reindexingStatusUnavailable(getMessage(e, request));
} catch (Exception e) {
log.log(Level.WARNING, "Unexpected exception handling a config server request", e);
return HttpErrorResponse.internalServerError(getMessage(e, request));
}
} | class HttpHandler extends ThreadedHttpRequestHandler {
public HttpHandler(HttpHandler.Context ctx) {
super(ctx);
}
@Override
protected static Duration getRequestTimeout(HttpRequest request, Duration defaultTimeout) {
if ( ! request.hasProperty("timeout")) {
return defaultTimeout;
}
try {
return Duration.ofMillis((long) (Double.parseDouble(request.getProperty("timeout")) * 1000));
} catch (Exception e) {
return defaultTimeout;
}
}
private String getMessage(Exception e, HttpRequest request) {
if (request.getBooleanProperty("debug")) {
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
e.printStackTrace(pw);
return sw.toString();
} else {
return Exceptions.toMessageString(e);
}
}
/**
* Default implementation of handler for GET requests. Returns an error response.
* Override this method to handle GET requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handleGET(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for POST requests. Returns an error response.
* Override this method to handle POST requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handlePOST(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for PUT requests. Returns an error response.
* Override this method to handle POST requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handlePUT(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for DELETE requests. Returns an error response.
* Override this method to handle DELETE requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handleDELETE(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Creates error response when method is not handled
*
* @return an error response with response code 405
*/
private HttpResponse createErrorResponse(com.yahoo.jdisc.http.HttpRequest.Method method) {
return HttpErrorResponse.methodNotAllowed("Method '" + method + "' is not supported");
}
} | class HttpHandler extends ThreadedHttpRequestHandler {
public HttpHandler(HttpHandler.Context ctx) {
super(ctx);
}
@Override
protected static Duration getRequestTimeout(HttpRequest request, Duration defaultTimeout) {
if ( ! request.hasProperty("timeout")) {
return defaultTimeout;
}
try {
return Duration.ofMillis((long) (Double.parseDouble(request.getProperty("timeout")) * 1000));
} catch (Exception e) {
return defaultTimeout;
}
}
private String getMessage(Exception e, HttpRequest request) {
if (request.getBooleanProperty("debug")) {
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
e.printStackTrace(pw);
return sw.toString();
} else {
return Exceptions.toMessageString(e);
}
}
/**
* Default implementation of handler for GET requests. Returns an error response.
* Override this method to handle GET requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handleGET(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for POST requests. Returns an error response.
* Override this method to handle POST requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handlePOST(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for PUT requests. Returns an error response.
* Override this method to handle POST requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handlePUT(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for DELETE requests. Returns an error response.
* Override this method to handle DELETE requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handleDELETE(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Creates error response when method is not handled
*
* @return an error response with response code 405
*/
private HttpResponse createErrorResponse(com.yahoo.jdisc.http.HttpRequest.Method method) {
return HttpErrorResponse.methodNotAllowed("Method '" + method + "' is not supported");
}
} |
The NodeAllicationException, that is. | public HttpResponse handle(HttpRequest request) {
log.log(Level.FINE, () -> request.getMethod() + " " + request.getUri().toString());
try {
return switch (request.getMethod()) {
case POST -> handlePOST(request);
case GET -> handleGET(request);
case PUT -> handlePUT(request);
case DELETE -> handleDELETE(request);
default -> createErrorResponse(request.getMethod());
};
} catch (NotFoundException | com.yahoo.vespa.config.server.NotFoundException e) {
return HttpErrorResponse.notFoundError(getMessage(e, request));
} catch (ActivationConflictException e) {
return HttpErrorResponse.conflictWhenActivating(getMessage(e, request));
} catch (InvalidApplicationException e) {
return HttpErrorResponse.invalidApplicationPackage(getMessage(e, request));
} catch (IllegalArgumentException | UnsupportedOperationException e) {
return HttpErrorResponse.badRequest(getMessage(e, request));
} catch (NodeAllocationException e) {
return e.retryable() ? HttpErrorResponse.nodeAllocationFailure(getMessage(e, request))
: HttpErrorResponse.invalidApplicationPackage(getMessage(e, request));
} catch (InternalServerException e) {
return HttpErrorResponse.internalServerError(getMessage(e, request));
} catch (UnknownVespaVersionException e) {
return HttpErrorResponse.unknownVespaVersion(getMessage(e, request));
} catch (RequestTimeoutException e) {
return HttpErrorResponse.requestTimeout(getMessage(e, request));
} catch (ApplicationLockException e) {
return HttpErrorResponse.applicationLockFailure(getMessage(e, request));
} catch (ParentHostUnavailableException e) {
return HttpErrorResponse.parentHostNotReady(getMessage(e, request));
} catch (CertificateNotReadyException e) {
return HttpErrorResponse.certificateNotReady(getMessage(e, request));
} catch (ConfigNotConvergedException e) {
return HttpErrorResponse.configNotConverged(getMessage(e, request));
} catch (LoadBalancerServiceException e) {
return HttpErrorResponse.loadBalancerNotReady(getMessage(e, request));
} catch (ReindexingStatusException e) {
return HttpErrorResponse.reindexingStatusUnavailable(getMessage(e, request));
} catch (Exception e) {
log.log(Level.WARNING, "Unexpected exception handling a config server request", e);
return HttpErrorResponse.internalServerError(getMessage(e, request));
}
} | return e.retryable() ? HttpErrorResponse.nodeAllocationFailure(getMessage(e, request)) | public HttpResponse handle(HttpRequest request) {
log.log(Level.FINE, () -> request.getMethod() + " " + request.getUri().toString());
try {
return switch (request.getMethod()) {
case POST -> handlePOST(request);
case GET -> handleGET(request);
case PUT -> handlePUT(request);
case DELETE -> handleDELETE(request);
default -> createErrorResponse(request.getMethod());
};
} catch (NotFoundException | com.yahoo.vespa.config.server.NotFoundException e) {
return HttpErrorResponse.notFoundError(getMessage(e, request));
} catch (ActivationConflictException e) {
return HttpErrorResponse.conflictWhenActivating(getMessage(e, request));
} catch (InvalidApplicationException e) {
return HttpErrorResponse.invalidApplicationPackage(getMessage(e, request));
} catch (IllegalArgumentException | UnsupportedOperationException e) {
return HttpErrorResponse.badRequest(getMessage(e, request));
} catch (NodeAllocationException e) {
return e.retryable() ? HttpErrorResponse.nodeAllocationFailure(getMessage(e, request))
: HttpErrorResponse.invalidApplicationPackage(getMessage(e, request));
} catch (InternalServerException e) {
return HttpErrorResponse.internalServerError(getMessage(e, request));
} catch (UnknownVespaVersionException e) {
return HttpErrorResponse.unknownVespaVersion(getMessage(e, request));
} catch (RequestTimeoutException e) {
return HttpErrorResponse.requestTimeout(getMessage(e, request));
} catch (ApplicationLockException e) {
return HttpErrorResponse.applicationLockFailure(getMessage(e, request));
} catch (ParentHostUnavailableException e) {
return HttpErrorResponse.parentHostNotReady(getMessage(e, request));
} catch (CertificateNotReadyException e) {
return HttpErrorResponse.certificateNotReady(getMessage(e, request));
} catch (ConfigNotConvergedException e) {
return HttpErrorResponse.configNotConverged(getMessage(e, request));
} catch (LoadBalancerServiceException e) {
return HttpErrorResponse.loadBalancerNotReady(getMessage(e, request));
} catch (ReindexingStatusException e) {
return HttpErrorResponse.reindexingStatusUnavailable(getMessage(e, request));
} catch (Exception e) {
log.log(Level.WARNING, "Unexpected exception handling a config server request", e);
return HttpErrorResponse.internalServerError(getMessage(e, request));
}
} | class HttpHandler extends ThreadedHttpRequestHandler {
public HttpHandler(HttpHandler.Context ctx) {
super(ctx);
}
@Override
protected static Duration getRequestTimeout(HttpRequest request, Duration defaultTimeout) {
if ( ! request.hasProperty("timeout")) {
return defaultTimeout;
}
try {
return Duration.ofMillis((long) (Double.parseDouble(request.getProperty("timeout")) * 1000));
} catch (Exception e) {
return defaultTimeout;
}
}
private String getMessage(Exception e, HttpRequest request) {
if (request.getBooleanProperty("debug")) {
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
e.printStackTrace(pw);
return sw.toString();
} else {
return Exceptions.toMessageString(e);
}
}
/**
* Default implementation of handler for GET requests. Returns an error response.
* Override this method to handle GET requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handleGET(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for POST requests. Returns an error response.
* Override this method to handle POST requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handlePOST(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for PUT requests. Returns an error response.
* Override this method to handle POST requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handlePUT(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for DELETE requests. Returns an error response.
* Override this method to handle DELETE requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handleDELETE(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Creates error response when method is not handled
*
* @return an error response with response code 405
*/
private HttpResponse createErrorResponse(com.yahoo.jdisc.http.HttpRequest.Method method) {
return HttpErrorResponse.methodNotAllowed("Method '" + method + "' is not supported");
}
} | class HttpHandler extends ThreadedHttpRequestHandler {
public HttpHandler(HttpHandler.Context ctx) {
super(ctx);
}
@Override
protected static Duration getRequestTimeout(HttpRequest request, Duration defaultTimeout) {
if ( ! request.hasProperty("timeout")) {
return defaultTimeout;
}
try {
return Duration.ofMillis((long) (Double.parseDouble(request.getProperty("timeout")) * 1000));
} catch (Exception e) {
return defaultTimeout;
}
}
private String getMessage(Exception e, HttpRequest request) {
if (request.getBooleanProperty("debug")) {
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
e.printStackTrace(pw);
return sw.toString();
} else {
return Exceptions.toMessageString(e);
}
}
/**
* Default implementation of handler for GET requests. Returns an error response.
* Override this method to handle GET requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handleGET(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for POST requests. Returns an error response.
* Override this method to handle POST requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handlePOST(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for PUT requests. Returns an error response.
* Override this method to handle POST requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handlePUT(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Default implementation of handler for DELETE requests. Returns an error response.
* Override this method to handle DELETE requests.
*
* @param request a {@link HttpRequest}
* @return an error response with response code 405
*/
protected HttpResponse handleDELETE(HttpRequest request) {
return createErrorResponse(request.getMethod());
}
/**
* Creates error response when method is not handled
*
* @return an error response with response code 405
*/
private HttpResponse createErrorResponse(com.yahoo.jdisc.http.HttpRequest.Method method) {
return HttpErrorResponse.methodNotAllowed("Method '" + method + "' is not supported");
}
} |
```suggestion ``` | public void getConfig(CuratorConfig.Builder builder) {
if (getParent() instanceof ConfigserverCluster) return;
super.getConfig(builder);
builder.zookeeperSessionTimeoutSeconds(zookeeperSessionTimeoutSeconds);
} | if (getParent() instanceof ConfigserverCluster) return; | public void getConfig(CuratorConfig.Builder builder) {
super.getConfig(builder);
if (getParent() instanceof ConfigserverCluster) return;
builder.zookeeperSessionTimeoutSeconds(zookeeperSessionTimeoutSeconds);
} | class ApplicationContainerCluster extends ContainerCluster<ApplicationContainer> implements
ApplicationBundlesConfig.Producer,
QrStartConfig.Producer,
RankProfilesConfig.Producer,
RankingConstantsConfig.Producer,
OnnxModelsConfig.Producer,
RankingExpressionsConfig.Producer,
ContainerMbusConfig.Producer,
MetricsProxyApiConfig.Producer,
ZookeeperServerConfig.Producer,
ApplicationClusterInfo {
public static final String METRICS_V2_HANDLER_CLASS = MetricsV2Handler.class.getName();
public static final BindingPattern METRICS_V2_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(MetricsV2Handler.V2_PATH);
public static final BindingPattern METRICS_V2_HANDLER_BINDING_2 = SystemBindingPattern.fromHttpPath(MetricsV2Handler.V2_PATH + "/*");
public static final String PROMETHEUS_V1_HANDLER_CLASS = PrometheusV1Handler.class.getName();
private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH);
private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_2 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH + "/*");
public static final int heapSizePercentageOfTotalNodeMemory = 70;
public static final int heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster = 18;
private final Set<FileReference> applicationBundles = new LinkedHashSet<>();
private final Set<String> previousHosts;
private ContainerModelEvaluation modelEvaluation;
private final Optional<String> tlsClientAuthority;
private MbusParams mbusParams;
private boolean messageBusEnabled = true;
private int zookeeperSessionTimeoutSeconds = 12;
private final int transport_events_before_wakeup;
private final int transport_connections_per_target;
private Integer memoryPercentage = null;
private List<ApplicationClusterEndpoint> endpointList = List.of();
public ApplicationContainerCluster(AbstractConfigProducer<?> parent, String configSubId, String clusterId, DeployState deployState) {
super(parent, configSubId, clusterId, deployState, true, 10);
this.tlsClientAuthority = deployState.tlsClientAuthority();
previousHosts = Collections.unmodifiableSet(deployState.getPreviousModel().stream()
.map(Model::allocatedHosts)
.map(AllocatedHosts::getHosts)
.flatMap(Collection::stream)
.map(HostSpec::hostname)
.collect(Collectors.toCollection(() -> new LinkedHashSet<>())));
addSimpleComponent("com.yahoo.language.provider.DefaultLinguisticsProvider");
addSimpleComponent("com.yahoo.language.provider.DefaultEmbedderProvider");
addSimpleComponent("com.yahoo.container.jdisc.SecretStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.CertificateStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.AthenzIdentityProviderProvider");
addSimpleComponent(com.yahoo.container.core.documentapi.DocumentAccessProvider.class.getName());
addSimpleComponent(DOCUMENT_TYPE_MANAGER_CLASS);
addMetricsHandlers();
addTestrunnerComponentsIfTester(deployState);
transport_connections_per_target = deployState.featureFlags().mbusJavaRpcNumTargets();
transport_events_before_wakeup = deployState.featureFlags().mbusJavaEventsBeforeWakeup();
}
@Override
protected void doPrepare(DeployState deployState) {
addAndSendApplicationBundles(deployState);
sendUserConfiguredFiles(deployState);
createEndpointList(deployState);
}
private void addAndSendApplicationBundles(DeployState deployState) {
for (ComponentInfo component : deployState.getApplicationPackage().getComponentsInfo(deployState.getVespaVersion())) {
FileReference reference = deployState.getFileRegistry().addFile(component.getPathRelativeToAppDir());
applicationBundles.add(reference);
}
}
private void sendUserConfiguredFiles(DeployState deployState) {
FileSender fileSender = new FileSender(containers, deployState.getFileRegistry(), deployState.getDeployLogger());
for (Component<?, ?> component : getAllComponents()) {
fileSender.sendUserConfiguredFiles(component);
}
}
private void addMetricsHandlers() {
addMetricsHandler(METRICS_V2_HANDLER_CLASS, METRICS_V2_HANDLER_BINDING_1, METRICS_V2_HANDLER_BINDING_2);
addMetricsHandler(PROMETHEUS_V1_HANDLER_CLASS, PROMETHEUS_V1_HANDLER_BINDING_1, PROMETHEUS_V1_HANDLER_BINDING_2);
}
private void addMetricsHandler(String handlerClass, BindingPattern rootBinding, BindingPattern innerBinding) {
Handler handler = new Handler(
new ComponentModel(handlerClass, null, null, null));
handler.addServerBindings(rootBinding, innerBinding);
addComponent(handler);
}
private void addTestrunnerComponentsIfTester(DeployState deployState) {
if (deployState.isHosted() && deployState.getProperties().applicationId().instance().isTester()) {
addPlatformBundle(PlatformBundles.absoluteBundlePath("vespa-testrunner-components"));
addPlatformBundle(PlatformBundles.absoluteBundlePath("vespa-osgi-testrunner"));
addPlatformBundle(PlatformBundles.absoluteBundlePath("tenant-cd-api"));
if(deployState.zone().system().isPublic()) {
addPlatformBundle(PlatformBundles.absoluteBundlePath("cloud-tenant-cd"));
}
}
}
public void setModelEvaluation(ContainerModelEvaluation modelEvaluation) {
this.modelEvaluation = modelEvaluation;
}
public void setMemoryPercentage(Integer memoryPercentage) { this.memoryPercentage = memoryPercentage;
}
@Override
public Optional<Integer> getMemoryPercentage() {
if (memoryPercentage != null) {
return Optional.of(memoryPercentage);
} else if (isHostedVespa()) {
return getHostClusterId().isPresent() ?
Optional.of(heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster) :
Optional.of(heapSizePercentageOfTotalNodeMemory);
}
return Optional.empty();
}
/**
 * Creates the list of endpoints for this cluster, consumed later by LbServicesProducer.
 * Only relevant in hosted systems; tester instances get no endpoints.
 */
private void createEndpointList(DeployState deployState) {
    if ( ! deployState.isHosted()) return;
    if (deployState.getProperties().applicationId().instance().isTester()) return;
    List<ApplicationClusterEndpoint> endpoints = new ArrayList<>();
    // Sort hostnames so the endpoint host list is deterministic across config generations,
    // independent of container iteration order (matches the corrected variant of this method).
    List<String> hosts = getContainers().stream()
            .map(AbstractService::getHostName)
            .sorted()
            .collect(Collectors.toList());
    // One shared-L4, zone-scoped endpoint per configured zone DNS suffix.
    for (String suffix : deployState.getProperties().zoneDnsSuffixes()) {
        ApplicationClusterEndpoint.DnsName l4Name = ApplicationClusterEndpoint.DnsName.sharedL4NameFrom(
                deployState.zone().system(),
                ClusterSpec.Id.from(getName()),
                deployState.getProperties().applicationId(),
                suffix);
        endpoints.add(ApplicationClusterEndpoint.builder()
                              .zoneScope()
                              .sharedL4Routing()
                              .dnsName(l4Name)
                              .hosts(hosts)
                              .clusterId(getName())
                              .build());
    }
    // Endpoints assigned by the controller to this cluster; only sharedLayer4 routing is materialized here.
    Set<ContainerEndpoint> endpointsFromController = deployState.getEndpoints();
    endpointsFromController.stream()
            .filter(ce -> ce.clusterId().equals(getName()))
            .filter(ce -> ce.routingMethod() == sharedLayer4)
            .forEach(ce -> ce.names().forEach(
                    name -> endpoints.add(ApplicationClusterEndpoint.builder()
                                                  .scope(ce.scope())
                                                  .weight(Long.valueOf(ce.weight().orElse(1)).intValue())
                                                  .routingMethod(ce.routingMethod())
                                                  .dnsName(ApplicationClusterEndpoint.DnsName.from(name))
                                                  .hosts(hosts)
                                                  .clusterId(getName())
                                                  .build())
            ));
    endpointList = List.copyOf(endpoints);
}
/** Adds the file reference of every application bundle to the config. */
@Override
public void getConfig(ApplicationBundlesConfig.Builder builder) {
    for (FileReference bundle : applicationBundles)
        builder.bundles(bundle.value());
}
// The rank-profile/constant/ONNX-model config below is delegated to the model-evaluation
// producer when one has been injected (see setModelEvaluation); otherwise these are no-ops.
@Override
public void getConfig(RankProfilesConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
@Override
public void getConfig(RankingConstantsConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
@Override
public void getConfig(OnnxModelsConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
// Delegates to the model-evaluation producer when one is set; no-op otherwise.
// @Override was missing: the class implements RankingExpressionsConfig.Producer,
// and all sibling config getters carry the annotation.
@Override
public void getConfig(RankingExpressionsConfig.Builder builder) {
    if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
// Builds message-bus config: optional tuning from MbusParams (each field applied only when
// set), document-processing overrides when a docproc chain exists, and transport settings
// taken from feature flags at construction time.
@Override
public void getConfig(ContainerMbusConfig.Builder builder) {
if (mbusParams != null) {
if (mbusParams.maxConcurrentFactor != null)
builder.maxConcurrentFactor(mbusParams.maxConcurrentFactor);
if (mbusParams.documentExpansionFactor != null)
builder.documentExpansionFactor(mbusParams.documentExpansionFactor);
if (mbusParams.containerCoreMemory != null)
builder.containerCoreMemory(mbusParams.containerCoreMemory);
}
// Docproc may further adjust the same builder, after the explicit params above.
if (getDocproc() != null)
getDocproc().getConfig(builder);
builder.transport_events_before_wakeup(transport_events_before_wakeup);
builder.numconnectionspertarget(transport_connections_per_target);
}
// Points the metrics proxy API at the metrics-proxy container port and the two
// application metrics handler paths (values and Prometheus formats).
@Override
public void getConfig(MetricsProxyApiConfig.Builder builder) {
builder.metricsPort(MetricsProxyContainer.BASEPORT)
.metricsApiPath(ApplicationMetricsHandler.METRICS_VALUES_PATH)
.prometheusApiPath(ApplicationMetricsHandler.PROMETHEUS_VALUES_PATH);
}
/**
 * Sets JVM startup options: verbose GC, a fixed 1536 MB min/max heap, and — when a heap
 * percentage applies (see getMemoryPercentage()) — heap as a percentage of physical memory.
 */
@Override
public void getConfig(QrStartConfig.Builder builder) {
    super.getConfig(builder);
    builder.jvm.verbosegc(true)
               .availableProcessors(0)
               .compressedClassSpaceSize(0)
               .minHeapsize(1536)
               .heapsize(1536);
    // ifPresent replaces the isPresent()/get() pair, which evaluated getMemoryPercentage() twice.
    getMemoryPercentage().ifPresent(builder.jvm::heapSizeAsPercentageOfPhysicalMemory);
}
/**
 * Emits one ZooKeeper server entry per container. A server is marked joining when it was
 * not part of the previous model's host set (i.e. it is being added to an existing
 * ensemble), and retired when its container is retired.
 */
@Override
public void getConfig(ZookeeperServerConfig.Builder builder) {
    if (getParent() instanceof ConfigserverCluster) return; // Set up by the config server cluster itself
    for (Container container : getContainers()) {
        ZookeeperServerConfig.Server.Builder serverBuilder = new ZookeeperServerConfig.Server.Builder();
        serverBuilder.hostname(container.getHostName())
                     .id(container.index())
                     .joining( ! previousHosts.isEmpty() &&
                               ! previousHosts.contains(container.getHostName()))
                     .retired(container.isRetired());
        builder.server(serverBuilder);
    }
    // Loop-invariant hoisted: this was redundantly set once per server. Guarded so the
    // degenerate empty-cluster case produces exactly the same config as before.
    if ( ! getContainers().isEmpty())
        builder.dynamicReconfiguration(true);
}
// The CA that client certificates must be signed by, when TLS client auth is required.
@Override
public Optional<String> getTlsClientAuthority() {
return tlsClientAuthority;
}
// Message-bus tuning parameters; null fields mean "not set" (see getConfig(ContainerMbusConfig.Builder)).
public void setMbusParams(MbusParams mbusParams) {
this.mbusParams = mbusParams;
}
public void setMessageBusEnabled(boolean messageBusEnabled) { this.messageBusEnabled = messageBusEnabled; }
public void setZookeeperSessionTimeoutSeconds(int timeoutSeconds) {
this.zookeeperSessionTimeoutSeconds = timeoutSeconds;
}
protected boolean messageBusEnabled() { return messageBusEnabled; }
/** Adds an MbusServer provider component, namespaced under the given chain's ID. */
public void addMbusServer(ComponentId chainId) {
    ComponentId serviceId = chainId.nestInNamespace(ComponentId.fromString("MbusServer"));
    ComponentSpecification providerClass = ComponentSpecification.fromString(MbusServerProvider.class.getName());
    BundleInstantiationSpecification instantiation = new BundleInstantiationSpecification(serviceId, providerClass, null);
    addComponent(new Component<>(new ComponentModel(instantiation)));
}
// Endpoints computed by createEndpointList during prepare; empty until then.
@Override
public List<ApplicationClusterEndpoint> endpoints() {
return endpointList;
}
@Override
public String name() { return getName(); }
// Immutable holder for message-bus tuning parameters. Any field may be null,
// meaning "not set" — unset fields are skipped when building ContainerMbusConfig.
public static class MbusParams {
final Double maxConcurrentFactor;
final Double documentExpansionFactor;
final Integer containerCoreMemory;
public MbusParams(Double maxConcurrentFactor, Double documentExpansionFactor, Integer containerCoreMemory) {
this.maxConcurrentFactor = maxConcurrentFactor;
this.documentExpansionFactor = documentExpansionFactor;
this.containerCoreMemory = containerCoreMemory;
}
}
// NOTE(review): this span is a context column of the review-dataset dump — a verbatim copy of
// the ApplicationContainerCluster fragments documented above. Kept byte-identical (the leading
// "} |" is the dataset's column separator, not Java); only comments are added.
} | class ApplicationContainerCluster extends ContainerCluster<ApplicationContainer> implements
ApplicationBundlesConfig.Producer,
QrStartConfig.Producer,
RankProfilesConfig.Producer,
RankingConstantsConfig.Producer,
OnnxModelsConfig.Producer,
RankingExpressionsConfig.Producer,
ContainerMbusConfig.Producer,
MetricsProxyApiConfig.Producer,
ZookeeperServerConfig.Producer,
ApplicationClusterInfo {
public static final String METRICS_V2_HANDLER_CLASS = MetricsV2Handler.class.getName();
public static final BindingPattern METRICS_V2_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(MetricsV2Handler.V2_PATH);
public static final BindingPattern METRICS_V2_HANDLER_BINDING_2 = SystemBindingPattern.fromHttpPath(MetricsV2Handler.V2_PATH + "/*");
public static final String PROMETHEUS_V1_HANDLER_CLASS = PrometheusV1Handler.class.getName();
private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH);
private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_2 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH + "/*");
public static final int defaultHeapSizePercentageOfTotalNodeMemory = 70;
public static final int heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster = 18;
private final Set<FileReference> applicationBundles = new LinkedHashSet<>();
// Hostnames of the previous model's allocation; used to mark ZooKeeper servers as "joining".
private final Set<String> previousHosts;
private ContainerModelEvaluation modelEvaluation;
private final Optional<String> tlsClientAuthority;
private MbusParams mbusParams;
private boolean messageBusEnabled = true;
private int zookeeperSessionTimeoutSeconds = 30;
private final int transport_events_before_wakeup;
private final int transport_connections_per_target;
private final int heapSizePercentageOfTotalNodeMemory;
private Integer memoryPercentage = null;
private List<ApplicationClusterEndpoint> endpointList = List.of();
public ApplicationContainerCluster(AbstractConfigProducer<?> parent, String configSubId, String clusterId, DeployState deployState) {
super(parent, configSubId, clusterId, deployState, true, 10);
this.tlsClientAuthority = deployState.tlsClientAuthority();
previousHosts = Collections.unmodifiableSet(deployState.getPreviousModel().stream()
.map(Model::allocatedHosts)
.map(AllocatedHosts::getHosts)
.flatMap(Collection::stream)
.map(HostSpec::hostname)
.collect(Collectors.toCollection(() -> new LinkedHashSet<>())));
addSimpleComponent("com.yahoo.language.provider.DefaultLinguisticsProvider");
addSimpleComponent("com.yahoo.language.provider.DefaultEmbedderProvider");
addSimpleComponent("com.yahoo.container.jdisc.SecretStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.CertificateStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.AthenzIdentityProviderProvider");
addSimpleComponent(com.yahoo.container.core.documentapi.DocumentAccessProvider.class.getName());
addSimpleComponent(DOCUMENT_TYPE_MANAGER_CLASS);
addMetricsHandlers();
addTestrunnerComponentsIfTester(deployState);
transport_connections_per_target = deployState.featureFlags().mbusJavaRpcNumTargets();
transport_events_before_wakeup = deployState.featureFlags().mbusJavaEventsBeforeWakeup();
// Feature flag wins (capped at 99%); non-positive flag value means "use the default".
heapSizePercentageOfTotalNodeMemory = deployState.featureFlags().heapSizePercentage() > 0
? Math.min(99, deployState.featureFlags().heapSizePercentage())
: defaultHeapSizePercentageOfTotalNodeMemory;
}
@Override
protected void doPrepare(DeployState deployState) {
addAndSendApplicationBundles(deployState);
sendUserConfiguredFiles(deployState);
createEndpointList(deployState);
}
private void addAndSendApplicationBundles(DeployState deployState) {
for (ComponentInfo component : deployState.getApplicationPackage().getComponentsInfo(deployState.getVespaVersion())) {
FileReference reference = deployState.getFileRegistry().addFile(component.getPathRelativeToAppDir());
applicationBundles.add(reference);
}
}
private void sendUserConfiguredFiles(DeployState deployState) {
FileSender fileSender = new FileSender(containers, deployState.getFileRegistry(), deployState.getDeployLogger());
for (Component<?, ?> component : getAllComponents()) {
fileSender.sendUserConfiguredFiles(component);
}
}
private void addMetricsHandlers() {
addMetricsHandler(METRICS_V2_HANDLER_CLASS, METRICS_V2_HANDLER_BINDING_1, METRICS_V2_HANDLER_BINDING_2);
addMetricsHandler(PROMETHEUS_V1_HANDLER_CLASS, PROMETHEUS_V1_HANDLER_BINDING_1, PROMETHEUS_V1_HANDLER_BINDING_2);
}
private void addMetricsHandler(String handlerClass, BindingPattern rootBinding, BindingPattern innerBinding) {
Handler handler = new Handler(
new ComponentModel(handlerClass, null, null, null));
handler.addServerBindings(rootBinding, innerBinding);
addComponent(handler);
}
private void addTestrunnerComponentsIfTester(DeployState deployState) {
if (deployState.isHosted() && deployState.getProperties().applicationId().instance().isTester()) {
addPlatformBundle(PlatformBundles.absoluteBundlePath("vespa-testrunner-components"));
addPlatformBundle(PlatformBundles.absoluteBundlePath("vespa-osgi-testrunner"));
addPlatformBundle(PlatformBundles.absoluteBundlePath("tenant-cd-api"));
if(deployState.zone().system().isPublic()) {
addPlatformBundle(PlatformBundles.absoluteBundlePath("cloud-tenant-cd"));
}
}
}
public void setModelEvaluation(ContainerModelEvaluation modelEvaluation) {
this.modelEvaluation = modelEvaluation;
}
public void setMemoryPercentage(Integer memoryPercentage) { this.memoryPercentage = memoryPercentage; }
@Override
public Optional<Integer> getMemoryPercentage() {
if (memoryPercentage != null) {
return Optional.of(memoryPercentage);
} else if (isHostedVespa()) {
return getHostClusterId().isPresent() ?
Optional.of(heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster) :
Optional.of(heapSizePercentageOfTotalNodeMemory);
}
return Optional.empty();
}
/** Create list of endpoints, these will be consumed later by LbServicesProducer */
private void createEndpointList(DeployState deployState) {
if(!deployState.isHosted()) return;
if(deployState.getProperties().applicationId().instance().isTester()) return;
List<ApplicationClusterEndpoint> endpoints = new ArrayList<>();
// This copy already sorts hosts — the deterministic-ordering fix applied to the fragment above.
List<String> hosts = getContainers().stream()
.map(AbstractService::getHostName)
.sorted()
.collect(Collectors.toList());
for (String suffix : deployState.getProperties().zoneDnsSuffixes()) {
ApplicationClusterEndpoint.DnsName l4Name = ApplicationClusterEndpoint.DnsName.sharedL4NameFrom(
deployState.zone().system(),
ClusterSpec.Id.from(getName()),
deployState.getProperties().applicationId(),
suffix);
endpoints.add(ApplicationClusterEndpoint.builder()
.zoneScope()
.sharedL4Routing()
.dnsName(l4Name)
.hosts(hosts)
.clusterId(getName())
.build());
}
Set<ContainerEndpoint> endpointsFromController = deployState.getEndpoints();
endpointsFromController.stream()
.filter(ce -> ce.clusterId().equals(getName()))
.filter(ce -> ce.routingMethod() == sharedLayer4)
.forEach(ce -> ce.names().forEach(
name -> endpoints.add(ApplicationClusterEndpoint.builder()
.scope(ce.scope())
.weight(Long.valueOf(ce.weight().orElse(1)).intValue())
.routingMethod(ce.routingMethod())
.dnsName(ApplicationClusterEndpoint.DnsName.from(name))
.hosts(hosts)
.clusterId(getName())
.build())
));
endpointList = List.copyOf(endpoints);
}
@Override
public void getConfig(ApplicationBundlesConfig.Builder builder) {
applicationBundles.stream().map(FileReference::value)
.forEach(builder::bundles);
}
@Override
public void getConfig(RankProfilesConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
@Override
public void getConfig(RankingConstantsConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
@Override
public void getConfig(OnnxModelsConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
public void getConfig(RankingExpressionsConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
@Override
public void getConfig(ContainerMbusConfig.Builder builder) {
if (mbusParams != null) {
if (mbusParams.maxConcurrentFactor != null)
builder.maxConcurrentFactor(mbusParams.maxConcurrentFactor);
if (mbusParams.documentExpansionFactor != null)
builder.documentExpansionFactor(mbusParams.documentExpansionFactor);
if (mbusParams.containerCoreMemory != null)
builder.containerCoreMemory(mbusParams.containerCoreMemory);
}
if (getDocproc() != null)
getDocproc().getConfig(builder);
builder.transport_events_before_wakeup(transport_events_before_wakeup);
builder.numconnectionspertarget(transport_connections_per_target);
}
@Override
public void getConfig(MetricsProxyApiConfig.Builder builder) {
builder.metricsPort(MetricsProxyContainer.BASEPORT)
.metricsApiPath(ApplicationMetricsHandler.METRICS_VALUES_PATH)
.prometheusApiPath(ApplicationMetricsHandler.PROMETHEUS_VALUES_PATH);
}
@Override
public void getConfig(QrStartConfig.Builder builder) {
super.getConfig(builder);
builder.jvm.verbosegc(true)
.availableProcessors(0)
.compressedClassSpaceSize(0)
.minHeapsize(1536)
.heapsize(1536);
if (getMemoryPercentage().isPresent()) {
builder.jvm.heapSizeAsPercentageOfPhysicalMemory(getMemoryPercentage().get());
}
}
@Override
public void getConfig(ZookeeperServerConfig.Builder builder) {
if (getParent() instanceof ConfigserverCluster) return;
for (Container container : getContainers()) {
ZookeeperServerConfig.Server.Builder serverBuilder = new ZookeeperServerConfig.Server.Builder();
serverBuilder.hostname(container.getHostName())
.id(container.index())
.joining( ! previousHosts.isEmpty() &&
! previousHosts.contains(container.getHostName()))
.retired(container.isRetired());
builder.server(serverBuilder);
builder.dynamicReconfiguration(true);
}
}
@Override
public Optional<String> getTlsClientAuthority() {
return tlsClientAuthority;
}
public void setMbusParams(MbusParams mbusParams) {
this.mbusParams = mbusParams;
}
public void setMessageBusEnabled(boolean messageBusEnabled) { this.messageBusEnabled = messageBusEnabled; }
public void setZookeeperSessionTimeoutSeconds(int timeoutSeconds) {
this.zookeeperSessionTimeoutSeconds = timeoutSeconds;
}
protected boolean messageBusEnabled() { return messageBusEnabled; }
public void addMbusServer(ComponentId chainId) {
ComponentId serviceId = chainId.nestInNamespace(ComponentId.fromString("MbusServer"));
addComponent(
new Component<>(new ComponentModel(new BundleInstantiationSpecification(
serviceId,
ComponentSpecification.fromString(MbusServerProvider.class.getName()),
null))));
}
@Override
public List<ApplicationClusterEndpoint> endpoints() {
return endpointList;
}
@Override
public String name() { return getName(); }
public static class MbusParams {
final Double maxConcurrentFactor;
final Double documentExpansionFactor;
final Integer containerCoreMemory;
public MbusParams(Double maxConcurrentFactor, Double documentExpansionFactor, Integer containerCoreMemory) {
this.maxConcurrentFactor = maxConcurrentFactor;
this.documentExpansionFactor = documentExpansionFactor;
this.containerCoreMemory = containerCoreMemory;
}
}
} |
Why `toArray`? | private void validateEndpointRegions(List<Endpoint> endpoints, DeploymentInstanceSpec instance) {
// Validates that no endpoint mixes regions from incompatible clouds.
// instance == null marks an application-level endpoint (only affects the error-message prefix).
for (var endpoint : endpoints) {
// NOTE(review, re "Why toArray?"): the HashSet deduplicates the declared regions, and the
// array form appears to be required because the zone filter's in(...) below takes varargs
// RegionName... — TODO confirm against the ZoneFilter API; if it also accepts a
// collection, the array round-trip can go away.
RegionName[] regions = new HashSet<>(endpoint.regions()).toArray(RegionName[]::new);
Set<CloudName> clouds = controller.zoneRegistry().zones().all().in(Environment.prod)
.in(regions)
.zones().stream()
.map(ZoneApi::getCloudName)
.collect(Collectors.toSet());
String endpointString = instance == null ? "Application endpoint '" + endpoint.endpointId() + "'"
: "Endpoint '" + endpoint.endpointId() + "' in " + instance;
// Deliberately empty branch: any mix of GCP and AWS regions is allowed.
if (Set.of(CloudName.GCP, CloudName.AWS).containsAll(clouds)) { }
else if (Set.of(CloudName.DEFAULT).containsAll(clouds)) {
// In the default cloud, application-level endpoints must target exactly one region.
if (endpoint.level() == Level.application && regions.length != 1) {
throw new IllegalArgumentException(endpointString + " cannot contain different regions: " +
endpoint.regions().stream().sorted().toList());
}
}
else {
throw new IllegalArgumentException(endpointString + " cannot contain regions in different clouds: " +
endpoint.regions().stream().sorted().toList());
}
}
} | RegionName[] regions = new HashSet<>(endpoint.regions()).toArray(RegionName[]::new); | private void validateEndpointRegions(List<Endpoint> endpoints, DeploymentInstanceSpec instance) {
// NOTE(review): "after" column of the dataset row — same logic as the copy above
// (the method header sits on the previous line, merged with the column separator).
for (var endpoint : endpoints) {
// HashSet deduplicates regions; toArray(RegionName[]::new) feeds the varargs zone filter below.
RegionName[] regions = new HashSet<>(endpoint.regions()).toArray(RegionName[]::new);
Set<CloudName> clouds = controller.zoneRegistry().zones().all().in(Environment.prod)
.in(regions)
.zones().stream()
.map(ZoneApi::getCloudName)
.collect(Collectors.toSet());
String endpointString = instance == null ? "Application endpoint '" + endpoint.endpointId() + "'"
: "Endpoint '" + endpoint.endpointId() + "' in " + instance;
// Empty branch: any mix of GCP and AWS regions is allowed.
if (Set.of(CloudName.GCP, CloudName.AWS).containsAll(clouds)) { }
else if (Set.of(CloudName.DEFAULT).containsAll(clouds)) {
if (endpoint.level() == Level.application && regions.length != 1) {
throw new IllegalArgumentException(endpointString + " cannot contain different regions: " +
endpoint.regions().stream().sorted().toList());
}
}
else {
throw new IllegalArgumentException(endpointString + " cannot contain regions in different clouds: " +
endpoint.regions().stream().sorted().toList());
}
}
} | class ApplicationPackageValidator {
private final Controller controller;
public ApplicationPackageValidator(Controller controller) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
}
/**
* Validate the given application package
*
* @throws IllegalArgumentException if any validations fail
*/
public void validate(Application application, ApplicationPackage applicationPackage, Instant instant) {
// Each check throws IllegalArgumentException on the first violation it finds.
validateSteps(applicationPackage.deploymentSpec());
validateEndpointRegions(applicationPackage.deploymentSpec());
validateEndpointChange(application, applicationPackage, instant);
validateCompactedEndpoint(applicationPackage);
validateSecurityClientsPem(applicationPackage);
validateDeprecatedElements(applicationPackage);
}
/**
 * Verifies that the deployment spec uses no element deprecated on a major version older
 * than the wanted major version, which is resolved from, in order: the package's compile
 * version, the spec's pinned major version, or the current controller version.
 */
private void validateDeprecatedElements(ApplicationPackage applicationPackage) {
    int wantedMajor = applicationPackage.compileVersion().map(Version::getMajor)
                                        .or(() -> applicationPackage.deploymentSpec().majorVersion())
                                        .or(() -> controller.readVersionStatus().controllerVersion()
                                                            .map(VespaVersion::versionNumber)
                                                            .map(Version::getMajor))
                                        .orElseThrow(() -> new IllegalArgumentException("Could not determine wanted major version"));
    for (var element : applicationPackage.deploymentSpec().deprecatedElements()) {
        if (element.majorVersion() < wantedMajor)
            throw new IllegalArgumentException(element.humanReadableString());
    }
}
/** Verify that we have the security/clients.pem file for public systems */
private void validateSecurityClientsPem(ApplicationPackage applicationPackage) {
// Only enforced for public systems, and only when the spec actually declares deployment steps.
if (!controller.system().isPublic() || applicationPackage.deploymentSpec().steps().isEmpty()) return;
if (applicationPackage.trustedCertificates().isEmpty())
throw new IllegalArgumentException("Missing required file 'security/clients.pem'");
}
/** Verifies that every zone with a region listed in the deployment spec exists in this system. */
private void validateSteps(DeploymentSpec deploymentSpec) {
    for (var instance : deploymentSpec.instances()) {
        for (var zone : instance.zones()) {
            // Zones without a region (e.g. test steps) cannot be resolved and are skipped.
            zone.region().ifPresent(region -> {
                ZoneId zoneId = ZoneId.from(zone.environment(), region);
                if ( ! controller.zoneRegistry().hasZone(zoneId))
                    throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
            });
        }
    }
}
/** Verify that:
* <ul>
* <li>no single endpoint contains regions in different clouds</li>
* <li>application endpoints with different regions must be contained in GCP and AWS</li>
* </ul>
*/
private void validateEndpointRegions(DeploymentSpec deploymentSpec) {
// Instance-level endpoints first, then application-level endpoints (instance == null).
for (var instance : deploymentSpec.instances()) {
validateEndpointRegions(instance.endpoints(), instance);
}
validateEndpointRegions(deploymentSpec.endpoints(), null);
}
/** Verify endpoint configuration of given application package */
private void validateEndpointChange(Application application, ApplicationPackage applicationPackage, Instant instant) {
// Delegates to the per-instance overload for every instance declared in the new spec.
applicationPackage.deploymentSpec().instances().forEach(instance -> validateEndpointChange(application,
instance.name(),
applicationPackage,
instant));
}
/**
 * Verifies that no two endpoints collapse to the same compacted form: default instance
 * names and the "default" endpoint ID are omitted from generated names, so two endpoints
 * whose remaining (non-compactable) IDs coincide would clash.
 */
private void validateCompactedEndpoint(ApplicationPackage applicationPackage) {
    Map<List<String>, InstanceEndpoint> seen = new HashMap<>();
    for (var instanceSpec : applicationPackage.deploymentSpec().instances()) {
        for (var endpoint : instanceSpec.endpoints()) {
            var candidate = new InstanceEndpoint(instanceSpec.name(), endpoint.endpointId());
            var clash = seen.putIfAbsent(nonCompactableIds(instanceSpec.name(), endpoint), candidate);
            if (clash != null) {
                throw new IllegalArgumentException("Endpoint with ID '" + endpoint.endpointId() + "' in instance '"
                                                   + instanceSpec.name().value() +
                                                   "' clashes with endpoint '" + clash.endpointId +
                                                   "' in instance '" + clash.instance + "'");
            }
        }
    }
}
/** Verify changes to endpoint configuration by comparing given application package to the existing one, if any */
private void validateEndpointChange(Application application, InstanceName instanceName, ApplicationPackage applicationPackage, Instant instant) {
var validationId = ValidationId.globalEndpointChange;
// The change is permitted when an explicit validation override is in effect.
if (applicationPackage.validationOverrides().allows(validationId, instant)) return;
// Currently configured endpoints (empty when this instance is new).
var endpoints = application.deploymentSpec().instance(instanceName)
.map(ApplicationPackageValidator::allEndpointsOf)
.orElseGet(List::of);
var newEndpoints = allEndpointsOf(applicationPackage.deploymentSpec().requireInstance(instanceName));
// OK when nothing is removed, or the new spec still covers all existing destinations.
if (newEndpoints.containsAll(endpoints)) return;
if (containsAllDestinationsOf(endpoints, newEndpoints)) return;
// Compute the removed/added sets only to build a precise error message.
var removedEndpoints = new ArrayList<>(endpoints);
removedEndpoints.removeAll(newEndpoints);
newEndpoints.removeAll(endpoints);
throw new IllegalArgumentException(validationId.value() + ": application '" + application.id() +
(instanceName.isDefault() ? "" : "." + instanceName.value()) +
"' has endpoints " + endpoints +
", but does not include all of these in deployment.xml. Deploying given " +
"deployment.xml will remove " + removedEndpoints +
(newEndpoints.isEmpty() ? "" : " and add " + newEndpoints) +
". " + ValidationOverrides.toAllowMessage(validationId));
}
/**
 * Returns whether every endpoint in endpoints has a same-ID endpoint in newEndpoints that
 * covers all its regions and targets the same cluster. When several new endpoints share an
 * ID, the last one decides — matching the original's last-match-wins behavior.
 */
private static boolean containsAllDestinationsOf(List<Endpoint> endpoints, List<Endpoint> newEndpoints) {
    for (var endpoint : endpoints) {
        boolean covered = false;
        for (var newEndpoint : newEndpoints) {
            if ( ! endpoint.endpointId().equals(newEndpoint.endpointId())) continue;
            covered = newEndpoint.regions().containsAll(endpoint.regions())
                      && newEndpoint.containerId().equals(endpoint.containerId());
        }
        // AND distributes over the per-endpoint flags, so failing any endpoint fails the whole check.
        if ( ! covered) return false;
    }
    return true;
}
/** Returns all configured endpoints of given deployment instance spec */
private static List<Endpoint> allEndpointsOf(DeploymentInstanceSpec deploymentInstanceSpec) {
var endpoints = new ArrayList<>(deploymentInstanceSpec.endpoints());
// A legacy global-service-id, if present, is included as an implicit default endpoint.
legacyEndpoint(deploymentInstanceSpec).ifPresent(endpoints::add);
return endpoints;
}
/** Returns the legacy global service ID as a default-ID, instance-level endpoint, if one is set. */
private static Optional<Endpoint> legacyEndpoint(DeploymentInstanceSpec instance) {
    return instance.globalServiceId().map(globalServiceId -> {
        // Each distinct production region becomes a target with weight 1.
        // Stream.toList() for consistency with the rest of this file; the targets list is
        // treated as a value and never mutated after construction here.
        var targets = instance.zones().stream()
                              .filter(zone -> zone.environment().isProduction())
                              .flatMap(zone -> zone.region().stream())
                              .distinct()
                              .map(region -> new Endpoint.Target(region, instance.name(), 1))
                              .toList();
        return new Endpoint(EndpointId.defaultId().id(), globalServiceId, Endpoint.Level.instance, targets);
    });
}
/** Returns the IDs of the given instance and endpoint that cannot be compacted away, i.e. the non-default ones. */
private static List<String> nonCompactableIds(InstanceName instance, Endpoint endpoint) {
    var ids = new ArrayList<String>(2);
    if ( ! instance.isDefault())
        ids.add(instance.value());
    if ( ! "default".equals(endpoint.endpointId()))
        ids.add(endpoint.endpointId());
    return ids;
}
// Pair of (instance name, endpoint ID), kept only for error reporting when compacted forms clash.
private static class InstanceEndpoint {
private final InstanceName instance;
private final String endpointId;
public InstanceEndpoint(InstanceName instance, String endpointId) {
this.instance = instance;
this.endpointId = endpointId;
}
}
} | class ApplicationPackageValidator {
private final Controller controller;
public ApplicationPackageValidator(Controller controller) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
}
/**
* Validate the given application package
*
* @throws IllegalArgumentException if any validations fail
*/
public void validate(Application application, ApplicationPackage applicationPackage, Instant instant) {
validateSteps(applicationPackage.deploymentSpec());
validateEndpointRegions(applicationPackage.deploymentSpec());
validateEndpointChange(application, applicationPackage, instant);
validateCompactedEndpoint(applicationPackage);
validateSecurityClientsPem(applicationPackage);
validateDeprecatedElements(applicationPackage);
}
/** Verify that deployment spec does not use elements deprecated on a major version older than wanted major version */
private void validateDeprecatedElements(ApplicationPackage applicationPackage) {
int wantedMajor = applicationPackage.compileVersion().map(Version::getMajor)
.or(() -> applicationPackage.deploymentSpec().majorVersion())
.or(() -> controller.readVersionStatus().controllerVersion()
.map(VespaVersion::versionNumber)
.map(Version::getMajor))
.orElseThrow(() -> new IllegalArgumentException("Could not determine wanted major version"));
for (var deprecatedElement : applicationPackage.deploymentSpec().deprecatedElements()) {
if (deprecatedElement.majorVersion() >= wantedMajor) continue;
throw new IllegalArgumentException(deprecatedElement.humanReadableString());
}
}
/** Verify that we have the security/clients.pem file for public systems */
private void validateSecurityClientsPem(ApplicationPackage applicationPackage) {
if (!controller.system().isPublic() || applicationPackage.deploymentSpec().steps().isEmpty()) return;
if (applicationPackage.trustedCertificates().isEmpty())
throw new IllegalArgumentException("Missing required file 'security/clients.pem'");
}
/** Verify that each of the production zones listed in the deployment spec exist in this system */
private void validateSteps(DeploymentSpec deploymentSpec) {
for (var spec : deploymentSpec.instances()) {
for (var zone : spec.zones()) {
Environment environment = zone.environment();
if (zone.region().isEmpty()) continue;
ZoneId zoneId = ZoneId.from(environment, zone.region().get());
if (!controller.zoneRegistry().hasZone(zoneId)) {
throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
}
}
}
}
/** Verify that:
* <ul>
* <li>no single endpoint contains regions in different clouds</li>
* <li>application endpoints with different regions must be contained in CGP and AWS</li>
* </ul>
*/
private void validateEndpointRegions(DeploymentSpec deploymentSpec) {
for (var instance : deploymentSpec.instances()) {
validateEndpointRegions(instance.endpoints(), instance);
}
validateEndpointRegions(deploymentSpec.endpoints(), null);
}
/** Verify endpoint configuration of given application package */
private void validateEndpointChange(Application application, ApplicationPackage applicationPackage, Instant instant) {
applicationPackage.deploymentSpec().instances().forEach(instance -> validateEndpointChange(application,
instance.name(),
applicationPackage,
instant));
}
/** Verify that compactable endpoint parts (instance name and endpoint ID) do not clash */
private void validateCompactedEndpoint(ApplicationPackage applicationPackage) {
Map<List<String>, InstanceEndpoint> instanceEndpoints = new HashMap<>();
for (var instanceSpec : applicationPackage.deploymentSpec().instances()) {
for (var endpoint : instanceSpec.endpoints()) {
List<String> nonCompactableIds = nonCompactableIds(instanceSpec.name(), endpoint);
InstanceEndpoint instanceEndpoint = new InstanceEndpoint(instanceSpec.name(), endpoint.endpointId());
InstanceEndpoint existingEndpoint = instanceEndpoints.get(nonCompactableIds);
if (existingEndpoint != null) {
throw new IllegalArgumentException("Endpoint with ID '" + endpoint.endpointId() + "' in instance '"
+ instanceSpec.name().value() +
"' clashes with endpoint '" + existingEndpoint.endpointId +
"' in instance '" + existingEndpoint.instance + "'");
}
instanceEndpoints.put(nonCompactableIds, instanceEndpoint);
}
}
}
/** Verify changes to endpoint configuration by comparing given application package to the existing one, if any */
private void validateEndpointChange(Application application, InstanceName instanceName, ApplicationPackage applicationPackage, Instant instant) {
var validationId = ValidationId.globalEndpointChange;
if (applicationPackage.validationOverrides().allows(validationId, instant)) return;
var endpoints = application.deploymentSpec().instance(instanceName)
.map(ApplicationPackageValidator::allEndpointsOf)
.orElseGet(List::of);
var newEndpoints = allEndpointsOf(applicationPackage.deploymentSpec().requireInstance(instanceName));
if (newEndpoints.containsAll(endpoints)) return;
if (containsAllDestinationsOf(endpoints, newEndpoints)) return;
var removedEndpoints = new ArrayList<>(endpoints);
removedEndpoints.removeAll(newEndpoints);
newEndpoints.removeAll(endpoints);
throw new IllegalArgumentException(validationId.value() + ": application '" + application.id() +
(instanceName.isDefault() ? "" : "." + instanceName.value()) +
"' has endpoints " + endpoints +
", but does not include all of these in deployment.xml. Deploying given " +
"deployment.xml will remove " + removedEndpoints +
(newEndpoints.isEmpty() ? "" : " and add " + newEndpoints) +
". " + ValidationOverrides.toAllowMessage(validationId));
}
/**
 * Returns whether newEndpoints contains all destinations in endpoints: for every existing endpoint
 * there must be a new endpoint with the same endpoint ID which covers all of its regions and
 * targets the same cluster.
 */
private static boolean containsAllDestinationsOf(List<Endpoint> endpoints, List<Endpoint> newEndpoints) {
    var containsAllRegions = true;
    var hasSameCluster = true;
    for (var endpoint : endpoints) {
        var endpointContainsAllRegions = false;
        var endpointHasSameCluster = false;
        for (var newEndpoint : newEndpoints) {
            // If several new endpoints share this ID, the last match decides, as before
            if (endpoint.endpointId().equals(newEndpoint.endpointId())) {
                endpointContainsAllRegions = newEndpoint.regions().containsAll(endpoint.regions());
                endpointHasSameCluster = newEndpoint.containerId().equals(endpoint.containerId());
            }
        }
        containsAllRegions &= endpointContainsAllRegions;
        hasSameCluster &= endpointHasSameCluster;
        // Both accumulators are sticky-false ('&='), so once either is false the final
        // conjunction can never become true again — stop scanning the remaining endpoints
        if (!containsAllRegions || !hasSameCluster) return false;
    }
    return containsAllRegions && hasSameCluster;
}
/** Returns all configured endpoints of given deployment instance spec, including the legacy global-service-id endpoint, if any */
private static List<Endpoint> allEndpointsOf(DeploymentInstanceSpec spec) {
    List<Endpoint> endpoints = new ArrayList<>(spec.endpoints());
    legacyEndpoint(spec).ifPresent(endpoints::add);
    return endpoints;
}
/** Returns global service ID as an endpoint, if any global service ID is set */
private static Optional<Endpoint> legacyEndpoint(DeploymentInstanceSpec instance) {
    return instance.globalServiceId().map(serviceId -> {
        // One target per distinct production region, each with weight 1
        List<Endpoint.Target> targets = instance.zones().stream()
                                                .filter(zone -> zone.environment().isProduction())
                                                .flatMap(zone -> zone.region().stream())
                                                .distinct()
                                                .map(region -> new Endpoint.Target(region, instance.name(), 1))
                                                .collect(Collectors.toList());
        return new Endpoint(EndpointId.defaultId().id(), serviceId, Endpoint.Level.instance, targets);
    });
}
/** Returns a list of the non-compactable IDs of given instance and endpoint */
private static List<String> nonCompactableIds(InstanceName instance, Endpoint endpoint) {
    List<String> ids = new ArrayList<>(2);
    // "default" instance and endpoint names are compacted away and thus excluded
    if ( ! instance.isDefault()) ids.add(instance.value());
    if ( ! "default".equals(endpoint.endpointId())) ids.add(endpoint.endpointId());
    return ids;
}
/**
 * An (instance name, endpoint ID) pair, used to detect and report endpoint IDs which would
 * clash across instances after compaction of "default" ID parts.
 */
private static class InstanceEndpoint {

    private final InstanceName instance; // instance declaring the endpoint
    private final String endpointId;     // ID of the declared endpoint

    public InstanceEndpoint(InstanceName instance, String endpointId) {
        this.instance = instance;
        this.endpointId = endpointId;
    }

}
} |
// NOTE(review): the stray "😅 |" prefix on this line and the " | ... | " fragments on the closing
// line look like extraction/merge residue (column separators), not Java — confirm against the
// upstream file before building.
😅 | void testDnsUpdatesForApplicationEndpoint() {
    // Two instances sharing application-level endpoints a-e, with per-instance weights
    ApplicationId beta = ApplicationId.from("tenant1", "app1", "beta");
    ApplicationId main = ApplicationId.from("tenant1", "app1", "main");
    var context = tester.newDeploymentContext(beta);
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .instances("beta,main")
            .region("us-east-3")
            .region("us-west-1")
            .region("aws-us-east-1a")
            .region("aws-us-east-1b")
            .applicationEndpoint("a", "default",
                                 Map.of("aws-us-east-1a", Map.of(beta.instance(), 2,
                                                                 main.instance(), 8),
                                        "aws-us-east-1b", Map.of(main.instance(), 1)))
            .applicationEndpoint("b", "default", "aws-us-east-1a",
                                 Map.of(beta.instance(), 1,
                                        main.instance(), 1))
            .applicationEndpoint("c", "default", "aws-us-east-1b",
                                 Map.of(beta.instance(), 4))
            .applicationEndpoint("d", "default", "us-west-1",
                                 Map.of(main.instance(), 7,
                                        beta.instance(), 3))
            .applicationEndpoint("e", "default", "us-east-3",
                                 Map.of(main.instance(), 3))
            .build();
    context.submit(applicationPackage).deploy();
    ZoneId east3 = ZoneId.from("prod", "us-east-3");
    ZoneId west1 = ZoneId.from("prod", "us-west-1");
    ZoneId east1a = ZoneId.from("prod", "aws-us-east-1a");
    ZoneId east1b = ZoneId.from("prod", "aws-us-east-1b");
    // Expected endpoint name -> weight, per deployment, as passed to the config server
    Map<DeploymentId, Map<String, Integer>> deploymentEndpoints = Map.of(
            new DeploymentId(beta, east3), Map.of(),
            new DeploymentId(main, east3), Map.of("e.app1.tenant1.us-east-3-r.vespa.oath.cloud", 3),
            new DeploymentId(beta, west1), Map.of("d.app1.tenant1.us-west-1-r.vespa.oath.cloud", 3),
            new DeploymentId(main, west1), Map.of("d.app1.tenant1.us-west-1-r.vespa.oath.cloud", 7),
            new DeploymentId(beta, east1a), Map.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 2,
                                                   "b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 1),
            new DeploymentId(main, east1a), Map.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 8,
                                                   "b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 1),
            new DeploymentId(beta, east1b), Map.of("c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud", 4),
            new DeploymentId(main, east1b), Map.of("a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud", 1)
    );
    deploymentEndpoints.forEach((deployment, endpoints) -> {
        Set<ContainerEndpoint> expected = endpoints.entrySet().stream()
                .map(kv -> new ContainerEndpoint("default", "application",
                                                 List.of(kv.getKey()),
                                                 OptionalInt.of(kv.getValue()),
                                                 tester.controller().zoneRegistry().routingMethod(deployment.zoneId())))
                .collect(Collectors.toSet());
        assertEquals(expected,
                     tester.configServer().containerEndpoints().get(deployment),
                     "Endpoint names for " + deployment + " are passed to config server");
    });
    context.flushDnsUpdates();
    // Expected DNS records after flushing: CNAMEs per instance/zone, weighted ALIASes for
    // aws zones, plain CNAMEs to the shared VIP elsewhere
    Set<Record> records = tester.controllerTester().nameService().records();
    assertEquals(new TreeSet<>(Set.of(new Record(Record.Type.CNAME,
                                                 RecordName.from("beta.app1.tenant1.aws-us-east-1a.vespa.oath.cloud"),
                                                 RecordData.from("lb-0--tenant1.app1.beta--prod.aws-us-east-1a.")),
                                      new Record(Record.Type.CNAME,
                                                 RecordName.from("beta.app1.tenant1.aws-us-east-1b.vespa.oath.cloud"),
                                                 RecordData.from("lb-0--tenant1.app1.beta--prod.aws-us-east-1b.")),
                                      new Record(Record.Type.CNAME,
                                                 RecordName.from("main.app1.tenant1.aws-us-east-1a.vespa.oath.cloud"),
                                                 RecordData.from("lb-0--tenant1.app1.main--prod.aws-us-east-1a.")),
                                      new Record(Record.Type.CNAME,
                                                 RecordName.from("main.app1.tenant1.aws-us-east-1b.vespa.oath.cloud"),
                                                 RecordData.from("lb-0--tenant1.app1.main--prod.aws-us-east-1b.")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/2")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/8")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/1")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/4")),
                                      new Record(Record.Type.CNAME,
                                                 RecordName.from("d.app1.tenant1.us-west-1-r.vespa.oath.cloud"),
                                                 RecordData.from("vip.prod.us-west-1.")),
                                      new Record(Record.Type.CNAME,
                                                 RecordName.from("e.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
                                                 RecordData.from("vip.prod.us-east-3.")))),
                 new TreeSet<>(records));
    // All application-scoped endpoint names, sorted by DNS name
    List<String> endpointDnsNames = tester.controller().routing().declaredEndpointsOf(context.application())
                                          .scope(Endpoint.Scope.application)
                                          .sortedBy(comparing(Endpoint::dnsName))
                                          .mapToList(Endpoint::dnsName);
    assertEquals(List.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud",
                         "a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud",
                         "b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud",
                         "c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud",
                         "d.app1.tenant1.us-west-1-r.vespa.oath.cloud",
                         "e.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
                 endpointDnsNames);
} | RecordData.from("vip.prod.us-east-3.")))), | void testDnsUpdatesForApplicationEndpoint() {
ApplicationId beta = ApplicationId.from("tenant1", "app1", "beta");
ApplicationId main = ApplicationId.from("tenant1", "app1", "main");
var context = tester.newDeploymentContext(beta);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,main")
.region("us-east-3")
.region("us-west-1")
.region("aws-us-east-1a")
.region("aws-us-east-1b")
.applicationEndpoint("a", "default",
Map.of("aws-us-east-1a", Map.of(beta.instance(), 2,
main.instance(), 8),
"aws-us-east-1b", Map.of(main.instance(), 1)))
.applicationEndpoint("b", "default", "aws-us-east-1a",
Map.of(beta.instance(), 1,
main.instance(), 1))
.applicationEndpoint("c", "default", "aws-us-east-1b",
Map.of(beta.instance(), 4))
.applicationEndpoint("d", "default", "us-west-1",
Map.of(main.instance(), 7,
beta.instance(), 3))
.applicationEndpoint("e", "default", "us-east-3",
Map.of(main.instance(), 3))
.build();
context.submit(applicationPackage).deploy();
ZoneId east3 = ZoneId.from("prod", "us-east-3");
ZoneId west1 = ZoneId.from("prod", "us-west-1");
ZoneId east1a = ZoneId.from("prod", "aws-us-east-1a");
ZoneId east1b = ZoneId.from("prod", "aws-us-east-1b");
Map<DeploymentId, Map<String, Integer>> deploymentEndpoints = Map.of(
new DeploymentId(beta, east3), Map.of(),
new DeploymentId(main, east3), Map.of("e.app1.tenant1.us-east-3-r.vespa.oath.cloud", 3),
new DeploymentId(beta, west1), Map.of("d.app1.tenant1.us-west-1-r.vespa.oath.cloud", 3),
new DeploymentId(main, west1), Map.of("d.app1.tenant1.us-west-1-r.vespa.oath.cloud", 7),
new DeploymentId(beta, east1a), Map.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 2,
"b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 1),
new DeploymentId(main, east1a), Map.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 8,
"b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 1),
new DeploymentId(beta, east1b), Map.of("c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud", 4),
new DeploymentId(main, east1b), Map.of("a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud", 1)
);
deploymentEndpoints.forEach((deployment, endpoints) -> {
Set<ContainerEndpoint> expected = endpoints.entrySet().stream()
.map(kv -> new ContainerEndpoint("default", "application",
List.of(kv.getKey()),
OptionalInt.of(kv.getValue()),
tester.controller().zoneRegistry().routingMethod(deployment.zoneId())))
.collect(Collectors.toSet());
assertEquals(expected,
tester.configServer().containerEndpoints().get(deployment),
"Endpoint names for " + deployment + " are passed to config server");
});
context.flushDnsUpdates();
Set<Record> records = tester.controllerTester().nameService().records();
assertEquals(new TreeSet<>(Set.of(new Record(Record.Type.CNAME,
RecordName.from("beta.app1.tenant1.aws-us-east-1a.vespa.oath.cloud"),
RecordData.from("lb-0--tenant1.app1.beta--prod.aws-us-east-1a.")),
new Record(Record.Type.CNAME,
RecordName.from("beta.app1.tenant1.aws-us-east-1b.vespa.oath.cloud"),
RecordData.from("lb-0--tenant1.app1.beta--prod.aws-us-east-1b.")),
new Record(Record.Type.CNAME,
RecordName.from("main.app1.tenant1.aws-us-east-1a.vespa.oath.cloud"),
RecordData.from("lb-0--tenant1.app1.main--prod.aws-us-east-1a.")),
new Record(Record.Type.CNAME,
RecordName.from("main.app1.tenant1.aws-us-east-1b.vespa.oath.cloud"),
RecordData.from("lb-0--tenant1.app1.main--prod.aws-us-east-1b.")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/2")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/8")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/1")),
new Record(Record.Type.ALIAS,
RecordName.from("b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
new Record(Record.Type.ALIAS,
RecordName.from("b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
new Record(Record.Type.ALIAS,
RecordName.from("c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/4")),
new Record(Record.Type.CNAME,
RecordName.from("d.app1.tenant1.us-west-1-r.vespa.oath.cloud"),
RecordData.from("vip.prod.us-west-1.")),
new Record(Record.Type.CNAME,
RecordName.from("e.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
RecordData.from("vip.prod.us-east-3.")))),
new TreeSet<>(records));
List<String> endpointDnsNames = tester.controller().routing().declaredEndpointsOf(context.application())
.scope(Endpoint.Scope.application)
.sortedBy(comparing(Endpoint::dnsName))
.mapToList(Endpoint::dnsName);
assertEquals(List.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud",
"a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud",
"b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud",
"c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud",
"d.app1.tenant1.us-west-1-r.vespa.oath.cloud",
"e.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
endpointDnsNames);
} | class ControllerTest {
private final DeploymentTester tester = new DeploymentTester();
@Test
void testDeployment() {
    // Two production regions, with dev and perf explicitly declared
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .explicitEnvironment(Environment.dev, Environment.perf)
            .region("us-west-1")
            .region("us-east-3")
            .build();
    Version version1 = tester.configServer().initialVersion(); // NOTE(review): unused local
    var context = tester.newDeploymentContext();
    context.submit(applicationPackage);
    assertEquals(ApplicationVersion.from(RevisionId.forProduction(1), DeploymentContext.defaultSourceRevision, "a@b", new Version("6.1"), Instant.ofEpochSecond(1)),
                 context.application().revisions().get(context.instance().change().revision().get()),
                 "Application version is known from completion of initial job");
    context.runJob(systemTest);
    context.runJob(stagingTest);
    RevisionId applicationVersion = context.instance().change().revision().get();
    assertTrue(applicationVersion.isProduction(), "Application version has been set during deployment");
    tester.triggerJobs();
    tester.clock().advance(Duration.ofSeconds(1));
    context.timeOutUpgrade(productionUsWest1);
    assertEquals(4, context.instanceJobs().size());
    tester.triggerJobs();
    // A recreated controller picks up the existing tenant and application from storage
    tester.controllerTester().createNewController();
    assertNotNull(tester.controller().tenants().get(TenantName.from("tenant1")));
    assertNotNull(tester.controller().applications().requireInstance(context.instanceId()));
    context.submit(applicationPackage);
    context.runJob(systemTest);
    context.runJob(stagingTest);
    context.triggerJobs().jobAborted(productionUsWest1);
    context.runJob(productionUsWest1);
    tester.triggerJobs();
    context.runJob(productionUsEast3);
    assertEquals(4, context.instanceJobs().size());
    // An invalid instance name is rejected on submit
    applicationPackage = new ApplicationPackageBuilder()
            .instances("hellO")
            .build();
    try {
        context.submit(applicationPackage);
        fail("Expected exception due to illegal deployment spec.");
    }
    catch (IllegalArgumentException e) {
        assertEquals("Invalid id 'hellO'. Tenant, application and instance names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.", e.getMessage());
    }
    // An unknown region is rejected on submit
    applicationPackage = new ApplicationPackageBuilder()
            .region("deep-space-9")
            .build();
    try {
        context.submit(applicationPackage);
        fail("Expected exception due to illegal deployment spec.");
    }
    catch (IllegalArgumentException e) {
        assertEquals("Zone prod.deep-space-9 in deployment spec was not found in this system!", e.getMessage());
    }
    // Removing a deployed production region without a validation override is rejected
    applicationPackage = new ApplicationPackageBuilder()
            .region("us-east-3")
            .build();
    try {
        assertTrue(context.instance().deployments().containsKey(ZoneId.from("prod", "us-west-1")));
        context.submit(applicationPackage);
        fail("Expected exception due to illegal production deployment removal");
    }
    catch (IllegalArgumentException e) {
        assertEquals("deployment-removal: application instance 'tenant.application.default' is deployed in us-west-1, " +
                     "but this instance and region combination is removed from deployment.xml. " +
                     ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval),
                     e.getMessage());
    }
    assertNotNull(context.instance().deployments().get(productionUsWest1.zone()),
                  "Zone was not removed");
    // With the deployment-removal override, the zone and its job are removed
    applicationPackage = new ApplicationPackageBuilder()
            .allow(ValidationId.deploymentRemoval)
            .upgradePolicy("default")
            .region("us-east-3")
            .build();
    context.submit(applicationPackage);
    assertNull(context.instance().deployments().get(productionUsWest1.zone()),
               "Zone was removed");
    assertNull(context.instanceJobs().get(productionUsWest1), "Deployment job was removed");
    // Application metadata exists until the application is deleted
    assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
                        .getMeta(context.instanceId())
                        .get(tester.clock().instant()));
    tester.clock().advance(Duration.ofSeconds(1));
    context.submit(ApplicationPackage.deploymentRemoval());
    tester.clock().advance(Duration.ofSeconds(1));
    context.submit(ApplicationPackage.deploymentRemoval());
    tester.applications().deleteApplication(context.application().id(),
                                           tester.controllerTester().credentialsFor(context.instanceId().tenant()));
    assertArrayEquals(new byte[0],
                      tester.controllerTester().serviceRegistry().applicationStore()
                            .getMeta(context.instanceId())
                            .get(tester.clock().instant()));
    assertNull(tester.controllerTester().serviceRegistry().applicationStore()
                     .getMeta(context.deploymentIdIn(productionUsWest1.zone())));
}
@Test
void testGlobalRotationStatus() {
    var context = tester.newDeploymentContext();
    var zone1 = ZoneId.from("prod", "us-west-1");
    var zone2 = ZoneId.from("prod", "us-east-3");
    var applicationPackage = new ApplicationPackageBuilder()
            .region(zone1.region())
            .region(zone2.region())
            .endpoint("default", "default", zone1.region().value(), zone2.region().value())
            .build();
    context.submit(applicationPackage).deploy();
    // Deployments start in rotation
    var deployment1 = context.deploymentIdIn(zone1);
    DeploymentRoutingContext routingContext = tester.controller().routing().of(deployment1);
    RoutingStatus status1 = routingContext.routingStatus();
    assertEquals(RoutingStatus.Value.in, status1.value());
    // Operator takes the zone1 deployment out of rotation
    routingContext.setRoutingStatus(RoutingStatus.Value.out, RoutingStatus.Agent.operator);
    RoutingStatus status2 = routingContext.routingStatus();
    assertEquals(RoutingStatus.Value.out, status2.value());
    // The other zone's deployment is unaffected
    RoutingStatus status3 = tester.controller().routing().of(context.deploymentIdIn(zone2)).routingStatus();
    assertEquals(RoutingStatus.Value.in, status3.value());
}
@Test
void testDnsUpdatesForGlobalEndpoint() {
    var betaContext = tester.newDeploymentContext("tenant1", "app1", "beta");
    var defaultContext = tester.newDeploymentContext("tenant1", "app1", "default");
    ZoneId usWest = ZoneId.from("prod.us-west-1");
    ZoneId usCentral = ZoneId.from("prod.us-central-1");
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .instances("beta,default")
            .endpoint("default", "foo")
            .region(usWest.region())
            .region(usCentral.region())
            .build();
    tester.controllerTester().zoneRegistry().setRoutingMethod(List.of(ZoneApiMock.from(usWest), ZoneApiMock.from(usCentral)),
                                                              RoutingMethod.sharedLayer4);
    betaContext.submit(applicationPackage).deploy();
    {   // 'beta' instance gets its own endpoint name and rotation, in every zone
        Collection<Deployment> betaDeployments = betaContext.instance().deployments().values();
        assertFalse(betaDeployments.isEmpty());
        Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
                                                                                 "global",
                                                                                 List.of("beta.app1.tenant1.global.vespa.oath.cloud",
                                                                                         "rotation-id-01"),
                                                                                 OptionalInt.empty(),
                                                                                 RoutingMethod.sharedLayer4));
        for (Deployment deployment : betaDeployments) {
            assertEquals(containerEndpoints,
                         tester.configServer().containerEndpoints()
                               .get(betaContext.deploymentIdIn(deployment.zone())));
        }
        betaContext.flushDnsUpdates();
    }
    {   // 'default' instance gets its own endpoint name and rotation, in every zone
        Collection<Deployment> defaultDeployments = defaultContext.instance().deployments().values();
        assertFalse(defaultDeployments.isEmpty());
        Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
                                                                                 "global",
                                                                                 List.of("app1.tenant1.global.vespa.oath.cloud",
                                                                                         "rotation-id-02"),
                                                                                 OptionalInt.empty(),
                                                                                 RoutingMethod.sharedLayer4));
        for (Deployment deployment : defaultDeployments) {
            assertEquals(containerEndpoints,
                         tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone())));
        }
        defaultContext.flushDnsUpdates();
    }
    // Each global endpoint name gets a CNAME to its rotation FQDN
    Map<String, String> rotationCnames = Map.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.",
                                                "app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02.");
    rotationCnames.forEach((cname, data) -> {
        var record = tester.controllerTester().findCname(cname);
        assertTrue(record.isPresent());
        assertEquals(cname, record.get().name().asString());
        assertEquals(data, record.get().data().asString());
    });
    // Declared global DNS names are reported per instance
    Map<ApplicationId, Set<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), Set.of("beta.app1.tenant1.global.vespa.oath.cloud"),
                                                                      defaultContext.instanceId(), Set.of("app1.tenant1.global.vespa.oath.cloud"));
    globalDnsNamesByInstance.forEach((instance, dnsNames) -> {
        Set<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance)
                                           .scope(Endpoint.Scope.global)
                                           .asList().stream()
                                           .map(Endpoint::dnsName)
                                           .collect(Collectors.toSet());
        assertEquals(dnsNames, actualDnsNames, "Global DNS names for " + instance);
    });
}
@Test
void testDnsUpdatesForGlobalEndpointLegacySyntax() {
    var context = tester.newDeploymentContext("tenant1", "app1", "default");
    // Legacy global-service-id syntax instead of explicit <endpoints>
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .globalServiceId("foo")
            .region("us-west-1")
            .region("us-central-1")
            .build();
    context.submit(applicationPackage).deploy();
    Collection<Deployment> deployments = context.instance().deployments().values();
    assertFalse(deployments.isEmpty());
    for (Deployment deployment : deployments) {
        assertEquals(Set.of("rotation-id-01",
                            "app1.tenant1.global.vespa.oath.cloud"),
                     tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
                     "Rotation names are passed to config server in " + deployment.zone());
    }
    context.flushDnsUpdates();
    // A single CNAME to the rotation FQDN is created
    assertEquals(1, tester.controllerTester().nameService().records().size());
    Optional<Record> record = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
    assertTrue(record.isPresent());
    assertEquals("app1.tenant1.global.vespa.oath.cloud", record.get().name().asString());
    assertEquals("rotation-fqdn-01.", record.get().data().asString());
    List<String> globalDnsNames = tester.controller().routing().readDeclaredEndpointsOf(context.instanceId())
                                        .scope(Endpoint.Scope.global)
                                        .sortedBy(comparing(Endpoint::dnsName))
                                        .mapToList(Endpoint::dnsName);
    assertEquals(List.of("app1.tenant1.global.vespa.oath.cloud"),
                 globalDnsNames);
}
@Test
void testDnsUpdatesForMultipleGlobalEndpoints() {
    var context = tester.newDeploymentContext("tenant1", "app1", "default");
    // Four global endpoints; 'west' covers only us-west-1, 'all' covers every region
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .endpoint("foobar", "qrs", "us-west-1", "us-central-1")
            .endpoint("default", "qrs", "us-west-1", "us-central-1")
            .endpoint("all", "qrs")
            .endpoint("west", "qrs", "us-west-1")
            .region("us-west-1")
            .region("us-central-1")
            .build();
    context.submit(applicationPackage).deploy();
    Collection<Deployment> deployments = context.instance().deployments().values();
    assertFalse(deployments.isEmpty());
    // Only the us-west-1 zone is also a member of the 'west' endpoint
    var notWest = Set.of(
            "rotation-id-01", "foobar.app1.tenant1.global.vespa.oath.cloud",
            "rotation-id-02", "app1.tenant1.global.vespa.oath.cloud",
            "rotation-id-03", "all.app1.tenant1.global.vespa.oath.cloud"
    );
    var west = Sets.union(notWest, Set.of("rotation-id-04", "west.app1.tenant1.global.vespa.oath.cloud"));
    for (Deployment deployment : deployments) {
        assertEquals(ZoneId.from("prod.us-west-1").equals(deployment.zone()) ? west : notWest,
                     tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
                     "Rotation names are passed to config server in " + deployment.zone());
    }
    context.flushDnsUpdates();
    // One CNAME per global endpoint, each pointing at its assigned rotation
    assertEquals(4, tester.controllerTester().nameService().records().size());
    var record1 = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
    assertTrue(record1.isPresent());
    assertEquals("app1.tenant1.global.vespa.oath.cloud", record1.get().name().asString());
    assertEquals("rotation-fqdn-02.", record1.get().data().asString());
    var record2 = tester.controllerTester().findCname("foobar.app1.tenant1.global.vespa.oath.cloud");
    assertTrue(record2.isPresent());
    assertEquals("foobar.app1.tenant1.global.vespa.oath.cloud", record2.get().name().asString());
    assertEquals("rotation-fqdn-01.", record2.get().data().asString());
    var record3 = tester.controllerTester().findCname("all.app1.tenant1.global.vespa.oath.cloud");
    assertTrue(record3.isPresent());
    assertEquals("all.app1.tenant1.global.vespa.oath.cloud", record3.get().name().asString());
    assertEquals("rotation-fqdn-03.", record3.get().data().asString());
    var record4 = tester.controllerTester().findCname("west.app1.tenant1.global.vespa.oath.cloud");
    assertTrue(record4.isPresent());
    assertEquals("west.app1.tenant1.global.vespa.oath.cloud", record4.get().name().asString());
    assertEquals("rotation-fqdn-04.", record4.get().data().asString());
}
@Test
void testDnsUpdatesForGlobalEndpointChanges() {
    var context = tester.newDeploymentContext("tenant1", "app1", "default");
    var west = ZoneId.from("prod", "us-west-1");
    var central = ZoneId.from("prod", "us-central-1");
    var east = ZoneId.from("prod", "us-east-3");
    // Initial deployment: 'default' endpoint covers west and central only
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .endpoint("default", "qrs", west.region().value(), central.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .build();
    context.submit(applicationPackage).deploy();
    for (var zone : List.of(west, central)) {
        assertEquals(
                Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
                tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
                ,
                "Zone " + zone + " is a member of global endpoint");
    }
    // Adding a new 'east' endpoint is allowed without an override
    ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
            .endpoint("default", "qrs", west.region().value(), central.region().value())
            .endpoint("east", "qrs", east.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .build();
    context.submit(applicationPackage2).deploy();
    for (var zone : List.of(west, central)) {
        assertEquals(
                Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
                tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
                ,
                "Zone " + zone + " is a member of global endpoint");
    }
    assertEquals(
            Set.of("rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud"),
            tester.configServer().containerEndpointNames(context.deploymentIdIn(east))
            ,
            "Zone " + east + " is a member of global endpoint");
    // Extending 'default' to also cover east is allowed
    ApplicationPackage applicationPackage3 = new ApplicationPackageBuilder()
            .endpoint("default", "qrs", west.region().value(), central.region().value(), east.region().value())
            .endpoint("east", "qrs", east.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .build();
    context.submit(applicationPackage3).deploy();
    for (var zone : List.of(west, central, east)) {
        assertEquals(
                zone.equals(east)
                        ? Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud",
                                 "rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud")
                        : Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
                tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
                ,
                "Zone " + zone + " is a member of global endpoint");
    }
    // Shrinking 'default' again without an override is rejected
    ApplicationPackage applicationPackage4 = new ApplicationPackageBuilder()
            .endpoint("default", "qrs", west.region().value(), central.region().value())
            .endpoint("east", "qrs", east.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .build();
    try {
        context.submit(applicationPackage4);
        fail("Expected exception");
    } catch (IllegalArgumentException e) {
        assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
                     "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
                     "but does not include all of these in deployment.xml. Deploying given deployment.xml " +
                     "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1] " +
                     "and add [endpoint 'default' (cluster qrs) -> us-central-1, us-west-1]. " +
                     ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
    }
    // Removing 'default' entirely without an override is rejected
    ApplicationPackage applicationPackage5 = new ApplicationPackageBuilder()
            .endpoint("east", "qrs", east.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .build();
    try {
        context.submit(applicationPackage5);
        fail("Expected exception");
    } catch (IllegalArgumentException e) {
        assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
                     "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
                     "but does not include all of these in deployment.xml. Deploying given deployment.xml " +
                     "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1]. " +
                     ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
    }
    // With the global-endpoint-change override, the removal is accepted
    ApplicationPackage applicationPackage6 = new ApplicationPackageBuilder()
            .endpoint("east", "qrs", east.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .allow(ValidationId.globalEndpointChange)
            .build();
    context.submit(applicationPackage6);
}
@Test
void testUnassignRotations() {
    var context = tester.newDeploymentContext();
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .endpoint("default", "qrs", "us-west-1", "us-central-1")
            .region("us-west-1")
            .region("us-central-1")
            .build();
    context.submit(applicationPackage).deploy();
    // Endpoint is removed with the global-endpoint-change override
    ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
            .region("us-west-1")
            .region("us-central-1")
            .allow(ValidationId.globalEndpointChange)
            .build();
    context.submit(applicationPackage2).deploy();
    // Rotation assignment is dropped, and no endpoints remain for the deployment
    assertEquals(List.of(), context.instance().rotations());
    assertEquals(
            Set.of(),
            tester.configServer().containerEndpoints().get(context.deploymentIdIn(ZoneId.from("prod", "us-west-1")))
    );
}
@Test
void testDnsUpdatesWithChangeInRotationAssignment() {
    // Application 1 is deployed, then deleted, which frees its rotation
    String dnsName1 = "app1.tenant1.global.vespa.oath.cloud";
    {
        var context = tester.newDeploymentContext("tenant1", "app1", "default");
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .endpoint("default", "foo")
                .region("us-west-1")
                .region("us-central-1")
                .build();
        context.submit(applicationPackage).deploy();
        assertEquals(1, tester.controllerTester().nameService().records().size());
        {
            Optional<Record> record = tester.controllerTester().findCname(dnsName1);
            assertTrue(record.isPresent());
            assertEquals(dnsName1, record.get().name().asString());
            assertEquals("rotation-fqdn-01.", record.get().data().asString());
        }
        applicationPackage = new ApplicationPackageBuilder()
                .allow(ValidationId.deploymentRemoval)
                .allow(ValidationId.globalEndpointChange)
                .build();
        context.submit(applicationPackage);
        tester.applications().deleteApplication(context.application().id(),
                                                tester.controllerTester().credentialsFor(context.application().id().tenant()));
        try (RotationLock lock = tester.controller().routing().rotations().lock()) {
            assertTrue(tester.controller().routing().rotations().availableRotations(lock)
                             .containsKey(new RotationId("rotation-id-01")),
                       "Rotation is unassigned");
        }
        context.flushDnsUpdates();
        // The CNAME for the deleted application is removed
        Optional<Record> record = tester.controllerTester().findCname(dnsName1);
        assertTrue(record.isEmpty(), dnsName1 + " is removed");
    }
    // Application 2 is deployed next and is assigned the freed rotation
    String dnsName2 = "app2.tenant2.global.vespa.oath.cloud";
    {
        var context = tester.newDeploymentContext("tenant2", "app2", "default");
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .endpoint("default", "foo")
                .region("us-west-1")
                .region("us-central-1")
                .build();
        context.submit(applicationPackage).deploy();
        assertEquals(1, tester.controllerTester().nameService().records().size());
        var record = tester.controllerTester().findCname(dnsName2);
        assertTrue(record.isPresent());
        assertEquals(dnsName2, record.get().name().asString());
        assertEquals("rotation-fqdn-01.", record.get().data().asString());
    }
    // Application 1 is deployed again, and now gets the next rotation in sequence
    {
        var context = tester.newDeploymentContext("tenant1", "app1", "default");
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .endpoint("default", "foo")
                .region("us-west-1")
                .region("us-central-1")
                .build();
        context.submit(applicationPackage).deploy();
        assertEquals("rotation-id-02", context.instance().rotations().get(0).rotationId().asString());
        assertEquals(2, tester.controllerTester().nameService().records().size());
        var record1 = tester.controllerTester().findCname(dnsName1);
        assertTrue(record1.isPresent());
        assertEquals("rotation-fqdn-02.", record1.get().data().asString());
        var record2 = tester.controllerTester().findCname(dnsName2);
        assertTrue(record2.isPresent());
        assertEquals("rotation-fqdn-01.", record2.get().data().asString());
    }
}
@Test
@Test
void testDevDeployment() {
ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]);
var context = tester.newDeploymentContext();
ZoneId zone = ZoneId.from("dev", "us-east-1");
tester.controllerTester().zoneRegistry()
.setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.sharedLayer4);
context.runJob(zone, applicationPackage);
assertTrue(tester.configServer().application(context.instanceId(), zone).get().activated(),
"Application deployed and activated");
assertTrue(context.instanceJobs().isEmpty(),
"No job status added");
assertEquals(DeploymentSpec.empty, context.application().deploymentSpec(), "DeploymentSpec is not stored");
Set<RoutingMethod> routingMethods = tester.controller().routing().readEndpointsOf(context.deploymentIdIn(zone))
.asList()
.stream()
.map(Endpoint::routingMethod)
.collect(Collectors.toSet());
assertEquals(routingMethods, Set.of(RoutingMethod.sharedLayer4));
assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(new DeploymentId(context.instanceId(), zone))
.get(tester.clock().instant()));
tester.clock().advance(Duration.ofSeconds(1));
tester.controller().applications().deactivate(context.instanceId(), zone);
assertArrayEquals(new byte[0],
tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(new DeploymentId(context.instanceId(), zone))
.get(tester.clock().instant()));
}
    @Test
    void testDevDeploymentWithIncompatibleVersions() {
        // With INCOMPATIBLE_VERSIONS = ["8"], compile and platform versions must land on
        // the same side of the major-7/major-8 divide for dev deployments.
        Version version1 = new Version("7");
        Version version2 = new Version("7.5");
        Version version3 = new Version("8");
        var context = tester.newDeploymentContext();
        tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
        tester.controllerTester().upgradeSystem(version2);
        // Extra deployment pinned to version2 (names suggest it keeps that version alive
        // in version status — TODO confirm).
        tester.newDeploymentContext("keep", "v2", "alive").submit().deploy();
        ZoneId zone = ZoneId.from("dev", "us-east-1");
        // Compile version 7 deploys on the newest compatible platform, 7.5.
        context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).build());
        assertEquals(version2, context.deployment(zone).version());
        assertEquals(Optional.of(version1), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
        try {
            context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
            fail("Should fail when specifying a major that does not yet exist");
        }
        catch (IllegalArgumentException e) {
            assertEquals("no platforms were found for major version 8 specified in deployment.xml", e.getMessage());
        }
        try {
            context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
            fail("Should fail when compiled against a version which is only compatible with not-yet-existent versions");
        }
        catch (IllegalArgumentException e) {
            assertEquals("no platforms are compatible with compile version 8", e.getMessage());
        }
        // Major 8 now exists, but a compile version on major 7 remains incompatible with it.
        tester.controllerTester().upgradeSystem(version3);
        try {
            context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
            fail("Should fail when specifying a major which is incompatible with compile version");
        }
        catch (IllegalArgumentException e) {
            assertEquals("no platforms on major version 8 specified in deployment.xml are compatible with compile version 7", e.getMessage());
        }
        // Compiling against 8 deploys on platform 8, with or without an explicit major.
        context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).majorVersion(8).build());
        assertEquals(version3, context.deployment(zone).version());
        assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
        context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
        assertEquals(version3, context.deployment(zone).version());
        assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
    }
@Test
void testSuspension() {
var context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.region("us-west-1")
.region("us-east-3")
.build();
context.submit(applicationPackage).deploy();
DeploymentId deployment1 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
DeploymentId deployment2 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-east-3")));
assertFalse(tester.configServer().isSuspended(deployment1));
assertFalse(tester.configServer().isSuspended(deployment2));
tester.configServer().setSuspension(deployment1, true);
assertTrue(tester.configServer().isSuspended(deployment1));
assertFalse(tester.configServer().isSuspended(deployment2));
}
@Test
void testDeletingApplicationThatHasAlreadyBeenDeleted() {
var context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.region("us-west-1")
.build();
ZoneId zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
context.submit(applicationPackage).runJob(zone, applicationPackage);
tester.controller().applications().deactivate(context.instanceId(), zone);
tester.controller().applications().deactivate(context.instanceId(), zone);
}
@Test
void testDeployApplicationWithWarnings() {
var context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.region("us-west-1")
.build();
ZoneId zone = ZoneId.from("prod", "us-west-1");
int warnings = 3;
tester.configServer().generateWarnings(context.deploymentIdIn(zone), warnings);
context.submit(applicationPackage).deploy();
assertEquals(warnings, context.deployment(zone)
.metrics().warnings().get(DeploymentMetrics.Warning.all).intValue());
}
    @Test
    void testDeploySelectivelyProvisionsCertificate() {
        // Endpoint certificates are provisioned on the first prod submission (covering the
        // generated name, global names, and per-zone names with wildcards), reused on
        // resubmission, and also provisioned for dev deployments.
        Function<Instance, Optional<EndpointCertificateMetadata>> certificate = (application) -> tester.controller().curator().readEndpointCertificateMetadata(application.id());
        var context1 = tester.newDeploymentContext("tenant1", "app1", "default");
        var prodZone = ZoneId.from("prod", "us-west-1");
        var stagingZone = ZoneId.from("staging", "us-east-3");
        var testZone = ZoneId.from("test", "us-east-1");
        tester.controllerTester().zoneRegistry().exclusiveRoutingIn(ZoneApiMock.from(prodZone));
        var applicationPackage = new ApplicationPackageBuilder().athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
                .region(prodZone.region())
                .build();
        // First deployment: certificate is provisioned.
        context1.submit(applicationPackage).deploy();
        var cert = certificate.apply(context1.instance());
        assertTrue(cert.isPresent(), "Provisions certificate in " + Environment.prod);
        // Expected SANs: the generated name, the global endpoint (plus wildcard), and for
        // each of prod/test/staging a zone-scoped name plus its wildcard.
        assertEquals(Stream.concat(Stream.of("vznqtz7a5ygwjkbhhj7ymxvlrekgt4l6g.vespa.oath.cloud",
                        "app1.tenant1.global.vespa.oath.cloud",
                        "*.app1.tenant1.global.vespa.oath.cloud"),
                Stream.of(prodZone, testZone, stagingZone)
                        .flatMap(zone -> Stream.of("", "*.")
                                .map(prefix -> prefix + "app1.tenant1." + zone.region().value() +
                                        (zone.environment() == Environment.prod ? "" : "." + zone.environment().value()) +
                                        ".vespa.oath.cloud")))
                        .collect(Collectors.toUnmodifiableSet()),
                Set.copyOf(tester.controllerTester().serviceRegistry().endpointCertificateMock().dnsNamesOf(context1.instanceId())));
        // Redeploying the same package reuses the existing certificate.
        context1.submit(applicationPackage).deploy();
        assertEquals(cert, certificate.apply(context1.instance()));
        // A dev deployment of a different application also gets a certificate.
        var context2 = tester.newDeploymentContext("tenant1", "app2", "default");
        var devZone = ZoneId.from("dev", "us-east-1");
        context2.runJob(devZone, applicationPackage);
        assertTrue(tester.configServer().application(context2.instanceId(), devZone).get().activated(),
                "Application deployed and activated");
        assertTrue(certificate.apply(context2.instance()).isPresent(), "Provisions certificate also in zone with routing layer");
    }
    @Test
    void testDeployWithGlobalEndpointsInMultipleClouds() {
        // A single global endpoint may not span regions that live in different clouds;
        // submission must be rejected with a descriptive message.
        tester.controllerTester().zoneRegistry().setZones(
                ZoneApiMock.fromId("test.us-west-1"),
                ZoneApiMock.fromId("staging.us-west-1"),
                ZoneApiMock.fromId("prod.us-west-1"),
                ZoneApiMock.newBuilder().with(CloudName.AWS).withId("prod.aws-us-east-1").build()
        );
        var context = tester.newDeploymentContext();
        var applicationPackage = new ApplicationPackageBuilder()
                .region("aws-us-east-1")
                .region("us-west-1")
                .endpoint("default", "default")
                .build();
        try {
            context.submit(applicationPackage);
            fail("Expected exception");
        } catch (IllegalArgumentException e) {
            assertEquals("Endpoint 'default' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage());
        }
        // An endpoint confined to one cloud ('aws') is fine, but 'foo' spanning both clouds
        // still fails.
        var applicationPackage2 = new ApplicationPackageBuilder()
                .region("aws-us-east-1")
                .region("us-west-1")
                .endpoint("aws", "default", "aws-us-east-1")
                .endpoint("foo", "default", "aws-us-east-1", "us-west-1")
                .build();
        try {
            context.submit(applicationPackage2);
            fail("Expected exception");
        } catch (IllegalArgumentException e) {
            assertEquals("Endpoint 'foo' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage());
        }
    }
@Test
void testDeployWithoutSourceRevision() {
var context = tester.newDeploymentContext();
var applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.region("us-west-1")
.build();
context.submit(applicationPackage, Optional.empty())
.deploy();
assertEquals(1, context.instance().deployments().size(), "Deployed application");
}
    @Test
    void testDeployWithGlobalEndpointsAndMultipleRoutingMethods() {
        // Zones with different routing methods (sharedLayer4 vs exclusive) yield a mix of
        // weighted-ALIAS, latency-ALIAS and CNAME records for the exclusively-routed zone.
        var context = tester.newDeploymentContext();
        var zone1 = ZoneId.from("prod", "us-west-1");
        var zone2 = ZoneId.from("prod", "us-east-3");
        var applicationPackage = new ApplicationPackageBuilder()
                .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
                .endpoint("default", "default", zone1.region().value(), zone2.region().value())
                .endpoint("east", "default", zone2.region().value())
                .region(zone1.region())
                .region(zone2.region())
                .build();
        tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone1), RoutingMethod.sharedLayer4);
        tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone2), RoutingMethod.exclusive);
        context.submit(applicationPackage).deploy();
        // Only the exclusive zone (us-east-3) produces records here.
        var expectedRecords = List.of(
                new Record(Record.Type.ALIAS,
                        RecordName.from("application.tenant.us-east-3-w.vespa.oath.cloud"),
                        new WeightedAliasTarget(HostName.of("lb-0--tenant.application.default--prod.us-east-3"),
                                "dns-zone-1", ZoneId.from("prod.us-east-3"), 1).pack()),
                new Record(Record.Type.ALIAS,
                        RecordName.from("east.application.tenant.global.vespa.oath.cloud"),
                        new LatencyAliasTarget(HostName.of("application.tenant.us-east-3-w.vespa.oath.cloud"),
                                "dns-zone-1", ZoneId.from("prod.us-east-3")).pack()),
                new Record(Record.Type.CNAME,
                        RecordName.from("application.tenant.us-east-3.vespa.oath.cloud"),
                        RecordData.from("lb-0--tenant.application.default--prod.us-east-3.")));
        assertEquals(expectedRecords, List.copyOf(tester.controllerTester().nameService().records()));
    }
    @Test
    void testDeploymentDirectRouting() {
        // With no rotations configured, container endpoints are passed per zone, and each
        // zone only receives the endpoints that cover it.
        // Local tester deliberately shadows the shared field: built without rotations.
        DeploymentTester tester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build(), main));
        var context = tester.newDeploymentContext();
        var zone1 = ZoneId.from("prod", "us-west-1");
        var zone2 = ZoneId.from("prod", "us-east-3");
        var zone3 = ZoneId.from("prod", "eu-west-1");
        tester.controllerTester().zoneRegistry()
                .exclusiveRoutingIn(ZoneApiMock.from(zone1), ZoneApiMock.from(zone2), ZoneApiMock.from(zone3));
        var applicationPackageBuilder = new ApplicationPackageBuilder()
                .region(zone1.region())
                .region(zone2.region())
                .region(zone3.region())
                .endpoint("default", "default")
                .endpoint("foo", "qrs")
                .endpoint("us", "default", zone1.region().value(), zone2.region().value())
                .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"));
        context.submit(applicationPackageBuilder.build()).deploy();
        // The US zones are covered by all three endpoints...
        for (var zone : List.of(zone1, zone2)) {
            assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
                            "foo.application.tenant.global.vespa.oath.cloud",
                            "us.application.tenant.global.vespa.oath.cloud"),
                    tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
                    "Expected container endpoints in " + zone);
        }
        // ...but eu-west-1 is excluded from the 'us' endpoint.
        assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
                        "foo.application.tenant.global.vespa.oath.cloud"),
                tester.configServer().containerEndpointNames(context.deploymentIdIn(zone3)),
                "Expected container endpoints in " + zone3);
    }
@Test
void testChangeEndpointCluster() {
var context = tester.newDeploymentContext();
var west = ZoneId.from("prod", "us-west-1");
var east = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region(west.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage).deploy();
assertEquals(ClusterSpec.Id.from("foo"), tester.applications().requireInstance(context.instanceId())
.rotations().get(0).clusterId());
applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "bar")
.region(west.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage).deploy();
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant.application' has endpoints [endpoint " +
"'default' (cluster foo) -> us-east-3, us-west-1], but does not include all of these in " +
"deployment.xml. Deploying given deployment.xml will remove " +
"[endpoint 'default' (cluster foo) -> us-east-3, us-west-1] and add " +
"[endpoint 'default' (cluster bar) -> us-east-3, us-west-1]. To allow this add " +
"<allow until='yyyy-mm-dd'>global-endpoint-change</allow> to validation-overrides.xml, see " +
"https:
}
applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "bar")
.region(west.region().value())
.region(east.region().value())
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage).deploy();
assertEquals(ClusterSpec.Id.from("bar"), tester.applications().requireInstance(context.instanceId())
.rotations().get(0).clusterId());
}
    @Test
    void testReadableApplications() {
        // readable() silently skips applications whose stored data cannot be deserialized,
        // while asList() fails hard on them.
        var db = new MockCuratorDb(tester.controller().system());
        // Local tester deliberately shadows the shared field, so we control the curator db.
        var tester = new DeploymentTester(new ControllerTester(db));
        var app1 = tester.newDeploymentContext("t1", "a1", "default")
                .submit()
                .deploy();
        var app2 = tester.newDeploymentContext("t2", "a2", "default")
                .submit()
                .deploy();
        assertEquals(2, tester.applications().readable().size());
        // Corrupt app2's stored bytes.
        db.curator().set(Path.fromString("/controller/v1/applications/" + app2.application().id().serialized()),
                new byte[]{(byte) 0xDE, (byte) 0xAD});
        assertEquals(1, tester.applications().readable().size());
        try {
            tester.applications().asList();
            fail("Expected exception");
        } catch (Exception ignored) {
            // Expected: asList() must not hide unreadable application data.
        }
        // The uncorrupted application can still be updated.
        app1.submit().deploy();
    }
    @Test
    void testClashingEndpointIdAndInstanceName() {
        // An endpoint ID in one instance may not clash with another instance's name +
        // endpoint combination (both would produce the same DNS name).
        String deploymentXml = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" +
                " <instance id=\"default\">\n" +
                " <prod>\n" +
                " <region active=\"true\">us-west-1</region>\n" +
                " </prod>\n" +
                " <endpoints>\n" +
                " <endpoint id=\"dev\" container-id=\"qrs\"/>\n" +
                " </endpoints>\n" +
                " </instance>\n" +
                " <instance id=\"dev\">\n" +
                " <prod>\n" +
                " <region active=\"true\">us-west-1</region>\n" +
                " </prod>\n" +
                " <endpoints>\n" +
                " <endpoint id=\"default\" container-id=\"qrs\"/>\n" +
                " </endpoints>\n" +
                " </instance>\n" +
                "</deployment>\n";
        ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
        try {
            tester.newDeploymentContext().submit(applicationPackage);
            fail("Expected exception");
        } catch (IllegalArgumentException e) {
            assertEquals("Endpoint with ID 'default' in instance 'dev' clashes with endpoint 'dev' in instance 'default'",
                    e.getMessage());
        }
    }
@Test
void testTestPackageWarnings() {
String deploymentXml = "<deployment version='1.0'>\n" +
" <prod>\n" +
" <region>us-west-1</region>\n" +
" </prod>\n" +
"</deployment>\n";
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
byte[] testPackage = ApplicationPackage.filesZip(Map.of("tests/staging-test/foo.json", new byte[0]));
var app = tester.newDeploymentContext();
tester.jobs().submit(app.application().id(), Submission.basic(applicationPackage, testPackage), 1);
assertEquals(List.of(new Notification(tester.clock().instant(),
Type.testPackage,
Level.warning,
NotificationSource.from(app.application().id()),
List.of("test package has staging tests, so it should also include staging setup",
"see https:
tester.controller().notificationsDb().listNotifications(NotificationSource.from(app.application().id()), true));
}
    @Test
    void testCompileVersion() {
        // Exercises compileVersion() across version-confidence changes, explicit major
        // pinning, and the INCOMPATIBLE_VERSIONS flag separating majors 7 and 8.
        DeploymentContext context = tester.newDeploymentContext();
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build();
        TenantAndApplicationId application = TenantAndApplicationId.from(context.instanceId());

        // Only 7.1 exists: it is the compile version for major 7 and unspecified major;
        // asking for major 8 fails.
        Version version0 = Version.fromString("7.1");
        tester.controllerTester().upgradeSystem(version0);
        tester.upgrader().overrideConfidence(version0, Confidence.normal);
        tester.controllerTester().computeVersionStatus();
        assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.of(7)));
        assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
        assertEquals("this system has no available versions on specified major: 8",
                assertThrows(IllegalArgumentException.class,
                        () -> tester.applications().compileVersion(application, OptionalInt.of(8)))
                        .getMessage());
        context.submit(applicationPackage).deploy();

        // 7.2 becomes the compile version only after this application is deployed on it.
        Version version1 = Version.fromString("7.2");
        tester.controllerTester().upgradeSystem(version1);
        tester.upgrader().overrideConfidence(version1, Confidence.normal);
        tester.controllerTester().computeVersionStatus();
        assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
        tester.upgrader().maintain();
        context.deployPlatform(version1);
        assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));

        // legacyApp keeps an application deployed on major 7 (name suggests it prevents
        // garbage collection of that version — TODO confirm); newApp has no deployments.
        DeploymentContext legacyApp = tester.newDeploymentContext("avoid", "gc", "default").submit().deploy();
        TenantAndApplicationId newApp = TenantAndApplicationId.from("new", "app");

        // 8.0 at low confidence is not offered at all.
        Version version2 = Version.fromString("8.0");
        tester.controllerTester().upgradeSystem(version2);
        tester.upgrader().overrideConfidence(version2, Confidence.low);
        tester.controllerTester().computeVersionStatus();
        assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
        assertEquals("this system has no available versions on specified major: 8",
                assertThrows(IllegalArgumentException.class,
                        () -> tester.applications().compileVersion(application, OptionalInt.of(8)))
                        .getMessage());
        // At normal confidence, 8.0 is offered to new applications but not to one
        // already deployed on major 7.
        tester.upgrader().overrideConfidence(version2, Confidence.normal);
        tester.controllerTester().computeVersionStatus();
        assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
        assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
        assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));

        // Declaring major 8 incompatible: an explicit major 8 now yields 8.0.
        tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
        assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
        assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
        assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
        assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));

        // Dropping 8.0's confidence makes major 8 unavailable again.
        tester.upgrader().overrideConfidence(version2, Confidence.low);
        tester.controllerTester().computeVersionStatus();
        assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
        assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
        assertEquals("this system has no available versions on specified major: 8",
                assertThrows(IllegalArgumentException.class,
                        () -> tester.applications().compileVersion(application, OptionalInt.of(8)))
                        .getMessage());
        assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));
        assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));

        // Once the application itself compiles against 8, it stays on 8.0 even when
        // 8.0's confidence drops again.
        tester.upgrader().overrideConfidence(version2, Confidence.normal);
        tester.controllerTester().computeVersionStatus();
        context.submit(new ApplicationPackageBuilder().region("us-west-1").compileVersion(version2).build()).deploy();
        assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
        assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
        tester.upgrader().overrideConfidence(version2, Confidence.low);
        tester.controllerTester().computeVersionStatus();
        assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
        assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
        assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));

        // A broken 8.0 leaves no usable compile version on the incompatible major.
        tester.upgrader().overrideConfidence(version2, Confidence.broken);
        tester.controllerTester().computeVersionStatus();
        assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
        assertEquals("no suitable, released compile version exists",
                assertThrows(IllegalArgumentException.class,
                        () -> tester.applications().compileVersion(application, OptionalInt.empty()))
                        .getMessage());
        assertEquals("no suitable, released compile version exists for specified major: 8",
                assertThrows(IllegalArgumentException.class,
                        () -> tester.applications().compileVersion(application, OptionalInt.of(8)))
                        .getMessage());

        // Clearing the incompatibility makes 7.2 acceptable for major 8 again.
        tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of(), String.class);
        assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
        assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
        assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
        tester.upgrader().overrideConfidence(version2, Confidence.low);
        tester.controllerTester().computeVersionStatus();
        assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
        assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
        assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
    }
    @Test
    void testCloudAccount() {
        // Deploying into a custom cloud account requires both that the account is allowed
        // for the tenant (CLOUD_ACCOUNTS flag) and that each target zone is configured in
        // that account; the account is then propagated to the config server per zone.
        DeploymentContext context = tester.newDeploymentContext();
        ZoneId devZone = devUsEast1.zone();
        ZoneId prodZone = productionUsWest1.zone();
        String cloudAccount = "012345678912";
        var applicationPackage = new ApplicationPackageBuilder()
                .cloudAccount(cloudAccount)
                .region(prodZone.region())
                .build();
        // Account not in the tenant's allow-list.
        context.submit(applicationPackage).runJobExpectingFailure(systemTest, "Requested cloud account '012345678912' is not valid for tenant 'tenant'");
        // Allowed for the tenant, but the test zone is not configured in the account.
        tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class);
        context.runJobExpectingFailure(systemTest, "Zone test.us-east-1 is not configured in requested cloud account '012345678912'")
                .abortJob(stagingTest);
        // With all pipeline zones configured, deployment succeeds.
        tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount),
                systemTest.zone(),
                stagingTest.zone(),
                prodZone);
        context.submit(applicationPackage).deploy();
        tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), devZone);
        context.runJob(devZone, applicationPackage);
        // Every deployed zone reports the requested account.
        for (var zoneId : List.of(systemTest.zone(), stagingTest.zone(), devZone, prodZone)) {
            assertEquals(cloudAccount, tester.controllerTester().configServer()
                    .cloudAccount(context.deploymentIdIn(zoneId))
                    .get().value());
        }
    }
@Test
void testSubmitWithElementDeprecatedOnPreviousMajor() {
DeploymentContext context = tester.newDeploymentContext();
var applicationPackage = new ApplicationPackageBuilder()
.compileVersion(Version.fromString("8.1"))
.region("us-west-1")
.globalServiceId("qrs")
.build();
try {
context.submit(applicationPackage).deploy();
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("Element 'prod' contains attribute 'global-service-id' deprecated since major version 7"));
}
}
} | class ControllerTest {
    // Shared harness; each test obtains fresh deployment contexts from this tester.
    private final DeploymentTester tester = new DeploymentTester();
    @Test
    void testDeployment() {
        // End-to-end lifecycle: initial rollout, controller restart, redeploy, rejected
        // deployment specs, region removal with/without override, and final deletion.
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .explicitEnvironment(Environment.dev, Environment.perf)
                .region("us-west-1")
                .region("us-east-3")
                .build();
        Version version1 = tester.configServer().initialVersion();
        var context = tester.newDeploymentContext();
        context.submit(applicationPackage);
        assertEquals(ApplicationVersion.from(RevisionId.forProduction(1), DeploymentContext.defaultSourceRevision, "a@b", new Version("6.1"), Instant.ofEpochSecond(1)),
                context.application().revisions().get(context.instance().change().revision().get()),
                "Application version is known from completion of initial job");
        context.runJob(systemTest);
        context.runJob(stagingTest);
        RevisionId applicationVersion = context.instance().change().revision().get();
        assertTrue(applicationVersion.isProduction(), "Application version has been set during deployment");
        tester.triggerJobs();
        tester.clock().advance(Duration.ofSeconds(1));
        // Let the first prod deployment time out.
        context.timeOutUpgrade(productionUsWest1);
        assertEquals(4, context.instanceJobs().size());
        tester.triggerJobs();
        // Simulate a controller restart: tenant and instance state must survive.
        tester.controllerTester().createNewController();
        assertNotNull(tester.controller().tenants().get(TenantName.from("tenant1")));
        assertNotNull(tester.controller().applications().requireInstance(context.instanceId()));
        // Resubmit and run the full pipeline to completion.
        context.submit(applicationPackage);
        context.runJob(systemTest);
        context.runJob(stagingTest);
        context.triggerJobs().jobAborted(productionUsWest1);
        context.runJob(productionUsWest1);
        tester.triggerJobs();
        context.runJob(productionUsEast3);
        assertEquals(4, context.instanceJobs().size());
        // An invalid instance name is rejected on submission.
        applicationPackage = new ApplicationPackageBuilder()
                .instances("hellO")
                .build();
        try {
            context.submit(applicationPackage);
            fail("Expected exception due to illegal deployment spec.");
        }
        catch (IllegalArgumentException e) {
            assertEquals("Invalid id 'hellO'. Tenant, application and instance names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.", e.getMessage());
        }
        // An unknown region is rejected on submission.
        applicationPackage = new ApplicationPackageBuilder()
                .region("deep-space-9")
                .build();
        try {
            context.submit(applicationPackage);
            fail("Expected exception due to illegal deployment spec.");
        }
        catch (IllegalArgumentException e) {
            assertEquals("Zone prod.deep-space-9 in deployment spec was not found in this system!", e.getMessage());
        }
        // Removing a deployed region without the deployment-removal override is rejected.
        applicationPackage = new ApplicationPackageBuilder()
                .region("us-east-3")
                .build();
        try {
            assertTrue(context.instance().deployments().containsKey(ZoneId.from("prod", "us-west-1")));
            context.submit(applicationPackage);
            fail("Expected exception due to illegal production deployment removal");
        }
        catch (IllegalArgumentException e) {
            assertEquals("deployment-removal: application instance 'tenant.application.default' is deployed in us-west-1, " +
                            "but this instance and region combination is removed from deployment.xml. " +
                            ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval),
                    e.getMessage());
        }
        assertNotNull(context.instance().deployments().get(productionUsWest1.zone()),
                "Zone was not removed");
        // With the override, the region and its job are removed.
        applicationPackage = new ApplicationPackageBuilder()
                .allow(ValidationId.deploymentRemoval)
                .upgradePolicy("default")
                .region("us-east-3")
                .build();
        context.submit(applicationPackage);
        assertNull(context.instance().deployments().get(productionUsWest1.zone()),
                "Zone was removed");
        assertNull(context.instanceJobs().get(productionUsWest1), "Deployment job was removed");
        // Metadata is kept while the application exists...
        assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
                .getMeta(context.instanceId())
                .get(tester.clock().instant()));
        tester.clock().advance(Duration.ofSeconds(1));
        context.submit(ApplicationPackage.deploymentRemoval());
        tester.clock().advance(Duration.ofSeconds(1));
        context.submit(ApplicationPackage.deploymentRemoval());
        tester.applications().deleteApplication(context.application().id(),
                tester.controllerTester().credentialsFor(context.instanceId().tenant()));
        // ...and emptied once the application is deleted.
        assertArrayEquals(new byte[0],
                tester.controllerTester().serviceRegistry().applicationStore()
                        .getMeta(context.instanceId())
                        .get(tester.clock().instant()));
        assertNull(tester.controllerTester().serviceRegistry().applicationStore()
                .getMeta(context.deploymentIdIn(productionUsWest1.zone())));
    }
@Test
void testGlobalRotationStatus() {
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.region(zone1.region())
.region(zone2.region())
.endpoint("default", "default", zone1.region().value(), zone2.region().value())
.build();
context.submit(applicationPackage).deploy();
var deployment1 = context.deploymentIdIn(zone1);
DeploymentRoutingContext routingContext = tester.controller().routing().of(deployment1);
RoutingStatus status1 = routingContext.routingStatus();
assertEquals(RoutingStatus.Value.in, status1.value());
routingContext.setRoutingStatus(RoutingStatus.Value.out, RoutingStatus.Agent.operator);
RoutingStatus status2 = routingContext.routingStatus();
assertEquals(RoutingStatus.Value.out, status2.value());
RoutingStatus status3 = tester.controller().routing().of(context.deploymentIdIn(zone2)).routingStatus();
assertEquals(RoutingStatus.Value.in, status3.value());
}
    @Test
    void testDnsUpdatesForGlobalEndpoint() {
        // Two instances of the same application get distinct rotations, container
        // endpoints, CNAME records and global DNS names.
        var betaContext = tester.newDeploymentContext("tenant1", "app1", "beta");
        var defaultContext = tester.newDeploymentContext("tenant1", "app1", "default");
        ZoneId usWest = ZoneId.from("prod.us-west-1");
        ZoneId usCentral = ZoneId.from("prod.us-central-1");
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .instances("beta,default")
                .endpoint("default", "foo")
                .region(usWest.region())
                .region(usCentral.region())
                .build();
        tester.controllerTester().zoneRegistry().setRoutingMethod(List.of(ZoneApiMock.from(usWest), ZoneApiMock.from(usCentral)),
                RoutingMethod.sharedLayer4);
        betaContext.submit(applicationPackage).deploy();
        {
            // The beta instance gets rotation-id-01, passed to every deployed zone.
            Collection<Deployment> betaDeployments = betaContext.instance().deployments().values();
            assertFalse(betaDeployments.isEmpty());
            Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
                    "global",
                    List.of("beta.app1.tenant1.global.vespa.oath.cloud",
                            "rotation-id-01"),
                    OptionalInt.empty(),
                    RoutingMethod.sharedLayer4));
            for (Deployment deployment : betaDeployments) {
                assertEquals(containerEndpoints,
                        tester.configServer().containerEndpoints()
                                .get(betaContext.deploymentIdIn(deployment.zone())));
            }
            betaContext.flushDnsUpdates();
        }
        {
            // The default instance gets rotation-id-02.
            Collection<Deployment> defaultDeployments = defaultContext.instance().deployments().values();
            assertFalse(defaultDeployments.isEmpty());
            Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
                    "global",
                    List.of("app1.tenant1.global.vespa.oath.cloud",
                            "rotation-id-02"),
                    OptionalInt.empty(),
                    RoutingMethod.sharedLayer4));
            for (Deployment deployment : defaultDeployments) {
                assertEquals(containerEndpoints,
                        tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone())));
            }
            defaultContext.flushDnsUpdates();
        }
        // Each instance's global DNS name points at its own rotation FQDN.
        Map<String, String> rotationCnames = Map.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.",
                "app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02.");
        rotationCnames.forEach((cname, data) -> {
            var record = tester.controllerTester().findCname(cname);
            assertTrue(record.isPresent());
            assertEquals(cname, record.get().name().asString());
            assertEquals(data, record.get().data().asString());
        });
        // And the declared global endpoints reflect the same names per instance.
        Map<ApplicationId, Set<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), Set.of("beta.app1.tenant1.global.vespa.oath.cloud"),
                defaultContext.instanceId(), Set.of("app1.tenant1.global.vespa.oath.cloud"));
        globalDnsNamesByInstance.forEach((instance, dnsNames) -> {
            Set<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance)
                    .scope(Endpoint.Scope.global)
                    .asList().stream()
                    .map(Endpoint::dnsName)
                    .collect(Collectors.toSet());
            assertEquals(dnsNames, actualDnsNames, "Global DNS names for " + instance);
        });
    }
// Verifies that the legacy 'global-service-id' syntax still assigns a rotation,
// passes the rotation name and global DNS name to the config server, and
// creates the expected CNAME record.
@Test
void testDnsUpdatesForGlobalEndpointLegacySyntax() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.globalServiceId("foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
Collection<Deployment> deployments = context.instance().deployments().values();
assertFalse(deployments.isEmpty());
for (Deployment deployment : deployments) {
assertEquals(Set.of("rotation-id-01",
"app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
"Rotation names are passed to config server in " + deployment.zone());
}
context.flushDnsUpdates();
// Exactly one DNS record: the global name CNAMEd to the rotation FQDN.
assertEquals(1, tester.controllerTester().nameService().records().size());
Optional<Record> record = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
assertTrue(record.isPresent());
assertEquals("app1.tenant1.global.vespa.oath.cloud", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
List<String> globalDnsNames = tester.controller().routing().readDeclaredEndpointsOf(context.instanceId())
.scope(Endpoint.Scope.global)
.sortedBy(comparing(Endpoint::dnsName))
.mapToList(Endpoint::dnsName);
assertEquals(List.of("app1.tenant1.global.vespa.oath.cloud"),
globalDnsNames);
}
// Verifies rotation assignment and DNS records when multiple global endpoints
// are declared, including one ('west') covering only a subset of regions.
@Test
void testDnsUpdatesForMultipleGlobalEndpoints() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("foobar", "qrs", "us-west-1", "us-central-1")
.endpoint("default", "qrs", "us-west-1", "us-central-1")
.endpoint("all", "qrs")
.endpoint("west", "qrs", "us-west-1")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
Collection<Deployment> deployments = context.instance().deployments().values();
assertFalse(deployments.isEmpty());
// All zones carry the three region-wide endpoints; only us-west-1 also
// carries the 'west' endpoint.
var notWest = Set.of(
"rotation-id-01", "foobar.app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02", "app1.tenant1.global.vespa.oath.cloud",
"rotation-id-03", "all.app1.tenant1.global.vespa.oath.cloud"
);
var west = Sets.union(notWest, Set.of("rotation-id-04", "west.app1.tenant1.global.vespa.oath.cloud"));
for (Deployment deployment : deployments) {
assertEquals(ZoneId.from("prod.us-west-1").equals(deployment.zone()) ? west : notWest,
tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
"Rotation names are passed to config server in " + deployment.zone());
}
context.flushDnsUpdates();
// One CNAME per endpoint, each pointing at its assigned rotation's FQDN.
assertEquals(4, tester.controllerTester().nameService().records().size());
var record1 = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
assertTrue(record1.isPresent());
assertEquals("app1.tenant1.global.vespa.oath.cloud", record1.get().name().asString());
assertEquals("rotation-fqdn-02.", record1.get().data().asString());
var record2 = tester.controllerTester().findCname("foobar.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record2.isPresent());
assertEquals("foobar.app1.tenant1.global.vespa.oath.cloud", record2.get().name().asString());
assertEquals("rotation-fqdn-01.", record2.get().data().asString());
var record3 = tester.controllerTester().findCname("all.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record3.isPresent());
assertEquals("all.app1.tenant1.global.vespa.oath.cloud", record3.get().name().asString());
assertEquals("rotation-fqdn-03.", record3.get().data().asString());
var record4 = tester.controllerTester().findCname("west.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record4.isPresent());
assertEquals("west.app1.tenant1.global.vespa.oath.cloud", record4.get().name().asString());
assertEquals("rotation-fqdn-04.", record4.get().data().asString());
}
// Verifies behavior as global endpoint declarations evolve across submissions:
// adding endpoints and regions is allowed, while removing regions from (or
// removing) an endpoint is rejected unless the 'global-endpoint-change'
// validation override is present.
@Test
void testDnsUpdatesForGlobalEndpointChanges() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
var west = ZoneId.from("prod", "us-west-1");
var central = ZoneId.from("prod", "us-central-1");
var east = ZoneId.from("prod", "us-east-3");
// Stage 1: 'default' endpoint covering west and central.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage).deploy();
for (var zone : List.of(west, central)) {
assertEquals(
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
,
"Zone " + zone + " is a member of global endpoint");
}
// Stage 2: adding a new 'east' endpoint is allowed without an override.
ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage2).deploy();
for (var zone : List.of(west, central)) {
assertEquals(
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
,
"Zone " + zone + " is a member of global endpoint");
}
assertEquals(
Set.of("rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(east))
,
"Zone " + east + " is a member of global endpoint");
// Stage 3: widening 'default' to also include east is allowed; east now
// belongs to both endpoints.
ApplicationPackage applicationPackage3 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value(), east.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage3).deploy();
for (var zone : List.of(west, central, east)) {
assertEquals(
zone.equals(east)
? Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud")
: Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
,
"Zone " + zone + " is a member of global endpoint");
}
// Stage 4: shrinking 'default' back (removing east) is rejected without the override.
ApplicationPackage applicationPackage4 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage4);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
"[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
"but does not include all of these in deployment.xml. Deploying given deployment.xml " +
"will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1] " +
"and add [endpoint 'default' (cluster qrs) -> us-central-1, us-west-1]. " +
ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
}
// Stage 5: removing the 'default' endpoint entirely is likewise rejected.
ApplicationPackage applicationPackage5 = new ApplicationPackageBuilder()
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage5);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
"[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
"but does not include all of these in deployment.xml. Deploying given deployment.xml " +
"will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1]. " +
ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
}
// Stage 6: the same removal succeeds once the override is declared.
ApplicationPackage applicationPackage6 = new ApplicationPackageBuilder()
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage6);
}
// Removing all global endpoints (with the required validation override) must
// release the instance's rotations and clear the container endpoints passed
// to the config server.
@Test
void testUnassignRotations() {
var ctx = tester.newDeploymentContext();
// First deploy with a global endpoint spanning both regions.
ApplicationPackage withEndpoint = new ApplicationPackageBuilder()
.endpoint("default", "qrs", "us-west-1", "us-central-1")
.region("us-west-1")
.region("us-central-1")
.build();
ctx.submit(withEndpoint).deploy();
// Then redeploy without the endpoint, allowing the global-endpoint-change override.
ApplicationPackage withoutEndpoint = new ApplicationPackageBuilder()
.region("us-west-1")
.region("us-central-1")
.allow(ValidationId.globalEndpointChange)
.build();
ctx.submit(withoutEndpoint).deploy();
// Rotation assignment and container endpoints are both cleared.
assertEquals(List.of(), ctx.instance().rotations());
assertEquals(Set.of(),
tester.configServer().containerEndpoints().get(ctx.deploymentIdIn(ZoneId.from("prod", "us-west-1"))));
}
// Verifies that rotations are returned to the pool when an application is
// deleted, and that a later application reusing the same global DNS name gets
// a fresh rotation (while the freed one can be handed to a different app).
@Test
void testDnsUpdatesWithChangeInRotationAssignment() {
String dnsName1 = "app1.tenant1.global.vespa.oath.cloud";
{
// app1 claims rotation-id-01, then is deleted; the rotation and its CNAME go away.
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals(1, tester.controllerTester().nameService().records().size());
{
Optional<Record> record = tester.controllerTester().findCname(dnsName1);
assertTrue(record.isPresent());
assertEquals(dnsName1, record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
applicationPackage = new ApplicationPackageBuilder()
.allow(ValidationId.deploymentRemoval)
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage);
tester.applications().deleteApplication(context.application().id(),
tester.controllerTester().credentialsFor(context.application().id().tenant()));
try (RotationLock lock = tester.controller().routing().rotations().lock()) {
assertTrue(tester.controller().routing().rotations().availableRotations(lock)
.containsKey(new RotationId("rotation-id-01")),
"Rotation is unassigned");
}
context.flushDnsUpdates();
Optional<Record> record = tester.controllerTester().findCname(dnsName1);
assertTrue(record.isEmpty(), dnsName1 + " is removed");
}
String dnsName2 = "app2.tenant2.global.vespa.oath.cloud";
{
// app2 is assigned the freed rotation-id-01.
var context = tester.newDeploymentContext("tenant2", "app2", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals(1, tester.controllerTester().nameService().records().size());
var record = tester.controllerTester().findCname(dnsName2);
assertTrue(record.isPresent());
assertEquals(dnsName2, record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
{
// Redeploying app1 gets the next free rotation (rotation-id-02), not its old one.
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals("rotation-id-02", context.instance().rotations().get(0).rotationId().asString());
assertEquals(2, tester.controllerTester().nameService().records().size());
var record1 = tester.controllerTester().findCname(dnsName1);
assertTrue(record1.isPresent());
assertEquals("rotation-fqdn-02.", record1.get().data().asString());
var record2 = tester.controllerTester().findCname(dnsName2);
assertTrue(record2.isPresent());
assertEquals("rotation-fqdn-01.", record2.get().data().asString());
}
}
// Verifies dev-zone deployment behavior: the deployment activates directly
// (no job status, no stored deployment spec), endpoints use the zone's
// configured routing method, deployment metadata is stored, and deactivation
// writes an empty metadata blob at the new timestamp.
// NOTE(review): the original had a duplicated @Test annotation here; @Test is
// not repeatable, so the duplicate was a compile error and has been removed.
@Test
void testDevDeployment() {
ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]);
var context = tester.newDeploymentContext();
ZoneId zone = ZoneId.from("dev", "us-east-1");
tester.controllerTester().zoneRegistry()
.setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.sharedLayer4);
context.runJob(zone, applicationPackage);
assertTrue(tester.configServer().application(context.instanceId(), zone).get().activated(),
"Application deployed and activated");
assertTrue(context.instanceJobs().isEmpty(),
"No job status added");
assertEquals(DeploymentSpec.empty, context.application().deploymentSpec(), "DeploymentSpec is not stored");
// Endpoints for the dev deployment must use the routing method configured above.
Set<RoutingMethod> routingMethods = tester.controller().routing().readEndpointsOf(context.deploymentIdIn(zone))
.asList()
.stream()
.map(Endpoint::routingMethod)
.collect(Collectors.toSet());
assertEquals(routingMethods, Set.of(RoutingMethod.sharedLayer4));
// Deployment metadata is stored for the deployment's timestamp.
assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(new DeploymentId(context.instanceId(), zone))
.get(tester.clock().instant()));
tester.clock().advance(Duration.ofSeconds(1));
tester.controller().applications().deactivate(context.instanceId(), zone);
// Deactivation stores an empty metadata blob at the advanced timestamp.
assertArrayEquals(new byte[0],
tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(new DeploymentId(context.instanceId(), zone))
.get(tester.clock().instant()));
}
// Verifies dev-deployment platform selection when major 8 is flagged as
// incompatible with earlier majors: deployments must land on a platform
// compatible with the package's compile version, and impossible combinations
// are rejected with descriptive errors.
@Test
void testDevDeploymentWithIncompatibleVersions() {
Version version1 = new Version("7");
Version version2 = new Version("7.5");
Version version3 = new Version("8");
var context = tester.newDeploymentContext();
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
tester.controllerTester().upgradeSystem(version2);
// Keep version2 alive in the system so it remains a deployment target.
tester.newDeploymentContext("keep", "v2", "alive").submit().deploy();
ZoneId zone = ZoneId.from("dev", "us-east-1");
// Compiled against 7 -> deploys on the newest compatible platform, 7.5.
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).build());
assertEquals(version2, context.deployment(zone).version());
assertEquals(Optional.of(version1), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
fail("Should fail when specifying a major that does not yet exist");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms were found for major version 8 specified in deployment.xml", e.getMessage());
}
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
fail("Should fail when compiled against a version which is only compatible with not-yet-existent versions");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms are compatible with compile version 8", e.getMessage());
}
// Once version 8 exists, a 7-compiled package pinned to major 8 is still incompatible.
tester.controllerTester().upgradeSystem(version3);
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
fail("Should fail when specifying a major which is incompatible with compile version");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms on major version 8 specified in deployment.xml are compatible with compile version 7", e.getMessage());
}
// Compiled against 8 (with or without an explicit major) -> deploys on 8.
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).majorVersion(8).build());
assertEquals(version3, context.deployment(zone).version());
assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
assertEquals(version3, context.deployment(zone).version());
assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
}
// Suspending one production deployment must not affect the application's other zones.
@Test
void testSuspension() {
var ctx = tester.newDeploymentContext();
ctx.submit(new ApplicationPackageBuilder()
.region("us-west-1")
.region("us-east-3")
.build())
.deploy();
DeploymentId westDeployment = ctx.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
DeploymentId eastDeployment = ctx.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-east-3")));
// Both deployments start out unsuspended.
assertFalse(tester.configServer().isSuspended(westDeployment));
assertFalse(tester.configServer().isSuspended(eastDeployment));
// Suspend only us-west-1; us-east-3 must remain unaffected.
tester.configServer().setSuspension(westDeployment, true);
assertTrue(tester.configServer().isSuspended(westDeployment));
assertFalse(tester.configServer().isSuspended(eastDeployment));
}
// Deactivating an already-deactivated deployment is a no-op rather than an error.
@Test
void testDeletingApplicationThatHasAlreadyBeenDeleted() {
var ctx = tester.newDeploymentContext();
ZoneId westZone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
ApplicationPackage pkg = new ApplicationPackageBuilder()
.region("us-west-1")
.build();
ctx.submit(pkg).runJob(westZone, pkg);
// Deactivate twice: the second call must succeed silently.
tester.controller().applications().deactivate(ctx.instanceId(), westZone);
tester.controller().applications().deactivate(ctx.instanceId(), westZone);
}
// Warnings reported by the config server during deployment must surface in
// the deployment's metrics.
@Test
void testDeployApplicationWithWarnings() {
var ctx = tester.newDeploymentContext();
ZoneId prodZone = ZoneId.from("prod", "us-west-1");
int expectedWarnings = 3;
// Arrange for the config server to emit warnings for this deployment.
tester.configServer().generateWarnings(ctx.deploymentIdIn(prodZone), expectedWarnings);
ctx.submit(new ApplicationPackageBuilder()
.region("us-west-1")
.build())
.deploy();
assertEquals(expectedWarnings, ctx.deployment(prodZone)
.metrics().warnings().get(DeploymentMetrics.Warning.all).intValue());
}
// Verifies endpoint certificate provisioning: a certificate is provisioned on
// prod deployment with the expected SAN list, is reused on redeploy, and is
// also provisioned for dev zones behind a routing layer.
@Test
void testDeploySelectivelyProvisionsCertificate() {
Function<Instance, Optional<EndpointCertificateMetadata>> certificate = (application) -> tester.controller().curator().readEndpointCertificateMetadata(application.id());
var context1 = tester.newDeploymentContext("tenant1", "app1", "default");
var prodZone = ZoneId.from("prod", "us-west-1");
var stagingZone = ZoneId.from("staging", "us-east-3");
var testZone = ZoneId.from("test", "us-east-1");
tester.controllerTester().zoneRegistry().exclusiveRoutingIn(ZoneApiMock.from(prodZone));
var applicationPackage = new ApplicationPackageBuilder().athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
.region(prodZone.region())
.build();
context1.submit(applicationPackage).deploy();
var cert = certificate.apply(context1.instance());
assertTrue(cert.isPresent(), "Provisions certificate in " + Environment.prod);
// SANs: random-prefix name, global names, plus plain and wildcard names for
// each of the prod, test and staging zones.
assertEquals(Stream.concat(Stream.of("vznqtz7a5ygwjkbhhj7ymxvlrekgt4l6g.vespa.oath.cloud",
"app1.tenant1.global.vespa.oath.cloud",
"*.app1.tenant1.global.vespa.oath.cloud"),
Stream.of(prodZone, testZone, stagingZone)
.flatMap(zone -> Stream.of("", "*.")
.map(prefix -> prefix + "app1.tenant1." + zone.region().value() +
(zone.environment() == Environment.prod ? "" : "." + zone.environment().value()) +
".vespa.oath.cloud")))
.collect(Collectors.toUnmodifiableSet()),
Set.copyOf(tester.controllerTester().serviceRegistry().endpointCertificateMock().dnsNamesOf(context1.instanceId())));
// Redeploying reuses the existing certificate.
context1.submit(applicationPackage).deploy();
assertEquals(cert, certificate.apply(context1.instance()));
// A dev deployment in a zone with a routing layer also gets a certificate.
var context2 = tester.newDeploymentContext("tenant1", "app2", "default");
var devZone = ZoneId.from("dev", "us-east-1");
context2.runJob(devZone, applicationPackage);
assertTrue(tester.configServer().application(context2.instanceId(), devZone).get().activated(),
"Application deployed and activated");
assertTrue(certificate.apply(context2.instance()).isPresent(), "Provisions certificate also in zone with routing layer");
}
// Verifies that a global endpoint spanning regions in different clouds is
// rejected at submission time, with the offending endpoint named in the error.
@Test
void testDeployWithGlobalEndpointsInMultipleClouds() {
tester.controllerTester().zoneRegistry().setZones(
ZoneApiMock.fromId("test.us-west-1"),
ZoneApiMock.fromId("staging.us-west-1"),
ZoneApiMock.fromId("prod.us-west-1"),
ZoneApiMock.newBuilder().with(CloudName.AWS).withId("prod.aws-us-east-1").build()
);
var context = tester.newDeploymentContext();
// Endpoint with no explicit regions implicitly covers both clouds -> rejected.
var applicationPackage = new ApplicationPackageBuilder()
.region("aws-us-east-1")
.region("us-west-1")
.endpoint("default", "default")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint 'default' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage());
}
// A single-cloud endpoint is fine, but 'foo' mixing clouds is still rejected.
var applicationPackage2 = new ApplicationPackageBuilder()
.region("aws-us-east-1")
.region("us-west-1")
.endpoint("aws", "default", "aws-us-east-1")
.endpoint("foo", "default", "aws-us-east-1", "us-west-1")
.build();
try {
context.submit(applicationPackage2);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint 'foo' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage());
}
}
// An application can be submitted and deployed without a source revision attached.
@Test
void testDeployWithoutSourceRevision() {
var ctx = tester.newDeploymentContext();
ApplicationPackage pkg = new ApplicationPackageBuilder()
.upgradePolicy("default")
.region("us-west-1")
.build();
// Submit with an empty source revision, then deploy normally.
ctx.submit(pkg, Optional.empty()).deploy();
assertEquals(1, ctx.instance().deployments().size(), "Deployed application");
}
// Verifies DNS record creation when zones use different routing methods:
// the exclusively-routed zone gets weighted/latency ALIAS records and a zone
// CNAME, while the shared-routing zone contributes no direct records here.
@Test
void testDeployWithGlobalEndpointsAndMultipleRoutingMethods() {
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
.endpoint("default", "default", zone1.region().value(), zone2.region().value())
.endpoint("east", "default", zone2.region().value())
.region(zone1.region())
.region(zone2.region())
.build();
// zone1 uses shared routing; zone2 uses exclusive routing.
tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone1), RoutingMethod.sharedLayer4);
tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone2), RoutingMethod.exclusive);
context.submit(applicationPackage).deploy();
var expectedRecords = List.of(
new Record(Record.Type.ALIAS,
RecordName.from("application.tenant.us-east-3-w.vespa.oath.cloud"),
new WeightedAliasTarget(HostName.of("lb-0--tenant.application.default--prod.us-east-3"),
"dns-zone-1", ZoneId.from("prod.us-east-3"), 1).pack()),
new Record(Record.Type.ALIAS,
RecordName.from("east.application.tenant.global.vespa.oath.cloud"),
new LatencyAliasTarget(HostName.of("application.tenant.us-east-3-w.vespa.oath.cloud"),
"dns-zone-1", ZoneId.from("prod.us-east-3")).pack()),
new Record(Record.Type.CNAME,
RecordName.from("application.tenant.us-east-3.vespa.oath.cloud"),
RecordData.from("lb-0--tenant.application.default--prod.us-east-3.")));
assertEquals(expectedRecords, List.copyOf(tester.controllerTester().nameService().records()));
}
// Verifies container endpoint names passed to the config server when all zones
// use exclusive (direct) routing and no rotations are configured: region-scoped
// endpoints only cover their declared zones.
@Test
void testDeploymentDirectRouting() {
// Local tester with an empty rotations config, so no rotation names appear.
DeploymentTester tester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build(), main));
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var zone3 = ZoneId.from("prod", "eu-west-1");
tester.controllerTester().zoneRegistry()
.exclusiveRoutingIn(ZoneApiMock.from(zone1), ZoneApiMock.from(zone2), ZoneApiMock.from(zone3));
var applicationPackageBuilder = new ApplicationPackageBuilder()
.region(zone1.region())
.region(zone2.region())
.region(zone3.region())
.endpoint("default", "default")
.endpoint("foo", "qrs")
.endpoint("us", "default", zone1.region().value(), zone2.region().value())
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"));
context.submit(applicationPackageBuilder.build()).deploy();
// US zones belong to all three endpoints; eu-west-1 is not part of 'us'.
for (var zone : List.of(zone1, zone2)) {
assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
"foo.application.tenant.global.vespa.oath.cloud",
"us.application.tenant.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
"Expected container endpoints in " + zone);
}
assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
"foo.application.tenant.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone3)),
"Expected container endpoints in " + zone3);
}
// Verifies that moving a global endpoint to a different cluster is rejected
// without the 'global-endpoint-change' validation override, and succeeds with it.
@Test
void testChangeEndpointCluster() {
var context = tester.newDeploymentContext();
var west = ZoneId.from("prod", "us-west-1");
var east = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region(west.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage).deploy();
assertEquals(ClusterSpec.Id.from("foo"), tester.applications().requireInstance(context.instanceId())
.rotations().get(0).clusterId());
// Re-point the endpoint at cluster 'bar' without an override -> rejected.
applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "bar")
.region(west.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage).deploy();
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant.application' has endpoints [endpoint " +
"'default' (cluster foo) -> us-east-3, us-west-1], but does not include all of these in " +
"deployment.xml. Deploying given deployment.xml will remove " +
"[endpoint 'default' (cluster foo) -> us-east-3, us-west-1] and add " +
"[endpoint 'default' (cluster bar) -> us-east-3, us-west-1]. To allow this add " +
"<allow until='yyyy-mm-dd'>global-endpoint-change</allow> to validation-overrides.xml, see " +
// NOTE(review): the line below appears truncated in this copy of the file
// (unterminated string literal starting with "https:) — verify against the
// original source before building.
"https:
}
// Same change with the override declared -> accepted, rotation moves to 'bar'.
applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "bar")
.region(west.region().value())
.region(east.region().value())
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage).deploy();
assertEquals(ClusterSpec.Id.from("bar"), tester.applications().requireInstance(context.instanceId())
.rotations().get(0).clusterId());
}
// Verifies that a corrupt application record in the curator store is skipped by
// readable() but makes asList() fail, and that resubmitting repairs the record.
@Test
void testReadableApplications() {
var db = new MockCuratorDb(tester.controller().system());
var tester = new DeploymentTester(new ControllerTester(db));
var app1 = tester.newDeploymentContext("t1", "a1", "default")
.submit()
.deploy();
var app2 = tester.newDeploymentContext("t2", "a2", "default")
.submit()
.deploy();
assertEquals(2, tester.applications().readable().size());
// Corrupt app2's serialized data directly in the store.
db.curator().set(Path.fromString("/controller/v1/applications/" + app2.application().id().serialized()),
new byte[]{(byte) 0xDE, (byte) 0xAD});
// readable() skips the unreadable application; asList() throws instead.
assertEquals(1, tester.applications().readable().size());
try {
tester.applications().asList();
fail("Expected exception");
} catch (Exception ignored) {
}
// A fresh submit/deploy of the readable application still works.
app1.submit().deploy();
}
// Verifies that an endpoint ID clashing with another instance's name/endpoint
// combination (producing the same DNS name) is rejected at submission.
@Test
void testClashingEndpointIdAndInstanceName() {
// Instance 'default' with endpoint 'dev' clashes with instance 'dev' with
// endpoint 'default': both map to the same global DNS name.
String deploymentXml = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" +
" <instance id=\"default\">\n" +
" <prod>\n" +
" <region active=\"true\">us-west-1</region>\n" +
" </prod>\n" +
" <endpoints>\n" +
" <endpoint id=\"dev\" container-id=\"qrs\"/>\n" +
" </endpoints>\n" +
" </instance>\n" +
" <instance id=\"dev\">\n" +
" <prod>\n" +
" <region active=\"true\">us-west-1</region>\n" +
" </prod>\n" +
" <endpoints>\n" +
" <endpoint id=\"default\" container-id=\"qrs\"/>\n" +
" </endpoints>\n" +
" </instance>\n" +
"</deployment>\n";
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
try {
tester.newDeploymentContext().submit(applicationPackage);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint with ID 'default' in instance 'dev' clashes with endpoint 'dev' in instance 'default'",
e.getMessage());
}
}
// Verifies that submitting a test package containing staging tests without a
// staging setup produces a testPackage warning notification.
@Test
void testTestPackageWarnings() {
String deploymentXml = "<deployment version='1.0'>\n" +
" <prod>\n" +
" <region>us-west-1</region>\n" +
" </prod>\n" +
"</deployment>\n";
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
// Test package has a staging test but no staging setup file.
byte[] testPackage = ApplicationPackage.filesZip(Map.of("tests/staging-test/foo.json", new byte[0]));
var app = tester.newDeploymentContext();
tester.jobs().submit(app.application().id(), Submission.basic(applicationPackage, testPackage), 1);
assertEquals(List.of(new Notification(tester.clock().instant(),
Type.testPackage,
Level.warning,
NotificationSource.from(app.application().id()),
List.of("test package has staging tests, so it should also include staging setup",
// NOTE(review): the line below appears truncated in this copy of the file
// (unterminated string literal starting with "see https:) — verify against
// the original source before building.
"see https:
tester.controller().notificationsDb().listNotifications(NotificationSource.from(app.application().id()), true));
}
// Verifies compile-version selection across platform upgrades, confidence
// changes, incompatible-major boundaries, and explicit major pins: the chosen
// compile version must always be deployable for the application's current state.
@Test
void testCompileVersion() {
DeploymentContext context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build();
TenantAndApplicationId application = TenantAndApplicationId.from(context.instanceId());
// Single version 7.1 with normal confidence: chosen for major 7 and unpinned;
// major 8 has no versions yet.
Version version0 = Version.fromString("7.1");
tester.controllerTester().upgradeSystem(version0);
tester.upgrader().overrideConfidence(version0, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
context.submit(applicationPackage).deploy();
// 7.2 becomes the compile version only after the application deploys on it.
Version version1 = Version.fromString("7.2");
tester.controllerTester().upgradeSystem(version1);
tester.upgrader().overrideConfidence(version1, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
tester.upgrader().maintain();
context.deployPlatform(version1);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
// Keep 7.2 referenced by another application so it is not garbage-collected.
DeploymentContext legacyApp = tester.newDeploymentContext("avoid", "gc", "default").submit().deploy();
TenantAndApplicationId newApp = TenantAndApplicationId.from("new", "app");
// 8.0 with low confidence is not offered on major 8.
Version version2 = Version.fromString("8.0");
tester.controllerTester().upgradeSystem(version2);
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
// With normal confidence, existing apps stay on 7.x; a new app gets 8.0.
tester.upgrader().overrideConfidence(version2, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));
// Mark major 8 as incompatible: an explicit major-8 pin now yields 8.0.
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));
// Dropping 8.0 back to low confidence removes major 8 as an option.
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));
// Once the app itself deploys compiled against 8.0, its compile version is 8.0.
tester.upgrader().overrideConfidence(version2, Confidence.normal);
tester.controllerTester().computeVersionStatus();
context.submit(new ApplicationPackageBuilder().region("us-west-1").compileVersion(version2).build()).deploy();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
// Low confidence keeps 8.0 as the app's version, since the app runs on it.
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
// Broken confidence on 8.0 leaves no suitable released version on major 8.
tester.upgrader().overrideConfidence(version2, Confidence.broken);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals("no suitable, released compile version exists",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.empty()))
.getMessage());
assertEquals("no suitable, released compile version exists for specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
// Clearing the incompatible-majors flag makes 7.2 acceptable for major 8 again.
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of(), String.class);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
}
@Test
void testCloudAccount() {
DeploymentContext context = tester.newDeploymentContext();
ZoneId devZone = devUsEast1.zone();
ZoneId prodZone = productionUsWest1.zone();
String cloudAccount = "012345678912";
var applicationPackage = new ApplicationPackageBuilder()
.cloudAccount(cloudAccount)
.region(prodZone.region())
.build();
context.submit(applicationPackage).runJobExpectingFailure(systemTest, "Requested cloud account '012345678912' is not valid for tenant 'tenant'");
tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class);
context.runJobExpectingFailure(systemTest, "Zone test.us-east-1 is not configured in requested cloud account '012345678912'")
.abortJob(stagingTest);
tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount),
systemTest.zone(),
stagingTest.zone(),
prodZone);
context.submit(applicationPackage).deploy();
tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), devZone);
context.runJob(devZone, applicationPackage);
for (var zoneId : List.of(systemTest.zone(), stagingTest.zone(), devZone, prodZone)) {
assertEquals(cloudAccount, tester.controllerTester().configServer()
.cloudAccount(context.deploymentIdIn(zoneId))
.get().value());
}
}
@Test
void testSubmitWithElementDeprecatedOnPreviousMajor() {
DeploymentContext context = tester.newDeploymentContext();
var applicationPackage = new ApplicationPackageBuilder()
.compileVersion(Version.fromString("8.1"))
.region("us-west-1")
.globalServiceId("qrs")
.build();
try {
context.submit(applicationPackage).deploy();
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("Element 'prod' contains attribute 'global-service-id' deprecated since major version 7"));
}
}
} |
That's the required argument for the `.in(RegionName... regions)`. | private void validateEndpointRegions(List<Endpoint> endpoints, DeploymentInstanceSpec instance) {
for (var endpoint : endpoints) {
RegionName[] regions = new HashSet<>(endpoint.regions()).toArray(RegionName[]::new);
Set<CloudName> clouds = controller.zoneRegistry().zones().all().in(Environment.prod)
.in(regions)
.zones().stream()
.map(ZoneApi::getCloudName)
.collect(Collectors.toSet());
String endpointString = instance == null ? "Application endpoint '" + endpoint.endpointId() + "'"
: "Endpoint '" + endpoint.endpointId() + "' in " + instance;
if (Set.of(CloudName.GCP, CloudName.AWS).containsAll(clouds)) { }
else if (Set.of(CloudName.DEFAULT).containsAll(clouds)) {
if (endpoint.level() == Level.application && regions.length != 1) {
throw new IllegalArgumentException(endpointString + " cannot contain different regions: " +
endpoint.regions().stream().sorted().toList());
}
}
else {
throw new IllegalArgumentException(endpointString + " cannot contain regions in different clouds: " +
endpoint.regions().stream().sorted().toList());
}
}
} | RegionName[] regions = new HashSet<>(endpoint.regions()).toArray(RegionName[]::new); | private void validateEndpointRegions(List<Endpoint> endpoints, DeploymentInstanceSpec instance) {
for (var endpoint : endpoints) {
RegionName[] regions = new HashSet<>(endpoint.regions()).toArray(RegionName[]::new);
Set<CloudName> clouds = controller.zoneRegistry().zones().all().in(Environment.prod)
.in(regions)
.zones().stream()
.map(ZoneApi::getCloudName)
.collect(Collectors.toSet());
String endpointString = instance == null ? "Application endpoint '" + endpoint.endpointId() + "'"
: "Endpoint '" + endpoint.endpointId() + "' in " + instance;
if (Set.of(CloudName.GCP, CloudName.AWS).containsAll(clouds)) { }
else if (Set.of(CloudName.DEFAULT).containsAll(clouds)) {
if (endpoint.level() == Level.application && regions.length != 1) {
throw new IllegalArgumentException(endpointString + " cannot contain different regions: " +
endpoint.regions().stream().sorted().toList());
}
}
else {
throw new IllegalArgumentException(endpointString + " cannot contain regions in different clouds: " +
endpoint.regions().stream().sorted().toList());
}
}
} | class ApplicationPackageValidator {
private final Controller controller;
public ApplicationPackageValidator(Controller controller) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
}
/**
* Validate the given application package
*
* @throws IllegalArgumentException if any validations fail
*/
public void validate(Application application, ApplicationPackage applicationPackage, Instant instant) {
validateSteps(applicationPackage.deploymentSpec());
validateEndpointRegions(applicationPackage.deploymentSpec());
validateEndpointChange(application, applicationPackage, instant);
validateCompactedEndpoint(applicationPackage);
validateSecurityClientsPem(applicationPackage);
validateDeprecatedElements(applicationPackage);
}
/** Verify that deployment spec does not use elements deprecated on a major version older than wanted major version */
private void validateDeprecatedElements(ApplicationPackage applicationPackage) {
int wantedMajor = applicationPackage.compileVersion().map(Version::getMajor)
.or(() -> applicationPackage.deploymentSpec().majorVersion())
.or(() -> controller.readVersionStatus().controllerVersion()
.map(VespaVersion::versionNumber)
.map(Version::getMajor))
.orElseThrow(() -> new IllegalArgumentException("Could not determine wanted major version"));
for (var deprecatedElement : applicationPackage.deploymentSpec().deprecatedElements()) {
if (deprecatedElement.majorVersion() >= wantedMajor) continue;
throw new IllegalArgumentException(deprecatedElement.humanReadableString());
}
}
/** Verify that we have the security/clients.pem file for public systems */
private void validateSecurityClientsPem(ApplicationPackage applicationPackage) {
if (!controller.system().isPublic() || applicationPackage.deploymentSpec().steps().isEmpty()) return;
if (applicationPackage.trustedCertificates().isEmpty())
throw new IllegalArgumentException("Missing required file 'security/clients.pem'");
}
/** Verify that each of the production zones listed in the deployment spec exist in this system */
private void validateSteps(DeploymentSpec deploymentSpec) {
for (var spec : deploymentSpec.instances()) {
for (var zone : spec.zones()) {
Environment environment = zone.environment();
if (zone.region().isEmpty()) continue;
ZoneId zoneId = ZoneId.from(environment, zone.region().get());
if (!controller.zoneRegistry().hasZone(zoneId)) {
throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
}
}
}
}
/** Verify that:
* <ul>
* <li>no single endpoint contains regions in different clouds</li>
* <li>application endpoints with different regions must be contained in CGP and AWS</li>
* </ul>
*/
private void validateEndpointRegions(DeploymentSpec deploymentSpec) {
for (var instance : deploymentSpec.instances()) {
validateEndpointRegions(instance.endpoints(), instance);
}
validateEndpointRegions(deploymentSpec.endpoints(), null);
}
/** Verify endpoint configuration of given application package */
private void validateEndpointChange(Application application, ApplicationPackage applicationPackage, Instant instant) {
applicationPackage.deploymentSpec().instances().forEach(instance -> validateEndpointChange(application,
instance.name(),
applicationPackage,
instant));
}
/** Verify that compactable endpoint parts (instance name and endpoint ID) do not clash */
private void validateCompactedEndpoint(ApplicationPackage applicationPackage) {
Map<List<String>, InstanceEndpoint> instanceEndpoints = new HashMap<>();
for (var instanceSpec : applicationPackage.deploymentSpec().instances()) {
for (var endpoint : instanceSpec.endpoints()) {
List<String> nonCompactableIds = nonCompactableIds(instanceSpec.name(), endpoint);
InstanceEndpoint instanceEndpoint = new InstanceEndpoint(instanceSpec.name(), endpoint.endpointId());
InstanceEndpoint existingEndpoint = instanceEndpoints.get(nonCompactableIds);
if (existingEndpoint != null) {
throw new IllegalArgumentException("Endpoint with ID '" + endpoint.endpointId() + "' in instance '"
+ instanceSpec.name().value() +
"' clashes with endpoint '" + existingEndpoint.endpointId +
"' in instance '" + existingEndpoint.instance + "'");
}
instanceEndpoints.put(nonCompactableIds, instanceEndpoint);
}
}
}
/** Verify changes to endpoint configuration by comparing given application package to the existing one, if any */
private void validateEndpointChange(Application application, InstanceName instanceName, ApplicationPackage applicationPackage, Instant instant) {
var validationId = ValidationId.globalEndpointChange;
if (applicationPackage.validationOverrides().allows(validationId, instant)) return;
var endpoints = application.deploymentSpec().instance(instanceName)
.map(ApplicationPackageValidator::allEndpointsOf)
.orElseGet(List::of);
var newEndpoints = allEndpointsOf(applicationPackage.deploymentSpec().requireInstance(instanceName));
if (newEndpoints.containsAll(endpoints)) return;
if (containsAllDestinationsOf(endpoints, newEndpoints)) return;
var removedEndpoints = new ArrayList<>(endpoints);
removedEndpoints.removeAll(newEndpoints);
newEndpoints.removeAll(endpoints);
throw new IllegalArgumentException(validationId.value() + ": application '" + application.id() +
(instanceName.isDefault() ? "" : "." + instanceName.value()) +
"' has endpoints " + endpoints +
", but does not include all of these in deployment.xml. Deploying given " +
"deployment.xml will remove " + removedEndpoints +
(newEndpoints.isEmpty() ? "" : " and add " + newEndpoints) +
". " + ValidationOverrides.toAllowMessage(validationId));
}
/** Returns whether newEndpoints contains all destinations in endpoints */
private static boolean containsAllDestinationsOf(List<Endpoint> endpoints, List<Endpoint> newEndpoints) {
var containsAllRegions = true;
var hasSameCluster = true;
for (var endpoint : endpoints) {
var endpointContainsAllRegions = false;
var endpointHasSameCluster = false;
for (var newEndpoint : newEndpoints) {
if (endpoint.endpointId().equals(newEndpoint.endpointId())) {
endpointContainsAllRegions = newEndpoint.regions().containsAll(endpoint.regions());
endpointHasSameCluster = newEndpoint.containerId().equals(endpoint.containerId());
}
}
containsAllRegions &= endpointContainsAllRegions;
hasSameCluster &= endpointHasSameCluster;
}
return containsAllRegions && hasSameCluster;
}
/** Returns all configued endpoints of given deployment instance spec */
private static List<Endpoint> allEndpointsOf(DeploymentInstanceSpec deploymentInstanceSpec) {
var endpoints = new ArrayList<>(deploymentInstanceSpec.endpoints());
legacyEndpoint(deploymentInstanceSpec).ifPresent(endpoints::add);
return endpoints;
}
/** Returns global service ID as an endpoint, if any global service ID is set */
private static Optional<Endpoint> legacyEndpoint(DeploymentInstanceSpec instance) {
return instance.globalServiceId().map(globalServiceId -> {
var targets = instance.zones().stream()
.filter(zone -> zone.environment().isProduction())
.flatMap(zone -> zone.region().stream())
.distinct()
.map(region -> new Endpoint.Target(region, instance.name(), 1))
.collect(Collectors.toList());
return new Endpoint(EndpointId.defaultId().id(), globalServiceId, Endpoint.Level.instance, targets);
});
}
/** Returns a list of the non-compactable IDs of given instance and endpoint */
private static List<String> nonCompactableIds(InstanceName instance, Endpoint endpoint) {
List<String> ids = new ArrayList<>(2);
if (!instance.isDefault()) {
ids.add(instance.value());
}
if (!"default".equals(endpoint.endpointId())) {
ids.add(endpoint.endpointId());
}
return ids;
}
private static class InstanceEndpoint {
private final InstanceName instance;
private final String endpointId;
public InstanceEndpoint(InstanceName instance, String endpointId) {
this.instance = instance;
this.endpointId = endpointId;
}
}
} | class ApplicationPackageValidator {
private final Controller controller;
public ApplicationPackageValidator(Controller controller) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
}
/**
* Validate the given application package
*
* @throws IllegalArgumentException if any validations fail
*/
public void validate(Application application, ApplicationPackage applicationPackage, Instant instant) {
validateSteps(applicationPackage.deploymentSpec());
validateEndpointRegions(applicationPackage.deploymentSpec());
validateEndpointChange(application, applicationPackage, instant);
validateCompactedEndpoint(applicationPackage);
validateSecurityClientsPem(applicationPackage);
validateDeprecatedElements(applicationPackage);
}
/** Verify that deployment spec does not use elements deprecated on a major version older than wanted major version */
private void validateDeprecatedElements(ApplicationPackage applicationPackage) {
int wantedMajor = applicationPackage.compileVersion().map(Version::getMajor)
.or(() -> applicationPackage.deploymentSpec().majorVersion())
.or(() -> controller.readVersionStatus().controllerVersion()
.map(VespaVersion::versionNumber)
.map(Version::getMajor))
.orElseThrow(() -> new IllegalArgumentException("Could not determine wanted major version"));
for (var deprecatedElement : applicationPackage.deploymentSpec().deprecatedElements()) {
if (deprecatedElement.majorVersion() >= wantedMajor) continue;
throw new IllegalArgumentException(deprecatedElement.humanReadableString());
}
}
/** Verify that we have the security/clients.pem file for public systems */
private void validateSecurityClientsPem(ApplicationPackage applicationPackage) {
if (!controller.system().isPublic() || applicationPackage.deploymentSpec().steps().isEmpty()) return;
if (applicationPackage.trustedCertificates().isEmpty())
throw new IllegalArgumentException("Missing required file 'security/clients.pem'");
}
/** Verify that each of the production zones listed in the deployment spec exist in this system */
private void validateSteps(DeploymentSpec deploymentSpec) {
for (var spec : deploymentSpec.instances()) {
for (var zone : spec.zones()) {
Environment environment = zone.environment();
if (zone.region().isEmpty()) continue;
ZoneId zoneId = ZoneId.from(environment, zone.region().get());
if (!controller.zoneRegistry().hasZone(zoneId)) {
throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
}
}
}
}
/** Verify that:
* <ul>
* <li>no single endpoint contains regions in different clouds</li>
* <li>application endpoints with different regions must be contained in CGP and AWS</li>
* </ul>
*/
private void validateEndpointRegions(DeploymentSpec deploymentSpec) {
for (var instance : deploymentSpec.instances()) {
validateEndpointRegions(instance.endpoints(), instance);
}
validateEndpointRegions(deploymentSpec.endpoints(), null);
}
/** Verify endpoint configuration of given application package */
private void validateEndpointChange(Application application, ApplicationPackage applicationPackage, Instant instant) {
applicationPackage.deploymentSpec().instances().forEach(instance -> validateEndpointChange(application,
instance.name(),
applicationPackage,
instant));
}
/** Verify that compactable endpoint parts (instance name and endpoint ID) do not clash */
private void validateCompactedEndpoint(ApplicationPackage applicationPackage) {
Map<List<String>, InstanceEndpoint> instanceEndpoints = new HashMap<>();
for (var instanceSpec : applicationPackage.deploymentSpec().instances()) {
for (var endpoint : instanceSpec.endpoints()) {
List<String> nonCompactableIds = nonCompactableIds(instanceSpec.name(), endpoint);
InstanceEndpoint instanceEndpoint = new InstanceEndpoint(instanceSpec.name(), endpoint.endpointId());
InstanceEndpoint existingEndpoint = instanceEndpoints.get(nonCompactableIds);
if (existingEndpoint != null) {
throw new IllegalArgumentException("Endpoint with ID '" + endpoint.endpointId() + "' in instance '"
+ instanceSpec.name().value() +
"' clashes with endpoint '" + existingEndpoint.endpointId +
"' in instance '" + existingEndpoint.instance + "'");
}
instanceEndpoints.put(nonCompactableIds, instanceEndpoint);
}
}
}
/** Verify changes to endpoint configuration by comparing given application package to the existing one, if any */
private void validateEndpointChange(Application application, InstanceName instanceName, ApplicationPackage applicationPackage, Instant instant) {
var validationId = ValidationId.globalEndpointChange;
if (applicationPackage.validationOverrides().allows(validationId, instant)) return;
var endpoints = application.deploymentSpec().instance(instanceName)
.map(ApplicationPackageValidator::allEndpointsOf)
.orElseGet(List::of);
var newEndpoints = allEndpointsOf(applicationPackage.deploymentSpec().requireInstance(instanceName));
if (newEndpoints.containsAll(endpoints)) return;
if (containsAllDestinationsOf(endpoints, newEndpoints)) return;
var removedEndpoints = new ArrayList<>(endpoints);
removedEndpoints.removeAll(newEndpoints);
newEndpoints.removeAll(endpoints);
throw new IllegalArgumentException(validationId.value() + ": application '" + application.id() +
(instanceName.isDefault() ? "" : "." + instanceName.value()) +
"' has endpoints " + endpoints +
", but does not include all of these in deployment.xml. Deploying given " +
"deployment.xml will remove " + removedEndpoints +
(newEndpoints.isEmpty() ? "" : " and add " + newEndpoints) +
". " + ValidationOverrides.toAllowMessage(validationId));
}
/** Returns whether newEndpoints contains all destinations in endpoints */
private static boolean containsAllDestinationsOf(List<Endpoint> endpoints, List<Endpoint> newEndpoints) {
var containsAllRegions = true;
var hasSameCluster = true;
for (var endpoint : endpoints) {
var endpointContainsAllRegions = false;
var endpointHasSameCluster = false;
for (var newEndpoint : newEndpoints) {
if (endpoint.endpointId().equals(newEndpoint.endpointId())) {
endpointContainsAllRegions = newEndpoint.regions().containsAll(endpoint.regions());
endpointHasSameCluster = newEndpoint.containerId().equals(endpoint.containerId());
}
}
containsAllRegions &= endpointContainsAllRegions;
hasSameCluster &= endpointHasSameCluster;
}
return containsAllRegions && hasSameCluster;
}
/** Returns all configued endpoints of given deployment instance spec */
private static List<Endpoint> allEndpointsOf(DeploymentInstanceSpec deploymentInstanceSpec) {
var endpoints = new ArrayList<>(deploymentInstanceSpec.endpoints());
legacyEndpoint(deploymentInstanceSpec).ifPresent(endpoints::add);
return endpoints;
}
/** Returns global service ID as an endpoint, if any global service ID is set */
private static Optional<Endpoint> legacyEndpoint(DeploymentInstanceSpec instance) {
return instance.globalServiceId().map(globalServiceId -> {
var targets = instance.zones().stream()
.filter(zone -> zone.environment().isProduction())
.flatMap(zone -> zone.region().stream())
.distinct()
.map(region -> new Endpoint.Target(region, instance.name(), 1))
.collect(Collectors.toList());
return new Endpoint(EndpointId.defaultId().id(), globalServiceId, Endpoint.Level.instance, targets);
});
}
/** Returns a list of the non-compactable IDs of given instance and endpoint */
private static List<String> nonCompactableIds(InstanceName instance, Endpoint endpoint) {
List<String> ids = new ArrayList<>(2);
if (!instance.isDefault()) {
ids.add(instance.value());
}
if (!"default".equals(endpoint.endpointId())) {
ids.add(endpoint.endpointId());
}
return ids;
}
private static class InstanceEndpoint {
private final InstanceName instance;
private final String endpointId;
public InstanceEndpoint(InstanceName instance, String endpointId) {
this.instance = instance;
this.endpointId = endpointId;
}
}
} |
I was thinking maybe there are other format in JAR, like HIVE/Spark/Presto etc. | public void analyze(Analyzer analyzer) throws UserException {
super.analyze(analyzer);
analyzeCommon(analyzer);
if (isAggregate) {
analyzeUda();
} else {
if (isStarrocksJar) {
analyzeStarrocksJarUdf();
} else {
analyzeUdf();
}
}
} | if (isStarrocksJar) { | public void analyze(Analyzer analyzer) throws UserException {
super.analyze(analyzer);
analyzeCommon(analyzer);
if (isAggregate) {
analyzeUda();
} else {
Preconditions.checkArgument(isStarrocksJar);
analyzeStarrocksJarUdf();
}
} | class CreateFunctionStmt extends DdlStmt {
public static final String OBJECT_FILE_KEY = "object_file";
public static final String SYMBOL_KEY = "symbol";
public static final String PREPARE_SYMBOL_KEY = "prepare_fn";
public static final String CLOSE_SYMBOL_KEY = "close_fn";
public static final String MD5_CHECKSUM = "md5";
public static final String INIT_KEY = "init_fn";
public static final String UPDATE_KEY = "update_fn";
public static final String MERGE_KEY = "merge_fn";
public static final String SERIALIZE_KEY = "serialize_fn";
public static final String FINALIZE_KEY = "finalize_fn";
public static final String GET_VALUE_KEY = "get_value_fn";
public static final String REMOVE_KEY = "remove_fn";
public static final String TYPE_KEY = "type";
public static final String OBJECT_FORMAT_STARROCKS_JAR = "StarrocksJar";
public static final String EVAL_METHOD_NAME = "evaluate";
private final FunctionName functionName;
private final boolean isAggregate;
private final FunctionArgsDef argsDef;
private final TypeDef returnType;
private TypeDef intermediateType;
private final Map<String, String> properties;
private boolean isStarrocksJar = false;
private String objectFile;
private Function function;
private String checksum;
private Class udfClass;
private static final ImmutableMap<PrimitiveType, Class> PrimitiveTypeToJavaClassType = new ImmutableMap.Builder<PrimitiveType, Class>()
.put(PrimitiveType.BOOLEAN, Boolean.class)
.put(PrimitiveType.TINYINT, Byte.class)
.put(PrimitiveType.SMALLINT, Short.class)
.put(PrimitiveType.INT, Integer.class)
.put(PrimitiveType.FLOAT, Float.class)
.put(PrimitiveType.DOUBLE, Double.class)
.put(PrimitiveType.BIGINT, Long.class)
.put(PrimitiveType.CHAR, String.class)
.put(PrimitiveType.VARCHAR, String.class)
.build();
public CreateFunctionStmt(boolean isAggregate, FunctionName functionName, FunctionArgsDef argsDef,
TypeDef returnType, TypeDef intermediateType, Map<String, String> properties) {
this.functionName = functionName;
this.isAggregate = isAggregate;
this.argsDef = argsDef;
this.returnType = returnType;
this.intermediateType = intermediateType;
if (properties == null) {
this.properties = ImmutableSortedMap.of();
} else {
this.properties = ImmutableSortedMap.copyOf(properties, String.CASE_INSENSITIVE_ORDER);
}
}
public FunctionName getFunctionName() {
return functionName;
}
public Function getFunction() {
return function;
}
@Override
private void analyzeCommon(Analyzer analyzer) throws AnalysisException {
functionName.analyze(analyzer);
if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN)) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN");
}
argsDef.analyze(analyzer);
returnType.analyze(analyzer);
if (intermediateType != null) {
intermediateType.analyze(analyzer);
} else {
intermediateType = returnType;
}
String object_format = properties.get(TYPE_KEY);
if (OBJECT_FORMAT_STARROCKS_JAR.equals(object_format)) {
isStarrocksJar = true;
}
objectFile = properties.get(OBJECT_FILE_KEY);
if (Strings.isNullOrEmpty(objectFile)) {
throw new AnalysisException("No 'object_file' in properties");
}
try {
computeObjectChecksum();
} catch (IOException | NoSuchAlgorithmException e) {
throw new AnalysisException("cannot to compute object's checksum");
}
String md5sum = properties.get(MD5_CHECKSUM);
if (md5sum != null && !md5sum.equalsIgnoreCase(checksum)) {
throw new AnalysisException("library's checksum is not equal with input, checksum=" + checksum);
}
if (isStarrocksJar) {
analyzeUdfClassInStarrocksJar();
}
}
private void analyzeUdfClassInStarrocksJar() throws AnalysisException {
String class_name = properties.get(SYMBOL_KEY);
if (Strings.isNullOrEmpty(class_name)) {
throw new AnalysisException("No '" + SYMBOL_KEY + "' in properties");
}
try {
URL[] urls = {new URL("jar:" + objectFile + "!/")};
URLClassLoader cl = URLClassLoader.newInstance(urls);
udfClass = cl.loadClass(class_name);
} catch (MalformedURLException e) {
throw new AnalysisException("failed to load object_file: " + objectFile);
} catch (ClassNotFoundException e) {
throw new AnalysisException("class '" + class_name + "' not found in object_file :" + objectFile);
}
}
private void computeObjectChecksum() throws IOException, NoSuchAlgorithmException {
if (FeConstants.runningUnitTest) {
checksum = "";
return;
}
URL url = new URL(objectFile);
URLConnection urlConnection = url.openConnection();
InputStream inputStream = urlConnection.getInputStream();
MessageDigest digest = MessageDigest.getInstance("MD5");
byte[] buf = new byte[4096];
int bytesRead = 0;
do {
bytesRead = inputStream.read(buf);
if (bytesRead < 0) {
break;
}
digest.update(buf, 0, bytesRead);
} while (true);
checksum = Hex.encodeHexString(digest.digest());
}
private void analyzeUda() throws AnalysisException {
AggregateFunction.AggregateFunctionBuilder builder =
AggregateFunction.AggregateFunctionBuilder.createUdfBuilder();
builder.name(functionName).argsType(argsDef.getArgTypes()).retType(returnType.getType()).
hasVarArgs(argsDef.isVariadic()).intermediateType(intermediateType.getType()).objectFile(objectFile);
String initFnSymbol = properties.get(INIT_KEY);
if (initFnSymbol == null) {
throw new AnalysisException("No 'init_fn' in properties");
}
String updateFnSymbol = properties.get(UPDATE_KEY);
if (updateFnSymbol == null) {
throw new AnalysisException("No 'update_fn' in properties");
}
String mergeFnSymbol = properties.get(MERGE_KEY);
if (mergeFnSymbol == null) {
throw new AnalysisException("No 'merge_fn' in properties");
}
function = builder.build();
function.setChecksum(checksum);
}
private void analyzeUdf() throws AnalysisException {
String symbol = properties.get(SYMBOL_KEY);
if (Strings.isNullOrEmpty(symbol)) {
throw new AnalysisException("No 'symbol' in properties");
}
String prepareFnSymbol = properties.get(PREPARE_SYMBOL_KEY);
String closeFnSymbol = properties.get(CLOSE_SYMBOL_KEY);
function = ScalarFunction.createUdf(
functionName, argsDef.getArgTypes(),
returnType.getType(), argsDef.isVariadic(), TFunctionBinaryType.HIVE,
objectFile, symbol, prepareFnSymbol, closeFnSymbol);
function.setChecksum(checksum);
}
private void checkStarrocksJarUdfType(Type type, Class ptype, String pname) throws AnalysisException {
if (!(type instanceof ScalarType)) {
throw new AnalysisException("UDF does not support non-scalar type: " + type);
}
ScalarType scalarType = (ScalarType) type;
Class cls = PrimitiveTypeToJavaClassType.get(scalarType.getPrimitiveType());
if (cls == null) {
throw new AnalysisException("UDF does not support type: " + scalarType);
}
if (!cls.equals(ptype)) {
throw new AnalysisException(String.format("UDF %s[%s] type does not match %s", pname,
ptype.getCanonicalName(), cls.getCanonicalName()));
}
}
private void checkStarrocksJarUdfMethod(Method method) throws AnalysisException {
String name = method.getName();
boolean checked = true;
if (EVAL_METHOD_NAME.equals(name)) {
Class retType = method.getReturnType();
checkStarrocksJarUdfType(returnType.getType(), retType, "Return");
if (method.getParameters().length != argsDef.getArgTypes().length) {
throw new AnalysisException(String.format("UDF '%s' parameter count does not match", name));
}
for (int i = 0; i < method.getParameters().length; i++) {
Parameter p = method.getParameters()[i];
checkStarrocksJarUdfType(argsDef.getArgTypes()[i], p.getType(), p.getName());
}
} else {
checked = false;
}
if (checked) {
if (Modifier.isStatic(method.getModifiers())) {
throw new AnalysisException(String.format("UDF '%s' should be non-static method", name));
}
}
}
private void checkStarrocksJarUdfClass() throws AnalysisException {
int evalMethodCount = 0;
for (Method m : udfClass.getMethods()) {
if (EVAL_METHOD_NAME.equals(m.getName())) {
evalMethodCount += 1;
}
checkStarrocksJarUdfMethod(m);
}
if (evalMethodCount != 1) {
throw new AnalysisException(String.format("UDF should have only one '%s' method", EVAL_METHOD_NAME));
}
}
private void analyzeStarrocksJarUdf() throws AnalysisException {
checkStarrocksJarUdfClass();
function = ScalarFunction.createUdf(
functionName, argsDef.getArgTypes(),
returnType.getType(), argsDef.isVariadic(), TFunctionBinaryType.SRJAR,
objectFile, udfClass.getCanonicalName(), "", "");
function.setChecksum(checksum);
}
@Override
public String toSql() {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("CREATE ");
if (isAggregate) {
stringBuilder.append("AGGREGATE ");
}
stringBuilder.append("FUNCTION ");
stringBuilder.append(functionName.toString());
stringBuilder.append(argsDef.toSql());
stringBuilder.append(" RETURNS ");
stringBuilder.append(returnType.toString());
if (properties.size() > 0) {
stringBuilder.append(" PROPERTIES (");
int i = 0;
for (Map.Entry<String, String> entry : properties.entrySet()) {
if (i != 0) {
stringBuilder.append(", ");
}
stringBuilder.append('"').append(entry.getKey()).append('"');
stringBuilder.append("=");
stringBuilder.append('"').append(entry.getValue()).append('"');
i++;
}
stringBuilder.append(")");
}
return stringBuilder.toString();
}
@Override
public RedirectStatus getRedirectStatus() {
return RedirectStatus.FORWARD_WITH_SYNC;
}
} | class CreateFunctionStmt extends DdlStmt {
public static final String FILE_KEY = "file";
public static final String SYMBOL_KEY = "symbol";
public static final String MD5_CHECKSUM = "md5";
public static final String INIT_KEY = "init_fn";
public static final String UPDATE_KEY = "update_fn";
public static final String MERGE_KEY = "merge_fn";
public static final String TYPE_KEY = "type";
public static final String TYPE_STARROCKS_JAR = "StarrocksJar";
public static final String EVAL_METHOD_NAME = "evaluate";
private final FunctionName functionName;
private final boolean isAggregate;
private final FunctionArgsDef argsDef;
private final TypeDef returnType;
private TypeDef intermediateType;
private final Map<String, String> properties;
private boolean isStarrocksJar = false;
private String objectFile;
private Function function;
private String checksum;
private Class udfClass;
private static final ImmutableMap<PrimitiveType, Class> PrimitiveTypeToJavaClassType = new ImmutableMap.Builder<PrimitiveType, Class>()
.put(PrimitiveType.BOOLEAN, Boolean.class)
.put(PrimitiveType.TINYINT, Byte.class)
.put(PrimitiveType.SMALLINT, Short.class)
.put(PrimitiveType.INT, Integer.class)
.put(PrimitiveType.FLOAT, Float.class)
.put(PrimitiveType.DOUBLE, Double.class)
.put(PrimitiveType.BIGINT, Long.class)
.put(PrimitiveType.CHAR, String.class)
.put(PrimitiveType.VARCHAR, String.class)
.build();
public CreateFunctionStmt(boolean isAggregate, FunctionName functionName, FunctionArgsDef argsDef,
TypeDef returnType, TypeDef intermediateType, Map<String, String> properties) {
this.functionName = functionName;
this.isAggregate = isAggregate;
this.argsDef = argsDef;
this.returnType = returnType;
this.intermediateType = intermediateType;
if (properties == null) {
this.properties = ImmutableSortedMap.of();
} else {
this.properties = ImmutableSortedMap.copyOf(properties, String.CASE_INSENSITIVE_ORDER);
}
}
public FunctionName getFunctionName() {
return functionName;
}
public Function getFunction() {
return function;
}
/**
 * Shared analysis for both scalar and aggregate UDFs: checks the ADMIN
 * privilege, resolves argument/return/intermediate types, validates the
 * library location and checksum, and loads the UDF class when this is a
 * StarRocks jar UDF.
 *
 * Fix: removed the stray {@code @Override} annotation the original carried —
 * this method is private and overrides nothing, so the annotation is a
 * compile error.
 */
private void analyzeCommon(Analyzer analyzer) throws AnalysisException {
functionName.analyze(analyzer);
// Creating functions is an admin-only operation.
if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN)) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN");
}
argsDef.analyze(analyzer);
returnType.analyze(analyzer);
if (intermediateType != null) {
intermediateType.analyze(analyzer);
} else {
// No explicit intermediate type: reuse the return type.
intermediateType = returnType;
}
String type = properties.get(TYPE_KEY);
if (TYPE_STARROCKS_JAR.equals(type)) {
isStarrocksJar = true;
}
objectFile = properties.get(FILE_KEY);
if (Strings.isNullOrEmpty(objectFile)) {
// NOTE(review): message says 'object_file' but the property key is "file" — confirm which is intended.
throw new AnalysisException("No 'object_file' in properties");
}
try {
computeObjectChecksum();
} catch (IOException | NoSuchAlgorithmException e) {
throw new AnalysisException("cannot to compute object's checksum");
}
// If the caller supplied an md5 property, it must match the downloaded library.
String md5sum = properties.get(MD5_CHECKSUM);
if (md5sum != null && !md5sum.equalsIgnoreCase(checksum)) {
throw new AnalysisException("library's checksum is not equal with input, checksum=" + checksum);
}
if (isStarrocksJar) {
analyzeUdfClassInStarrocksJar();
}
}
/**
 * Loads the UDF class named by the SYMBOL_KEY property from the jar at
 * {@link #objectFile} into {@link #udfClass}.
 *
 * Fix: renamed the snake_case local {@code class_name} to the Java-idiomatic
 * {@code className}. (The URLClassLoader is intentionally left open: the
 * loaded class remains backed by it.)
 *
 * @throws AnalysisException if the symbol is missing, the jar URL is
 *         malformed, or the class cannot be found in the jar
 */
private void analyzeUdfClassInStarrocksJar() throws AnalysisException {
String className = properties.get(SYMBOL_KEY);
if (Strings.isNullOrEmpty(className)) {
throw new AnalysisException("No '" + SYMBOL_KEY + "' in properties");
}
try {
URL[] urls = {new URL("jar:" + objectFile + "!/")};
URLClassLoader cl = URLClassLoader.newInstance(urls);
udfClass = cl.loadClass(className);
} catch (MalformedURLException e) {
throw new AnalysisException("failed to load object_file: " + objectFile);
} catch (ClassNotFoundException e) {
throw new AnalysisException("class '" + className + "' not found in object_file :" + objectFile);
}
}
/**
 * Downloads the library at {@link #objectFile} and records its MD5 hex digest
 * in {@link #checksum}. Skipped (checksum left empty) when running unit tests.
 *
 * Fix: the original never closed the URL input stream, leaking it on both the
 * success and the exception path; the read loop is now try-with-resources and
 * the do/while is simplified to the conventional read-until-EOF while loop.
 *
 * @throws IOException              if the download fails
 * @throws NoSuchAlgorithmException if MD5 is unavailable (never on a standard JRE)
 */
private void computeObjectChecksum() throws IOException, NoSuchAlgorithmException {
if (FeConstants.runningUnitTest) {
// Unit tests do not fetch the object file.
checksum = "";
return;
}
URL url = new URL(objectFile);
URLConnection urlConnection = url.openConnection();
MessageDigest digest = MessageDigest.getInstance("MD5");
try (InputStream inputStream = urlConnection.getInputStream()) {
byte[] buf = new byte[4096];
int bytesRead;
while ((bytesRead = inputStream.read(buf)) >= 0) {
digest.update(buf, 0, bytesRead);
}
}
checksum = Hex.encodeHexString(digest.digest());
}
// Builds an aggregate (UDA) function from the statement's properties.
// NOTE(review): initFnSymbol/updateFnSymbol/mergeFnSymbol are validated for
// presence but never handed to the builder — confirm whether the builder is
// supposed to receive them (e.g. via initFnSymbol(...) setters).
private void analyzeUda() throws AnalysisException {
AggregateFunction.AggregateFunctionBuilder builder =
AggregateFunction.AggregateFunctionBuilder.createUdfBuilder();
builder.name(functionName).argsType(argsDef.getArgTypes()).retType(returnType.getType()).
hasVarArgs(argsDef.isVariadic()).intermediateType(intermediateType.getType()).objectFile(objectFile);
// The three native symbols are mandatory for an aggregate UDF.
String initFnSymbol = properties.get(INIT_KEY);
if (initFnSymbol == null) {
throw new AnalysisException("No 'init_fn' in properties");
}
String updateFnSymbol = properties.get(UPDATE_KEY);
if (updateFnSymbol == null) {
throw new AnalysisException("No 'update_fn' in properties");
}
String mergeFnSymbol = properties.get(MERGE_KEY);
if (mergeFnSymbol == null) {
throw new AnalysisException("No 'merge_fn' in properties");
}
function = builder.build();
function.setChecksum(checksum);
}
/**
 * Verifies that the SQL type {@code type} is a scalar type supported for jar
 * UDFs and that it maps exactly to the Java class {@code ptype} used by the
 * UDF's evaluate method for the parameter/return named {@code pname}.
 *
 * Fix: the raw {@code Class} parameter and local are now {@code Class<?>}
 * (idiomatic generics, no behavior change; callers passing raw Class still compile).
 *
 * @throws AnalysisException on a non-scalar type, an unsupported primitive,
 *         or a Java/SQL type mismatch
 */
private void checkStarrocksJarUdfType(Type type, Class<?> ptype, String pname) throws AnalysisException {
if (!(type instanceof ScalarType)) {
throw new AnalysisException("UDF does not support non-scalar type: " + type);
}
ScalarType scalarType = (ScalarType) type;
Class<?> cls = PrimitiveTypeToJavaClassType.get(scalarType.getPrimitiveType());
if (cls == null) {
throw new AnalysisException("UDF does not support type: " + scalarType);
}
if (!cls.equals(ptype)) {
throw new AnalysisException(String.format("UDF %s[%s] type does not match %s", pname,
ptype.getCanonicalName(), cls.getCanonicalName()));
}
}
// Validates one candidate method of the UDF class. Only methods named
// "evaluate" are checked; they must match the declared argument/return types
// and be public, non-static instance methods. Other methods are ignored.
private void checkStarrocksJarUdfMethod(Method method) throws AnalysisException {
String name = method.getName();
if (!EVAL_METHOD_NAME.equals(name)) {
return; // only the evaluate entry point is validated
}
checkStarrocksJarUdfType(returnType.getType(), method.getReturnType(), "Return");
Parameter[] params = method.getParameters();
Type[] declaredArgs = argsDef.getArgTypes();
if (params.length != declaredArgs.length) {
throw new AnalysisException(String.format("UDF '%s' parameter count does not match", name));
}
for (int i = 0; i < params.length; i++) {
checkStarrocksJarUdfType(declaredArgs[i], params[i].getType(), params[i].getName());
}
if (Modifier.isStatic(method.getModifiers())) {
throw new AnalysisException(String.format("UDF '%s' should be non-static method", name));
}
if (!Modifier.isPublic(method.getModifiers())) {
throw new AnalysisException(String.format("UDF '%s' should be public method", name));
}
}
// Ensures the loaded UDF class declares exactly one "evaluate" method and that
// each candidate passes signature validation.
private void checkStarrocksJarUdfClass() throws AnalysisException {
int evalMethodCount = 0;
for (Method method : udfClass.getMethods()) {
checkStarrocksJarUdfMethod(method);
if (EVAL_METHOD_NAME.equals(method.getName())) {
evalMethodCount++;
}
}
if (evalMethodCount != 1) {
throw new AnalysisException(String.format("UDF should have only one '%s' method", EVAL_METHOD_NAME));
}
}
// Validates the jar UDF class, then builds the scalar UDF descriptor for it.
private void analyzeStarrocksJarUdf() throws AnalysisException {
checkStarrocksJarUdfClass();
String className = udfClass.getCanonicalName();
function = ScalarFunction.createUdf(
functionName, argsDef.getArgTypes(),
returnType.getType(), argsDef.isVariadic(), TFunctionBinaryType.SRJAR,
objectFile, className, "", "");
function.setChecksum(checksum);
}
// Renders the statement back to SQL, including the PROPERTIES clause when any
// properties were supplied.
@Override
public String toSql() {
StringBuilder sb = new StringBuilder("CREATE ");
if (isAggregate) {
sb.append("AGGREGATE ");
}
sb.append("FUNCTION ")
.append(functionName.toString())
.append(argsDef.toSql())
.append(" RETURNS ")
.append(returnType.toString());
if (!properties.isEmpty()) {
sb.append(" PROPERTIES (");
boolean first = true;
for (Map.Entry<String, String> entry : properties.entrySet()) {
if (!first) {
sb.append(", ");
}
first = false;
sb.append('"').append(entry.getKey()).append("\"=\"").append(entry.getValue()).append('"');
}
sb.append(")");
}
return sb.toString();
}
// CREATE FUNCTION must be forwarded to the leader FE and executed synchronously.
@Override
public RedirectStatus getRedirectStatus() {
return RedirectStatus.FORWARD_WITH_SYNC;
}
} |
Maybe it's better to write the pending verification before we send the email? That way we won't send something that we then fail to write. | public PendingMailVerification sendMailVerification(TenantName tenantName, String email, PendingMailVerification.MailType mailType) {
var verificationCode = UUID.randomUUID().toString();
var verificationDeadline = clock.instant().plus(VERIFICATION_DEADLINE);
var pendingMailVerification = new PendingMailVerification(tenantName, email, verificationCode, verificationDeadline, mailType);
mailer.sendVerificationMail(pendingMailVerification);
writePendingVerification(pendingMailVerification);
return pendingMailVerification;
} | writePendingVerification(pendingMailVerification); | public PendingMailVerification sendMailVerification(TenantName tenantName, String email, PendingMailVerification.MailType mailType) {
if (!email.contains("@")) {
throw new IllegalArgumentException("Invalid email address");
}
var verificationCode = UUID.randomUUID().toString();
var verificationDeadline = clock.instant().plus(VERIFICATION_DEADLINE);
var pendingMailVerification = new PendingMailVerification(tenantName, email, verificationCode, verificationDeadline, mailType);
writePendingVerification(pendingMailVerification);
mailer.sendVerificationMail(pendingMailVerification);
return pendingMailVerification;
} | class MailVerifier {
private final TenantController tenantController;
private final Mailer mailer;
private final CuratorDb curatorDb;
// Clock is injected so the verification-deadline checks are controllable.
private final Clock clock;
// How long a verification code stays valid after the mail is sent.
private static final Duration VERIFICATION_DEADLINE = Duration.ofDays(7);
// All collaborators are supplied by the caller; no additional setup is done here.
public MailVerifier(TenantController tenantController, Mailer mailer, CuratorDb curatorDb, Clock clock) {
this.tenantController = tenantController;
this.mailer = mailer;
this.curatorDb = curatorDb;
this.clock = clock;
}
/**
 * Re-issues a verification mail: if a pending verification with the same
 * address, type and tenant exists, it is deleted (under its lock) and a fresh
 * one is created and sent. Returns empty when nothing matched.
 */
public Optional<PendingMailVerification> resendMailVerification(TenantName tenantName, String email, PendingMailVerification.MailType mailType) {
var match = curatorDb.listPendingMailVerifications()
.stream()
.filter(pending -> pending.getMailAddress().equals(email))
.filter(pending -> pending.getMailType().equals(mailType))
.filter(pending -> pending.getTenantName().equals(tenantName))
.findFirst();
if (match.isEmpty())
return Optional.empty();
var stale = match.get();
try (var lock = curatorDb.lockPendingMailVerification(stale.getVerificationCode())) {
curatorDb.deletePendingMailVerification(stale);
}
return Optional.of(sendMailVerification(tenantName, email, mailType));
}
// Consumes a verification code: when it exists and has not passed its
// deadline, flags the corresponding tenant-info e-mail(s) as verified, stores
// the updated tenant under its lock, and deletes the pending entry. Returns
// false for unknown or expired codes.
public boolean verifyMail(String verificationCode) {
return curatorDb.getPendingMailVerification(verificationCode)
.filter(pendingMailVerification -> pendingMailVerification.getVerificationDeadline().isAfter(clock.instant()))
.map(pendingMailVerification -> {
var tenant = requireCloudTenant(pendingMailVerification.getTenantName());
var oldTenantInfo = tenant.info();
// Which address gets flagged depends on what the verification was issued for.
var updatedTenantInfo = switch (pendingMailVerification.getMailType()) {
case NOTIFICATIONS -> withTenantContacts(oldTenantInfo, pendingMailVerification);
case TENANT_CONTACT -> oldTenantInfo.withContact(oldTenantInfo.contact()
.withEmail(oldTenantInfo.contact().email().withVerification(true)));
};
tenantController.lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(updatedTenantInfo);
tenantController.store(lockedTenant);
});
// Only after the tenant is stored is the pending entry removed (under its lock).
try (var lock = curatorDb.lockPendingMailVerification(pendingMailVerification.getVerificationCode())) {
curatorDb.deletePendingMailVerification(pendingMailVerification);
}
return true;
}).orElse(false);
}
// Returns a copy of oldInfo where the e-mail contact matching the verified
// address is flagged as verified; all other contacts are kept unchanged.
private TenantInfo withTenantContacts(TenantInfo oldInfo, PendingMailVerification pendingMailVerification) {
var verifiedAddress = pendingMailVerification.getMailAddress();
var updatedContacts = oldInfo.contacts().ofType(TenantContacts.EmailContact.class)
.stream()
.map(contact -> verifiedAddress.equals(contact.email().getEmailAddress())
? contact.withEmail(contact.email().withVerification(true))
: contact)
.toList();
return oldInfo.withContacts(new TenantContacts(updatedContacts));
}
/**
 * Persists the pending verification under its verification-code lock.
 *
 * Fix: the original bound the result of {@code requireCloudTenant} to an
 * unused local variable. The call is kept for its validation side effect
 * (it throws unless the tenant is a cloud tenant), but the dead binding is gone.
 */
private void writePendingVerification(PendingMailVerification pendingMailVerification) {
// Validation only: throws IllegalStateException for non-cloud tenants.
requireCloudTenant(pendingMailVerification.getTenantName());
try (var lock = curatorDb.lockPendingMailVerification(pendingMailVerification.getVerificationCode())) {
curatorDb.writePendingMailVerification(pendingMailVerification);
}
}
// Looks up the tenant and narrows it to CloudTenant; mail verification only
// exists for cloud tenants, so anything else is an illegal state.
private CloudTenant requireCloudTenant(TenantName tenantName) {
var cloudTenant = tenantController.get(tenantName)
.filter(tenant -> tenant.type() == Tenant.Type.cloud);
return cloudTenant.map(CloudTenant.class::cast)
.orElseThrow(() -> new IllegalStateException("Mail verification is only applicable for cloud tenants"));
}
} | class MailVerifier {
private final TenantController tenantController;
private final Mailer mailer;
private final CuratorDb curatorDb;
// Clock is injected so the verification-deadline checks are controllable.
private final Clock clock;
// How long a verification code stays valid after the mail is sent.
private static final Duration VERIFICATION_DEADLINE = Duration.ofDays(7);
// All collaborators are supplied by the caller; no additional setup is done here.
public MailVerifier(TenantController tenantController, Mailer mailer, CuratorDb curatorDb, Clock clock) {
this.tenantController = tenantController;
this.mailer = mailer;
this.curatorDb = curatorDb;
this.clock = clock;
}
// Re-issues a verification mail: looks for an existing pending verification
// with the same address, type and tenant; if found, deletes it under its lock
// and sends a fresh one. Returns empty when nothing matched.
public Optional<PendingMailVerification> resendMailVerification(TenantName tenantName, String email, PendingMailVerification.MailType mailType) {
var oldPendingVerification = curatorDb.listPendingMailVerifications()
.stream()
.filter(pendingMailVerification ->
pendingMailVerification.getMailAddress().equals(email) &&
pendingMailVerification.getMailType().equals(mailType) &&
pendingMailVerification.getTenantName().equals(tenantName)
).findFirst();
if (oldPendingVerification.isEmpty())
return Optional.empty();
// Delete the stale entry before issuing the replacement.
try (var lock = curatorDb.lockPendingMailVerification(oldPendingVerification.get().getVerificationCode())) {
curatorDb.deletePendingMailVerification(oldPendingVerification.get());
}
return Optional.of(sendMailVerification(tenantName, email, mailType));
}
// Consumes a verification code: when it exists and has not passed its
// deadline, flags the corresponding tenant-info e-mail(s) as verified, stores
// the updated tenant under its lock, and deletes the pending entry. Returns
// false for unknown or expired codes.
public boolean verifyMail(String verificationCode) {
return curatorDb.getPendingMailVerification(verificationCode)
.filter(pendingMailVerification -> pendingMailVerification.getVerificationDeadline().isAfter(clock.instant()))
.map(pendingMailVerification -> {
var tenant = requireCloudTenant(pendingMailVerification.getTenantName());
var oldTenantInfo = tenant.info();
// Which address gets flagged depends on what the verification was issued for.
var updatedTenantInfo = switch (pendingMailVerification.getMailType()) {
case NOTIFICATIONS -> withTenantContacts(oldTenantInfo, pendingMailVerification);
case TENANT_CONTACT -> oldTenantInfo.withContact(oldTenantInfo.contact()
.withEmail(oldTenantInfo.contact().email().withVerification(true)));
};
tenantController.lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(updatedTenantInfo);
tenantController.store(lockedTenant);
});
// Only after the tenant is stored is the pending entry removed (under its lock).
try (var lock = curatorDb.lockPendingMailVerification(pendingMailVerification.getVerificationCode())) {
curatorDb.deletePendingMailVerification(pendingMailVerification);
}
return true;
}).orElse(false);
}
// Returns a copy of oldInfo in which the contact whose address matches the
// pending verification is marked verified; other contacts pass through as-is.
private TenantInfo withTenantContacts(TenantInfo oldInfo, PendingMailVerification pendingMailVerification) {
var address = pendingMailVerification.getMailAddress();
var contacts = oldInfo.contacts().ofType(TenantContacts.EmailContact.class)
.stream()
.map(contact -> {
var email = contact.email();
return address.equals(email.getEmailAddress()) ? contact.withEmail(email.withVerification(true)) : contact;
}).toList();
return oldInfo.withContacts(new TenantContacts(contacts));
}
// Stores the pending verification, guarded by its verification-code lock.
private void writePendingVerification(PendingMailVerification pendingMailVerification) {
var code = pendingMailVerification.getVerificationCode();
try (var lock = curatorDb.lockPendingMailVerification(code)) {
curatorDb.writePendingMailVerification(pendingMailVerification);
}
}
// Fetches the tenant and narrows it to CloudTenant; throws IllegalStateException
// for non-cloud tenants, since only cloud tenants support mail verification.
private CloudTenant requireCloudTenant(TenantName tenantName) {
return tenantController.get(tenantName)
.filter(tenant -> tenant.type() == Tenant.Type.cloud)
.map(CloudTenant.class::cast)
.orElseThrow(() -> new IllegalStateException("Mail verification is only applicable for cloud tenants"));
}
} |
Check this before sending the verification email? | private TenantContact updateTenantInfoContact(Inspector insp, TenantName tenantName, TenantContact oldContact, boolean isBillingContact) {
if (!insp.valid()) return oldContact;
var mergedEmail = optional("email", insp)
.filter(address -> !address.equals(oldContact.email().getEmailAddress()))
.map(address -> {
if (isBillingContact)
return new Email(address, true);
controller.mailVerifier().sendMailVerification(tenantName, address, PendingMailVerification.MailType.TENANT_CONTACT);
return new Email(address, false);
})
.orElse(oldContact.email());
if (!mergedEmail.getEmailAddress().isBlank() && !mergedEmail.getEmailAddress().contains("@")) {
throw new IllegalArgumentException("'email' needs to be an email address");
}
return TenantContact.empty()
.withName(getString(insp.field("name"), oldContact.name()))
.withEmail(mergedEmail)
.withPhone(getString(insp.field("phone"), oldContact.phone()));
} | if (!mergedEmail.getEmailAddress().isBlank() && !mergedEmail.getEmailAddress().contains("@")) { | private TenantContact updateTenantInfoContact(Inspector insp, TenantName tenantName, TenantContact oldContact, boolean isBillingContact) {
if (!insp.valid()) return oldContact;
var mergedEmail = optional("email", insp)
.filter(address -> !address.equals(oldContact.email().getEmailAddress()))
.map(address -> {
if (isBillingContact)
return new Email(address, true);
controller.mailVerifier().sendMailVerification(tenantName, address, PendingMailVerification.MailType.TENANT_CONTACT);
return new Email(address, false);
})
.orElse(oldContact.email());
return TenantContact.empty()
.withName(getString(insp.field("name"), oldContact.name()))
.withEmail(mergedEmail)
.withPhone(getString(insp.field("phone"), oldContact.phone()));
} | class ApplicationApiHandler extends AuditLoggingRequestHandler {
// Shared Jackson mapper for JSON payloads.
private static final ObjectMapper jsonMapper = new ObjectMapper();
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
// Container-injected constructor; audit logging is wired via the superclass.
@Inject
public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx, controller.auditLogger());
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
@Override
public Duration getTimeout() {
// 20 minutes — presumably to accommodate long-running requests such as
// deployments; confirm before tightening.
return Duration.ofMinutes(20);
}
// Entry point for audited requests: dispatches on HTTP method and translates
// known exception types into the matching HTTP error responses.
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
try {
Path path = new Path(request.getUri());
return switch (request.getMethod()) {
case GET -> handleGET(path, request);
case PUT -> handlePUT(path, request);
case POST -> handlePOST(path, request);
case PATCH -> handlePATCH(path, request);
case DELETE -> handleDELETE(path, request);
case OPTIONS -> handleOPTIONS();
default -> ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
};
}
catch (RestApiException.Forbidden e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (RestApiException.Unauthorized e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
// Map config-server error codes onto client-facing statuses.
return switch (e.code()) {
case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e);
default -> new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
};
}
catch (RuntimeException e) {
// Anything unexpected is logged and returned as an internal error.
return ErrorResponses.logThrowing(request, log, e);
}
}
// GET router: dispatches on exact path patterns. Segments in {braces} bind
// path parameters; {*} captures the remaining path. First match wins; an
// unmatched path yields 404.
private HttpResponse handleGET(Path path, HttpRequest request) {
// Root and tenant-level resources.
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
// Application-level resources.
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
// Instance- and job-level resources.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
// Deployment-level resources ("…/instance/{instance}/environment/…" form).
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
// Legacy deployment paths ("…/environment/…/instance/{instance}" form).
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Routes PUT requests to the matching handler based on the request path.
// Note: /archive-access (no suffix) is the legacy alias for /archive-access/aws — both map
// to allowAwsArchiveAccess. The two global-rotation/override paths cover both path-segment
// orderings (instance-before-environment and the legacy environment-before-instance form).
private HttpResponse handlePUT(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
if (path.matches("/application/v4/tenant/{tenant}/info/resend-mail-verification")) return withCloudTenant(path.get("tenant"), request, this::resendEmailVerification);
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Routes POST requests to the matching handler based on the request path.
// Deployment-related paths appear in two orderings: instance-before-environment and the
// legacy environment-before-instance form; both dispatch to the same handlers.
// Application-level deploying/submit paths use the "default" instance implicitly.
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Routes PATCH requests. Both the application-level and the instance-level path resolve to
// the same handler: patching is done at application scope, so the instance segment is ignored.
private HttpResponse handlePATCH(Path path, HttpRequest request) {
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}"))
        return patchApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}"))
        return patchApplication(path.get("tenant"), path.get("application"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
// Routes DELETE requests to the matching handler based on the request path.
// /archive-access (no suffix) is the legacy alias for /archive-access/aws. Application-level
// deploying paths act on the "default" instance; deactivation and global-rotation paths
// exist in both segment orderings (instance-before-environment and the legacy reverse form).
private HttpResponse handleDELETE(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Answers CORS/preflight OPTIONS with the set of verbs this handler supports; no body.
private HttpResponse handleOPTIONS() {
    var optionsResponse = new EmptyResponse();
    optionsResponse.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return optionsResponse;
}
// Lists all tenants (optionally including deleted ones, per the request) with each tenant's
// applications nested inside — the recursive form of the root resource.
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    // Fetch all applications once, then partition per tenant, instead of querying per tenant.
    List<Application> applications = controller.applications().asList();
    for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
        toSlime(tenantArray.addObject(),
                tenant,
                // .toList() (used elsewhere in this file) instead of collect(toList());
                // the list is only read by toSlime, so the unmodifiable result is fine.
                applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).toList(),
                request);
    return new SlimeJsonResponse(slime);
}
// Root resource: either a full recursive listing of tenants and applications, or just
// links to the "tenant" sub-resource, depending on the request's recursion property.
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
// Lists all tenants (optionally including deleted ones) in the short list form.
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList(includeDeleted(request))
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
// Looks up a tenant by name (optionally including deleted ones) and renders it,
// or responds 404 if it does not exist.
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName), includeDeleted(request));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
// Renders a single tenant together with its applications.
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    var slime = new Slime();
    var root = slime.setObject();
    toSlime(root, tenant, controller.applications().asList(tenant.name()), request);
    return new SlimeJsonResponse(slime);
}
// Returns the ssh access-request state for a cloud tenant: whether access is operator-managed,
// any pending request (creation time and reason), and the audit log of past decisions.
// Non-cloud tenants get 400, since access requests only exist for cloud tenants.
private HttpResponse accessRequests(String tenantName, HttpRequest request) {
var tenant = TenantName.from(tenantName);
if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
var accessControlService = controller.serviceRegistry().accessControlService();
var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
var managedAccess = accessControlService.getManagedAccess(tenant);
var slime = new Slime();
var cursor = slime.setObject();
cursor.setBool("managedAccess", managedAccess);
// "pendingRequest" is only present when there is an outstanding request.
accessRoleInformation.getPendingRequest()
.ifPresent(membershipRequest -> {
var requestCursor = cursor.setObject("pendingRequest");
requestCursor.setString("requestTime", membershipRequest.getCreationTime());
requestCursor.setString("reason", membershipRequest.getReason());
});
// "auditLog" is always present, possibly empty.
var auditLogCursor = cursor.setArray("auditLog");
accessRoleInformation.getAuditLog()
.forEach(auditLogEntry -> {
var entryCursor = auditLogCursor.addObject();
entryCursor.setString("created", auditLogEntry.getCreationTime());
entryCursor.setString("approver", auditLogEntry.getApprover());
entryCursor.setString("reason", auditLogEntry.getReason());
entryCursor.setString("status", auditLogEntry.getAction());
});
return new SlimeJsonResponse(slime);
}
// Files an ssh access request for a cloud tenant. Restricted to operators (403 otherwise);
// non-cloud tenants get 400.
private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
    if ( ! isOperator(request))
        return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only request access for cloud tenants");
    controller.serviceRegistry().accessControlService().requestSshAccess(tenant);
    return new MessageResponse("OK");
}
// Approves or rejects a pending ssh access request for a cloud tenant, on behalf of the
// authenticated (Auth0) caller. The request body may carry "expiry" (epoch millis) for the
// granted access; when absent, access expires one day from now. "approve" selects the decision.
private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    var inspector = toSlime(request.getData()).get();
    var expiryField = inspector.field("expiry");
    var expiry = expiryField.valid() ? Instant.ofEpochMilli(expiryField.asLong())
                                     : Instant.now().plus(1, ChronoUnit.DAYS);
    var approve = inspector.field("approve").asBool();
    var credentials = OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context());
    controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, credentials, approve);
    return new MessageResponse("OK");
}
// Turns on operator-managed ssh access for the tenant; delegates to setManagedAccess.
private HttpResponse addManagedAccess(String tenantName) {
return setManagedAccess(tenantName, true);
}
// Turns off operator-managed ssh access for the tenant; delegates to setManagedAccess.
private HttpResponse removeManagedAccess(String tenantName) {
return setManagedAccess(tenantName, false);
}
// Sets whether ssh access for the given cloud tenant is operator-managed, and echoes the new
// state back as {"managedAccess": <bool>}. Non-cloud tenants get 400.
private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only set access privileges for cloud tenants"); // fixed typo: "privel"
    controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
    var slime = new Slime();
    slime.setObject().setBool("managedAccess", managedAccess);
    return new SlimeJsonResponse(slime);
}
// Renders the stored tenant info for a cloud tenant; 404 when the tenant is missing
// or is not a cloud tenant (only cloud tenants carry info).
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    return controller.tenants().get(TenantName.from(tenantName))
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(CloudTenant.class::cast)
                     .map(tenant -> tenantInfo(tenant.info(), request))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
// Resolves the named tenant, verifies it is a cloud tenant, and applies the handler to it;
// 404 when the tenant is missing or not a cloud tenant.
private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
    return controller.tenants().get(TenantName.from(tenantName))
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(CloudTenant.class::cast)
                     .map(handler)
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
// Serializes the legacy flat tenant-info view: name, emails, website, primary contact
// (with verification state), plus nested address, billingContact and contacts objects.
// An empty info yields an empty JSON object.
// NOTE(review): the request parameter is unused here — presumably kept for signature
// symmetry with the other tenantInfo overloads; confirm before removing.
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
Slime slime = new Slime();
Cursor infoCursor = slime.setObject();
if (!info.isEmpty()) {
infoCursor.setString("name", info.name());
infoCursor.setString("email", info.email());
infoCursor.setString("website", info.website());
infoCursor.setString("contactName", info.contact().name());
infoCursor.setString("contactEmail", info.contact().email().getEmailAddress());
infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified());
toSlime(info.address(), infoCursor);
toSlime(info.billingContact(), infoCursor);
toSlime(info.contacts(), infoCursor);
}
return new SlimeJsonResponse(slime);
}
// Serializes the "profile" view of a cloud tenant's info: a "contact" object (name, email,
// verification state), a "tenant" object (company name, website), and the address.
// An empty info yields an empty JSON object.
private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
var slime = new Slime();
var root = slime.setObject();
var info = cloudTenant.info();
if (!info.isEmpty()) {
var contact = root.setObject("contact");
contact.setString("name", info.contact().name());
contact.setString("email", info.contact().email().getEmailAddress());
contact.setBool("emailVerified", info.contact().email().isVerified());
var tenant = root.setObject("tenant");
// In this view the tenant's "name" field is exposed as "company".
tenant.setString("company", info.name());
tenant.setString("website", info.website());
toSlime(info.address(), root);
}
return new SlimeJsonResponse(slime);
}
// Resolves the named tenant, verifies it is a cloud tenant, and applies the handler to the
// tenant plus the parsed request body; 404 when the tenant is missing or not a cloud tenant.
private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
    return controller.tenants().get(tenantName)
                     // Guard the cast below: without this filter a non-cloud tenant caused a
                     // ClassCastException instead of the 404 promised by the error message
                     // (the Function-based withCloudTenant overload already filters this way).
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get()))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
// Merges the profile fields from the request body into the tenant's stored info and persists
// the result under the tenant lock. Fields absent from the body keep their stored values.
private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
var info = cloudTenant.info();
// A changed contact email triggers a verification mail and is stored as unverified;
// an unchanged email keeps its existing verification state.
var mergedEmail = optional("email", inspector.field("contact"))
.filter(address -> !address.equals(info.contact().email().getEmailAddress()))
.map(address -> {
controller.mailVerifier().sendMailVerification(cloudTenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT)
;
return new Email(address, false);
})
.orElse(info.contact().email());
var mergedContact = TenantContact.empty()
.withName(getString(inspector.field("contact").field("name"), info.contact().name()))
.withEmail(mergedEmail);
var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address());
var mergedInfo = info
.withName(getString(inspector.field("tenant").field("name"), info.name()))
.withWebsite(getString(inspector.field("tenant").field("website"), info.website()))
.withContact(mergedContact)
.withAddress(mergedAddress);
// Reject invalid results (blank contact name/email, malformed email or website) before storing.
validateMergedTenantInfo(mergedInfo);
controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
// Serializes the "billing" view of a cloud tenant's info: the billing contact
// (name, email, phone) and the billing address. An empty info yields an empty JSON object.
private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) {
var slime = new Slime();
var root = slime.setObject();
var info = cloudTenant.info();
if (!info.isEmpty()) {
var billingContact = info.billingContact();
var contact = root.setObject("contact");
contact.setString("name", billingContact.contact().name());
contact.setString("email", billingContact.contact().email().getEmailAddress());
contact.setString("phone", billingContact.contact().phone());
toSlime(billingContact.address(), root);
}
return new SlimeJsonResponse(slime);
}
// Merges billing contact and billing address from the request body into the tenant's stored
// info and persists the result under the tenant lock. Fields absent from the body keep their
// stored values.
private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    var contact = info.billingContact().contact();
    // (Removed an unused local that duplicated info.billingContact().address().)
    var mergedContact = updateTenantInfoContact(inspector.field("contact"), cloudTenant.name(), contact, false);
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address());
    var mergedBilling = info.billingContact()
                            .withContact(mergedContact)
                            .withAddress(mergedAddress);
    var mergedInfo = info.withBilling(mergedBilling);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
// Serializes only the tenant's contact list, as {"contacts": [...]}.
private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
    Slime slime = new Slime();
    toSlime(cloudTenant.info().contacts(), slime.setObject());
    return new SlimeJsonResponse(slime);
}
// Replaces the tenant's contact list with the merged result of the request body's "contacts"
// field and the stored contacts, then persists the info under the tenant lock.
private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
    TenantContacts mergedContacts = updateTenantInfoContacts(inspector.field("contacts"),
                                                             cloudTenant.name(),
                                                             cloudTenant.info().contacts());
    TenantInfo mergedInfo = cloudTenant.info().withContacts(mergedContacts);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withInfo(mergedInfo)));
    return new MessageResponse("Tenant info updated");
}
// Validates merged tenant info before it is stored, throwing IllegalArgumentException
// (surfaced to the client as 400 Bad Request) on the first violation:
// contact name and email must be non-blank, the email must contain '@',
// and a non-blank website must parse as a URL.
private void validateMergedTenantInfo(TenantInfo mergedInfo) {
if (mergedInfo.contact().name().isBlank()) {
throw new IllegalArgumentException("'contactName' cannot be empty");
}
if (mergedInfo.contact().email().getEmailAddress().isBlank()) {
throw new IllegalArgumentException("'contactEmail' cannot be empty");
}
if (! mergedInfo.contact().email().getEmailAddress().contains("@")) {
throw new IllegalArgumentException("'contactEmail' needs to be an email address");
}
if (! mergedInfo.website().isBlank()) {
try {
// NOTE(review): new URL(...) only checks for a known protocol and basic syntax,
// so many malformed addresses still pass — consider URI-based validation.
new URL(mergedInfo.website());
} catch (MalformedURLException e) {
throw new IllegalArgumentException("'website' needs to be a valid address");
}
}
}
// Writes the address as an "address" object under the given cursor; writes nothing for an
// empty address.
private void toSlime(TenantAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;
    Cursor cursor = parentCursor.setObject("address");
    cursor.setString("addressLines", address.address());
    cursor.setString("postalCodeOrZip", address.code());
    cursor.setString("city", address.city());
    cursor.setString("stateRegionProvince", address.region());
    cursor.setString("country", address.country());
}
// Writes the billing contact as a "billingContact" object (name, email, phone, address)
// under the given cursor; writes nothing for an empty billing contact.
private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;
    Cursor cursor = parentCursor.setObject("billingContact");
    cursor.setString("name", billingContact.contact().name());
    cursor.setString("email", billingContact.contact().email().getEmailAddress());
    cursor.setString("phone", billingContact.contact().phone());
    toSlime(billingContact.address(), cursor);
}
// Writes all contacts as a "contacts" array under the given cursor: each entry carries its
// audiences and its type-specific fields. Only email contacts are supported; any other type
// fails fast rather than being silently dropped.
private void toSlime(TenantContacts contacts, Cursor parentCursor) {
    Cursor contactsCursor = parentCursor.setArray("contacts");
    contacts.all().forEach(contact -> {
        Cursor contactCursor = contactsCursor.addObject();
        Cursor audiencesArray = contactCursor.setArray("audiences");
        contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
        switch (contact.type()) {
            case EMAIL -> {
                var email = (TenantContacts.EmailContact) contact;
                contactCursor.setString("email", email.email().getEmailAddress());
                contactCursor.setBool("emailVerified", email.email().isVerified());
            }
            default -> throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
        }
    });
}
// Parses the wire name of a contact audience; unknown names are a 400 to the client.
private static TenantContacts.Audience fromAudience(String value) {
    return switch (value) {
        case "tenant"        -> TenantContacts.Audience.TENANT;
        case "notifications" -> TenantContacts.Audience.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
    };
}
// Renders a contact audience as its wire name (inverse of fromAudience); the switch is
// exhaustive over the enum, so no default is needed.
private static String toAudience(TenantContacts.Audience audience) {
    return switch (audience) {
        case TENANT        -> "tenant";
        case NOTIFICATIONS -> "notifications";
    };
}
// Applies a tenant-info update to a cloud tenant; 404 when the tenant is missing
// or is not a cloud tenant.
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    return controller.tenants().get(TenantName.from(tenantName))
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(CloudTenant.class::cast)
                     .map(tenant -> updateTenantInfo(tenant, request))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
// Returns the field's trimmed string value when present, else the given default.
// (Fixed misspelled parameter name "defaultVale".)
private String getString(Inspector field, String defaultValue) {
    return field.valid() ? field.asString().trim() : defaultValue;
}
// Merges the legacy flat tenant-info fields from the request body into the stored info and
// persists the result under the tenant lock. Fields absent from the body keep their stored
// values.
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
TenantInfo oldInfo = tenant.info();
Inspector insp = toSlime(request.getData()).get();
// A changed contact email triggers a verification mail and is stored as unverified;
// an unchanged email keeps its existing verification state.
var mergedEmail = optional("contactEmail", insp)
.filter(address -> !address.equals(oldInfo.contact().email().getEmailAddress()))
.map(address -> {
controller.mailVerifier().sendMailVerification(tenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT);
return new Email(address, false);
})
.orElse(oldInfo.contact().email());
TenantContact mergedContact = TenantContact.empty()
.withName(getString(insp.field("contactName"), oldInfo.contact().name()))
.withEmail(mergedEmail);
TenantInfo mergedInfo = TenantInfo.empty()
.withName(getString(insp.field("name"), oldInfo.name()))
.withEmail(getString(insp.field("email"), oldInfo.email()))
.withWebsite(getString(insp.field("website"), oldInfo.website()))
.withContact(mergedContact)
.withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
.withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), tenant.name(), oldInfo.billingContact()))
.withContacts(updateTenantInfoContacts(insp.field("contacts"), tenant.name(), oldInfo.contacts()));
// Reject invalid results (blank contact name/email, malformed email or website) before storing.
validateMergedTenantInfo(mergedInfo);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
// Merges address fields from the request body into the old address. An invalid (absent)
// inspector leaves the old address untouched. The merged address must be all-or-nothing:
// either every field is blank (no address) or every field is set; a partial address is
// rejected with IllegalArgumentException (surfaced as 400).
private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
if (!insp.valid()) return oldAddress;
TenantAddress address = TenantAddress.empty()
.withCountry(getString(insp.field("country"), oldAddress.country()))
.withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
.withCity(getString(insp.field("city"), oldAddress.city()))
.withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
.withAddress(getString(insp.field("addressLines"), oldAddress.address()));
List<String> fields = List.of(address.address(),
address.code(),
address.country(),
address.city(),
address.region());
// Accept only the two consistent states: all blank, or none blank.
if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank))
return address;
throw new IllegalArgumentException("All address fields must be set");
}
// Merges billing contact and billing address from the request body into the old billing
// info; an invalid (absent) inspector leaves the old billing info untouched.
private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantName tenantName, TenantBilling oldContact) {
    if ( ! insp.valid()) return oldContact;
    TenantContact mergedContact = updateTenantInfoContact(insp, tenantName, oldContact.contact(), true);
    TenantAddress mergedAddress = updateTenantInfoAddress(insp.field("address"), oldContact.address());
    return TenantBilling.empty().withContact(mergedContact).withAddress(mergedAddress);
}
// Builds the new contact list from the request body; an invalid (absent) inspector leaves
// the old contacts untouched. Each entry must have an email containing '@' (else 400).
// An address already present in the old contacts keeps its Email object (and thus its
// verification state); a new address triggers a verification mail and is stored unverified.
private TenantContacts updateTenantInfoContacts(Inspector insp, TenantName tenantName, TenantContacts oldContacts) {
if (!insp.valid()) return oldContacts;
List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> {
String email = inspector.field("email").asString().trim();
List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences"))
.map(audience -> fromAudience(audience.asString()))
.toList();
if (!email.contains("@")) {
throw new IllegalArgumentException("'email' needs to be an email address");
}
return oldContacts.ofType(TenantContacts.EmailContact.class)
.stream()
.filter(contact -> contact.email().getEmailAddress().equals(email))
.findAny()
.map(emailContact -> new TenantContacts.EmailContact(audiences, emailContact.email()))
.orElseGet(() -> {
controller.mailVerifier().sendMailVerification(tenantName, email, PendingMailVerification.MailType.NOTIFICATIONS);
return new TenantContacts.EmailContact(audiences, new Email(email, false));
});
}).toList();
return new TenantContacts(contacts);
}
/**
 * Lists notifications as JSON, for a single tenant when {@code tenant} is present,
 * otherwise across all tenants that have notifications.
 * Optional request properties "application", "instance", "zone", "job", "type" and "level"
 * each narrow the result; "excludeMessages=true" omits the message bodies.
 */
private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
boolean productionOnly = showOnlyProductionInstances(request);
boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
Slime slime = new Slime();
Cursor notificationsArray = slime.setObject().setArray("notifications");
// Either the single given tenant, or every tenant with notifications.
tenant.map(t -> Stream.of(TenantName.from(t)))
.orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
.flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
// Each propertyEquals is a no-op filter when its request property is absent.
.filter(notification ->
propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
.forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
return new SlimeJsonResponse(slime);
}
/**
 * Returns whether the given request property, mapped with {@code mapper}, equals {@code value}.
 * A missing request property imposes no constraint and yields true.
 */
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
    String raw = request.getProperty(property);
    if (raw == null) return true; // property not given: no filtering on it
    return value.isPresent() && mapper.apply(raw).equals(value.get());
}
/**
 * Serializes one notification into the given cursor.
 * Optional source fields (application, instance, zone, cluster, job, run) are
 * written only when present; messages are omitted when {@code excludeMessages}.
 */
private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
cursor.setLong("at", notification.at().toEpochMilli());
cursor.setString("level", notificationLevelAsString(notification.level()));
cursor.setString("type", notificationTypeAsString(notification.type()));
if (!excludeMessages) {
Cursor messagesArray = cursor.setArray("messages");
notification.messages().forEach(messagesArray::addString);
}
// Tenant is implied by the request path in per-tenant listings, so it is optional here.
if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
notification.source().zoneId().ifPresent(zoneId -> {
cursor.setString("environment", zoneId.environment().value());
cursor.setString("region", zoneId.region().value());
});
notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
/** Maps a notification type to its wire name; submission and applicationPackage share one name. */
private static String notificationTypeAsString(Notification.Type type) {
    return switch (type) {
        case submission, applicationPackage -> "applicationPackage";
        case testPackage                    -> "testPackage";
        case deployment                     -> "deployment";
        case feedBlock                      -> "feedBlock";
        case reindex                        -> "reindex";
    };
}
/** Maps a notification level to its wire name. */
private static String notificationLevelAsString(Notification.Level level) {
    return switch (level) {
        case info    -> "info";
        case warning -> "warning";
        case error   -> "error";
    };
}
/**
 * Lists applications for a tenant (or a single named application) with their instances,
 * each annotated with a self URL built from the request URI.
 *
 * @throws NotExistsException if the named application does not exist
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
getTenantOrThrow(tenantName); // 404 for unknown tenants before listing
List<Application> applications = applicationName.isEmpty() ?
controller.applications().asList(tenant) :
controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
.map(List::of)
.orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (Application application : applications) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
// Production-only view is toggled by a request property, see showOnlyProductionInstances.
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
return new SlimeJsonResponse(slime);
}
/**
 * Serves the application package deployed by the last run of the given dev/perf job, as a zip.
 *
 * @throws NotExistsException if the job has never run for this application
 */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    ZoneId zone = type.zone();
    // Previously used Optional.get(), which threw a raw NoSuchElementException (500)
    // when no run existed; report a proper not-found instead.
    RevisionId revision = controller.jobController().last(id, type)
                                    .orElseThrow(() -> new NotExistsException("No run of " + type.jobName() + " found for " + id))
                                    .versions().targetRevision();
    byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
    return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
}
/**
 * Serves the diff between the package of the given dev run and the previous one.
 *
 * @throws NotExistsException if no diff is stored for the run
 */
private HttpResponse devApplicationPackageDiff(RunId runId) {
    DeploymentId deployment = new DeploymentId(runId.application(), runId.job().type().zone());
    return controller.applications().applicationStore()
                     .getDevDiff(deployment, runId.number())
                     .map(diff -> new ByteArrayResponse(diff))
                     .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId));
}
/**
 * Serves a submitted application (or tester) package as a zip file.
 * The "build" request property selects the package: a positive build number,
 * "latestDeployed" for the newest revision deployed to production, or, when absent,
 * the last submitted revision. "tests=true" serves the tester package instead.
 *
 * @throws NotExistsException       if no matching package exists
 * @throws IllegalArgumentException if "build" is not a valid number or is below 1
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    final long build;
    String requestedBuild = request.getProperty("build");
    if (requestedBuild != null) {
        if (requestedBuild.equals("latestDeployed")) {
            build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision()
                              .map(RevisionId::number)
                              .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication));
        } else {
            try {
                // Parse the value already read above; previously this re-read the request property.
                build = Validation.requireAtLeast(Long.parseLong(requestedBuild), "build number", 1L);
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
            }
        }
    } else {
        build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
                          .map(version -> version.id().number())
                          .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
    }
    RevisionId revision = RevisionId.forProduction(build);
    boolean tests = request.getBooleanProperty("tests");
    byte[] applicationPackage = tests ?
                                controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
                                controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
    String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
    return new ZipResponse(filename, applicationPackage);
}
/**
 * Serves the stored diff for the given build of an application package.
 *
 * @throws NotExistsException if no diff is stored for that build
 */
private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
    long buildNumber = Long.parseLong(number);
    return controller.applications().applicationStore()
                     .getDiff(id.tenant(), id.application(), buildNumber)
                     .map(ByteArrayResponse::new)
                     .orElseThrow(() -> new NotExistsException("No application package diff found for '" + id + "' with build number " + number));
}
/** Serves a single application as JSON; 404s via getApplication when it does not exist. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Application application = getApplication(tenantName, applicationName);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, application, request);
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the Vespa version the application should compile against,
 * optionally capped by an "allowMajor" major-version parameter.
 *
 * @throws IllegalArgumentException if {@code allowMajorParam} is not an integer
 */
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
    OptionalInt allowMajor = OptionalInt.empty();
    if (allowMajorParam != null) {
        try {
            allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
        }
        catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
        }
    }
    Version version = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
    Slime slime = new Slime();
    slime.setObject().setString("compileVersion", version.toFullString());
    return new SlimeJsonResponse(slime);
}
/** Serves a single instance, with its deployment status, as JSON. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = getInstance(tenantName, applicationName, instanceName);
    DeploymentStatus status = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
    Slime slime = new Slime();
    toSlime(slime.setObject(), instance, status, request);
    return new SlimeJsonResponse(slime);
}
/**
 * Registers the PEM public key in the request body as a developer key for the calling user,
 * and returns the resulting key list.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Principal user = request.getJDiscRequest().getUserPrincipal();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, locked -> {
        locked = locked.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), locked.get().developerKeys());
        controller.tenants().store(locked);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Asks the config server in the given zone to validate a tenant secret store
 * against a concrete AWS region and parameter name, and relays the result.
 * Returns 400 if the application id does not belong to the tenant, and 404 if
 * the named secret store is not configured.
 */
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
var awsRegion = request.getProperty("aws-region");
var parameterName = request.getProperty("parameter-name");
var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
if (!applicationId.tenant().equals(TenantName.from(tenantName)))
return ErrorResponse.badRequest("Invalid application id");
var zoneId = requireZone(ZoneId.from(request.getProperty("zone")));
var deploymentId = new DeploymentId(applicationId, zoneId);
var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
var tenantSecretStore = tenant.tenantSecretStores()
.stream()
.filter(secretStore -> secretStore.getName().equals(secretStoreName))
.findFirst();
if (tenantSecretStore.isEmpty())
return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
try {
// Wrap the config server's JSON result in { target, result } for the client.
var responseRoot = new Slime();
var responseCursor = responseRoot.setObject();
responseCursor.setString("target", deploymentId.toString());
var responseResultCursor = responseCursor.setObject("result");
var responseSlime = SlimeUtils.jsonToSlime(response);
SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
return new SlimeJsonResponse(responseRoot);
} catch (JsonParseException e) {
// Config server answered with something that is not JSON; log and return 500.
return ErrorResponses.logThrowing(request, log, e);
}
}
/**
 * Removes the PEM public key in the request body from the tenant's developer keys,
 * and returns the resulting key list.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // Note: the previous version also looked up the key's owning Principal here,
    // but never used it — that dead lookup (and its extra tenant read) is removed.
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/** Writes each (public key, owner) pair as a { key, user } object into the given array. */
private void toSlime(Cursor keysArray, Map<PublicKey, ? extends Principal> keys) {
    for (Map.Entry<PublicKey, ? extends Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/** Adds the PEM public key in the request body as a deploy key, and returns the resulting key list. */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), locked -> {
        locked = locked.withDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        for (PublicKey key : locked.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(locked);
    });
    return new SlimeJsonResponse(root);
}
/** Removes the PEM public key in the request body from the deploy keys, and returns the resulting key list. */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), locked -> {
        locked = locked.withoutDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        for (PublicKey key : locked.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(locked);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Configures a new tenant secret store: creates the tenant IAM policy, registers the
 * store with the tenant secret service, persists it on the tenant, and returns the
 * tenant's resulting secret store list.
 * Returns 400 for an invalid or already-configured store.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var awsId = mandatory("awsId", data).asString();
var externalId = mandatory("externalId", data).asString();
var role = mandatory("role", data).asString();
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var tenantSecretStore = new TenantSecretStore(name, awsId, role);
if (!tenantSecretStore.isValid()) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
}
if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
}
// External side effects first; the store is persisted on the tenant only after they succeed.
controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read so the response reflects the stored state.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
/**
 * Deletes a tenant secret store: removes it from the tenant secret service, deletes the
 * tenant IAM policy, removes it from the tenant, and returns the remaining store list.
 * Returns 404 if no store with the given name is configured.
 */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var optionalSecretStore = tenant.tenantSecretStores().stream()
.filter(secretStore -> secretStore.getName().equals(name))
.findFirst();
if (optionalSecretStore.isEmpty())
return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
var tenantSecretStore = optionalSecretStore.get();
// External cleanup first; the tenant record is updated only after it succeeds.
controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read so the response reflects the stored state.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
/**
 * Grants the given AWS role read access to the tenant's archive buckets.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String role = mandatory("role", toSlime(request.getData()).get()).asString();
    if (role.isBlank())
        return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, locked -> {
        locked = locked.withArchiveAccess(locked.get().archiveAccess().withAWSRole(role));
        controller.tenants().store(locked);
    });
    return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
/**
 * Revokes AWS role access to the tenant's archive buckets.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse removeAwsArchiveAccess(String tenantName) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, locked -> {
        locked = locked.withArchiveAccess(locked.get().archiveAccess().removeAWSRole());
        controller.tenants().store(locked);
    });
    return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
}
/**
 * Grants the given GCP member read access to the tenant's archive buckets.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var member = mandatory("member", data).asString();
    if (member.isBlank()) {
        // The field is a GCP member, not a role — the old message said "role", copied from the AWS variant.
        return ErrorResponse.badRequest("GCP archive access member can't be whitespace only");
    }
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}
/**
 * Revokes GCP member access to the tenant's archive buckets.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse removeGcpArchiveAccess(String tenantName) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, locked -> {
        locked = locked.withArchiveAccess(locked.get().archiveAccess().removeGCPMember());
        controller.tenants().store(locked);
    });
    return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}
/**
 * Applies a partial update to an application: "majorVersion" (0 clears the pin)
 * and/or "pemDeployKey". Unrecognized fields are ignored; the response message
 * lists what was changed.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
Inspector majorVersionField = requestObject.field("majorVersion");
if (majorVersionField.valid()) {
// 0 means "unpin": stored as null.
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
application = application.withMajorVersion(majorVersion);
messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
}
Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
if (pemDeployKeyField.valid()) {
String pemDeployKey = pemDeployKeyField.asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
application = application.withDeployKey(deployKey);
messageBuilder.add("Added deploy key " + pemDeployKey);
}
controller.applications().store(application);
});
return new MessageResponse(messageBuilder.toString());
}
/**
 * Looks up an application by tenant and name.
 *
 * @throws NotExistsException if it does not exist
 */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Optional<Application> application = controller.applications().getApplication(id);
    return application.orElseThrow(() -> new NotExistsException(id + " not found"));
}
/**
 * Looks up an instance by tenant, application and instance name.
 *
 * @throws NotExistsException if it does not exist
 */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Optional<Instance> instance = controller.applications().getInstance(id);
    return instance.orElseThrow(() -> new NotExistsException(id + " not found"));
}
/**
 * Lists the node repository's nodes for one deployment as JSON, with state,
 * version, resources, cluster membership and health flags per node.
 */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));
Slime slime = new Slime();
Cursor nodesArray = slime.setObject().setArray("nodes");
for (Node node : nodes) {
Cursor nodeObject = nodesArray.addObject();
nodeObject.setString("hostname", node.hostname().value());
nodeObject.setString("state", valueOf(node.state()));
node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
nodeObject.setString("orchestration", valueOf(node.serviceState()));
nodeObject.setString("version", node.currentVersion().toString());
node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
toSlime(node.resources(), nodeObject);
nodeObject.setString("clusterId", node.clusterId());
nodeObject.setString("clusterType", valueOf(node.clusterType()));
nodeObject.setBool("down", node.down());
// A node asked to retire is reported as retired even before it completes.
nodeObject.setBool("retired", node.retired() || node.wantToRetire());
// Restart/reboot are pending while the wanted generation is ahead of the current one.
nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
nodeObject.setString("group", node.group());
nodeObject.setLong("index", node.index());
}
return new SlimeJsonResponse(slime);
}
/**
 * Serves autoscaling information for each cluster of a deployment: configured
 * min/max, current resources, target (only when it differs from current),
 * suggestion, utilization, scaling events and status.
 */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
Slime slime = new Slime();
Cursor clustersObject = slime.setObject().setObject("clusters");
for (Cluster cluster : application.clusters().values()) {
Cursor clusterObject = clustersObject.setObject(cluster.id().value());
clusterObject.setString("type", cluster.type().name());
toSlime(cluster.min(), clusterObject.setObject("min"));
toSlime(cluster.max(), clusterObject.setObject("max"));
toSlime(cluster.current(), clusterObject.setObject("current"));
// Only report a target when it actually differs from the current allocation.
if (cluster.target().isPresent()
&& ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
toSlime(cluster.target().get(), clusterObject.setObject("target"));
cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
}
return new SlimeJsonResponse(slime);
}
/** Maps a node state to its wire name; throws on states this API does not expose. */
private static String valueOf(Node.State state) {
    return switch (state) {
        case failed        -> "failed";
        case parked        -> "parked";
        case dirty         -> "dirty";
        case ready         -> "ready";
        case active        -> "active";
        case inactive      -> "inactive";
        case reserved      -> "reserved";
        case provisioned   -> "provisioned";
        case breakfixed    -> "breakfixed";
        case deprovisioned -> "deprovisioned";
        default            -> throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    };
}
/** Maps an orchestration state to its wire name; anything unlisted maps to "unknown". */
static String valueOf(Node.ServiceState state) {
    return switch (state) {
        case expectedUp      -> "expectedUp";
        case allowedDown     -> "allowedDown";
        case permanentlyDown -> "permanentlyDown";
        case unorchestrated  -> "unorchestrated";
        default              -> "unknown";
    };
}
/** Maps a cluster type to its wire name; the unknown type is rejected. */
private static String valueOf(Node.ClusterType type) {
    return switch (type) {
        case admin     -> "admin";
        case content   -> "content";
        case container -> "container";
        case combined  -> "combined";
        case unknown   -> throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    };
}
/** Maps a disk speed to its wire name. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    return switch (diskSpeed) {
        case fast -> "fast";
        case slow -> "slow";
        case any  -> "any";
    };
}
/** Maps a storage type to its wire name. */
private static String valueOf(NodeResources.StorageType storageType) {
    return switch (storageType) {
        case remote -> "remote";
        case local  -> "local";
        case any    -> "any";
    };
}
/**
 * Streams deployment logs from the config server straight to the client,
 * forwarding the query parameters unchanged. The log stream is closed when
 * rendering completes (or fails).
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
// try-with-resources closes the upstream log stream even if the copy fails.
try (logStream) {
logStream.transferTo(outputStream);
}
}
@Override
public long maxPendingBytes() {
// Allow up to 64 MiB of buffered, unacknowledged response data.
return 1 << 26;
}
};
}
/** Serves the current support access state for a deployment as JSON. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    SupportAccess state = controller.supportAccess().forDeployment(deployment);
    Instant now = controller.clock().instant();
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(state, now));
}
/** Grants support access to a deployment for seven days, recorded against the calling user. */
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    String grantedBy = requireUserPrincipal(request).getName();
    Instant now = controller.clock().instant();
    SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), grantedBy);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}
/**
 * Revokes support access for a deployment and re-triggers (or queues) its job
 * so granted access is actually rolled back.
 */
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    // Reuse the principal resolved (and validated) above instead of re-reading it from the
    // raw request, so the audit reason always names the same user as the disallow call.
    controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}
/** Serves proton (content node) metrics for a deployment as JSON. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    return buildResponseFromProtonMetrics(controller.serviceRegistry().configServer().getProtonMetrics(deployment));
}
/**
 * Serves scaling events per cluster for a deployment, within the window given by the
 * optional "from"/"until" epoch-second request properties (defaulting to the epoch and now).
 */
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    Instant from = Optional.ofNullable(request.getProperty("from"))
                           .map(Long::valueOf)
                           .map(Instant::ofEpochSecond)
                           .orElse(Instant.EPOCH);
    Instant until = Optional.ofNullable(request.getProperty("until"))
                            .map(Long::valueOf)
                            .map(Instant::ofEpochSecond)
                            .orElse(Instant.now(controller.clock()));
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    events.forEach((cluster, clusterEvents) -> scalingEventsToSlime(clusterEvents, root.setArray(cluster.clusterId().value())));
    return new SlimeJsonResponse(slime);
}
/**
 * Packs the given proton metrics into a { "metrics": [...] } JSON response.
 * Returns an empty 500 response if JSON serialization fails.
 */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var metricsArray = jsonMapper.createArrayNode();
        for (ProtonMetrics metrics : protonMetrics)
            metricsArray.add(metrics.toJson());
        var root = jsonMapper.createObjectNode();
        root.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root));
    }
    catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Triggers a deployment job: "reTrigger" re-runs the same job, otherwise the job is
 * force-triggered, optionally skipping tests, the revision upgrade and/or the platform
 * upgrade. The response message names the triggered job(s) and any suppressed upgrades.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
boolean requireTests = ! requestObject.field("skipTests").asBool();
boolean reTrigger = requestObject.field("reTrigger").asBool();
boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
String triggered = reTrigger
? controller.applications().deploymentTrigger()
.reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
: controller.applications().deploymentTrigger()
.forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
.stream().map(job -> job.type().jobName()).collect(joining(", "));
// Builds e.g. ", without revision upgrade", ", without platform upgrade",
// or ", without revision and platform upgrade" — empty when nothing is skipped.
String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
(upgradeRevision ? "" : "revision") +
( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
(upgradePlatform ? "" : "platform") +
( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
: "Triggered " + triggered + " for " + id + suppressedUpgrades);
}
/** Pauses the given job for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant resumeAt = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, resumeAt);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes a previously paused job for the given instance. */
private HttpResponse resume(ApplicationId id, JobType type) {
    DeploymentTrigger deploymentTrigger = controller.applications().deploymentTrigger();
    deploymentTrigger.resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/** Re-sends a pending verification mail of the given type ("contact" or "notifications") to the given address. */
private SlimeJsonResponse resendEmailVerification(CloudTenant tenant, Inspector inspector) {
    String mail = mandatory("mail", inspector).asString();
    String type = mandatory("mailType", inspector).asString();
    PendingMailVerification.MailType mailType;
    switch (type) {
        case "contact":       mailType = PendingMailVerification.MailType.TENANT_CONTACT; break;
        case "notifications": mailType = PendingMailVerification.MailType.NOTIFICATIONS; break;
        default:              throw new IllegalArgumentException("Unknown mail type " + type);
    }
    if (controller.mailVerifier().resendMailVerification(tenant.name(), mail, mailType).isPresent())
        return new MessageResponse("Re-sent verification mail to " + mail);
    return ErrorResponse.notFoundError("No pending mail verification found for " + mail);
}
/**
 * Renders the application-level response: ids and links, latest version, the current
 * and outstanding change, per-instance details, deploy keys, metrics and activity.
 * Field insertion order determines the emitted JSON order, so it is significant.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // "deploying"/"outstandingChange" are taken from the first instance only —
    // NOTE(review): presumably all instances share the change here; confirm for multi-instance apps.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
    });
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    Cursor instancesArray = object.setArray("instances");
    // Optionally restrict the listing to production instances, per request parameter.
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Renders one instance inside the application response: current and outstanding change,
 * change blockers, rotation id, and one entry per deployment (recursed into when requested).
 * Field insertion order determines the emitted JSON order, so it is significant.
 *
 * Fix: removed the unused local {@code jobStatus}, which was computed and never read.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    // Change and blocker info only applies to instances declared in the deployment spec.
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), status.application());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    addRotationId(object, instance);
    // Deployments are listed in spec order when the instance is declared, insertion order otherwise.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Shallow entry: just enough to identify the deployment, plus a link to the full view.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Adds the first assigned rotation's id to the given object, when the instance has any rotations. */
private void addRotationId(Cursor object, Instance instance) {
    if ( ! instance.rotations().isEmpty())
        object.setString("rotationId", instance.rotations().get(0).rotationId().asString());
}
/**
 * Renders the instance-level response: ids and links, source info, change and blockers,
 * rotation id, per-deployment entries (including stubs for declared-but-missing zones),
 * deploy keys, metrics and activity. Field insertion order determines the emitted JSON order.
 *
 * Fix: removed the unused local {@code jobStatus}, which was computed and never read.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.revisions().last().ifPresent(version -> {
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // Change and blocker info only applies to instances declared in the deployment spec.
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    addRotationId(object, instance);
    // Deployments are listed in spec order when the instance is declared, insertion order otherwise.
    List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
                                              .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                              .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Shallow entry: just enough to identify the deployment, plus a link to the full view.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Add stub entries for zones this instance should (or is about to) deploy to, but has no deployment in:
    // declared production deployment jobs, plus active manual deployments.
    Stream.concat(status.jobSteps().keySet().stream()
                        .filter(job -> job.application().instance().equals(instance.name()))
                        .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                  controller.jobController().active(instance.id()).stream()
                            .map(run -> run.id().job())
                            .filter(job -> job.type().environment().isManuallyDeployed()))
          .map(job -> job.type().zone())
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    // Both the singular first key and the full list are emitted — presumably for backwards compatibility.
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns the full view of a single deployment; 404 when the instance or the deployment is missing. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    ZoneId zone = requireZone(environment, region);
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Adds the platform version and/or application revision of the given change to the given object. */
private void toSlime(Cursor object, Change change, Application application) {
    if (change.platform().isPresent())
        object.setString("version", change.platform().get().toString());
    if (change.revision().isPresent())
        JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(change.revision().get()));
}
/** Serializes one endpoint; field insertion order determines the emitted JSON order. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
    object.setBool("legacy", endpoint.legacy());
}
/**
 * Renders the full deployment view: ids, endpoints (zone-scoped and declared), links,
 * versions, rotation status, job/run status, quota, cost, archive URI, activity and metrics.
 * Field insertion order determines the emitted JSON order, so it is significant.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
    boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
    var endpointArray = response.setArray("endpoints");
    // Zone-scoped endpoints first; legacy and non-direct ones are filtered out unless explicitly requested.
    EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone);
    if (!legacyEndpoints) {
        zoneEndpoints = zoneEndpoints.not().legacy().direct();
    }
    for (var endpoint : zoneEndpoints) {
        toSlime(endpoint, endpointArray.addObject());
    }
    // Then endpoints declared in deployment.xml which target this deployment.
    EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
                                               .targets(deploymentId);
    if (!legacyEndpoints) {
        declaredEndpoints = declaredEndpoints.not().legacy().direct();
    }
    for (var endpoint : declaredEndpoints) {
        toSlime(endpoint, endpointArray.addObject());
    }
    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", application.revisions().get(deployment.revision()).stringId());
    response.setLong("build", deployment.revision().number());
    Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
    response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
    // Expiry is only set for zones with a configured deployment time-to-live.
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
        if (!deployment.zone().environment().isManuallyDeployed()) {
            // Pipeline-deployed: derive "status" from the corresponding deployment job step.
            DeploymentStatus status = controller.jobController().deploymentStatus(application);
            JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
            Optional.ofNullable(status.jobSteps().get(jobId))
                    .ifPresent(stepStatus -> {
                        JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
                        if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
                            response.setString("status", "complete");
                        else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                            response.setString("status", "pending");
                        else
                            response.setString("status", "running");
                    });
        } else {
            // Manually deployed: derive "status" from the last run of the deployment job.
            var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
            deploymentRun.ifPresent(run -> {
                response.setString("status", run.hasEnded() ? "complete" : "running");
            });
        }
    }
    response.setDouble("quota", deployment.quota().rate());
    deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
    controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)
              .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Adds a "bcpStatus" object holding the rotation status of the given state. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Adds an "endpointStatus" array with one entry per assigned rotation for the given deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor array = object.setArray("endpointStatus");
    for (AssignedRotation assigned : rotations) {
        RotationId rotationId = assigned.rotationId();
        Cursor entry = array.addObject();
        entry.setString("endpointId", assigned.endpointId().id());
        entry.setString("rotationId", rotationId.asString());
        entry.setString("clusterId", assigned.clusterId().value());
        entry.setString("status", rotationStateString(status.of(rotationId, deployment)));
        entry.setLong("lastUpdated", status.of(rotationId).lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring system URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/** Sets a deployment in or out of global rotation, recording whether an operator or the tenant did it. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    if (instance.deployments().get(zone) == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(new DeploymentId(instance.id(), zone)).setRoutingStatus(status, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                           instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Returns the global rotation override status for the primary declared rotation endpoint, if any. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
              .requiresRotation()
              .primary()
              .ifPresent(endpoint -> {
                  RoutingStatus status = controller.routing().of(deploymentId).routingStatus();
                  array.addString(endpoint.upstreamName(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.value().name());
                  statusObject.setString("reason", "");
                  statusObject.setString("agent", status.agent().name());
                  statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation status for the given (optionally named) endpoint of the given deployment. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the change currently rolling out for the given instance: platform, application, and pin state. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Change change = controller.applications().requireInstance(id).change();
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.revision().ifPresent(revision -> root.setString("application", revision.toString()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended by the orchestrator. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Proxies a /status page request to the given service on the given host in the given deployment. */
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    HttpURL.Path path = HttpURL.Path.parse("/status").append(restPath);
    Query query = Query.empty().add(request.getJDiscRequest().parameters());
    return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), path, query);
}
/** Returns the orchestrator's view of the service nodes of the given deployment. */
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(id, requireZone(environment, region));
    return controller.serviceRegistry().configServer().getServiceNodes(deploymentId);
}
/** Proxies a /state/v1 request to the given service, forwarding the original URL without its query. */
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    String forwardedUrl = HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString();
    Query query = Query.empty().add(request.getJDiscRequest().parameters()).set("forwarded-url", forwardedUrl);
    return controller.serviceRegistry().configServer().getServiceNodePage(
            deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
}
/** Returns the application package content at the given path for the given deployment. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(id, requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
}
/**
 * Updates an existing tenant from the request body and returns its new state.
 * Fix: reuses the parsed {@code tenant} name instead of re-parsing it for the response.
 */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 early if the tenant does not exist
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/**
 * Creates a tenant from the request body; in public systems the creating user is
 * additionally recorded as the tenant contact, with a verified email.
 * Fix: reuses the parsed {@code tenant} name instead of re-parsing it for the response.
 */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    if (controller.system().isPublic()) {
        User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
        TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
                                    .info()
                                    .withContact(TenantContact.from(user.name(), new Email(user.email(), true)));
        // Store the contact info under the tenant lock to avoid clobbering concurrent updates.
        controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(info);
            controller.tenants().store(lockedTenant);
        });
    }
    return tenant(controller.tenants().require(tenant), request);
}
/**
 * Creates an application under the given tenant and returns its representation.
 * Fix: removed the unused local holding the created {@code Application};
 * the call is kept for its side effect, and the response is built from the id.
 */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates an instance of the given application, creating the application first if it does not exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    ApplicationId instanceId = applicationId.instance(instanceName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);
    controller.applications().createInstance(instanceId, Tags.empty());
    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9".
 *
 * An empty version in the request body means the current system version. Inactive
 * versions are rejected unless the caller is an operator. When {@code pin} is true
 * the change is pinned to the chosen version.
 */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    // Validation and triggering happen under the application lock, so the version check
    // and the forced change are applied atomically with respect to other changes.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        if ( ! versionStatus.isActive(version) && ! isOperator(request))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/**
 * Triggers deployment of the last known application package for the given instance,
 * or of the specific build given in the request body.
 */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Inspector buildField = toSlime(request.getData()).get().field("build");
    long build;
    if (buildField.valid())
        build = buildField.asLong();
    else
        build = -1; // -1 means "latest known revision"
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        RevisionId revision;
        if (build == -1)
            revision = application.get().revisions().last().get().id();
        else
            revision = getRevision(application.get(), build);
        Change change = Change.of(revision);
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        message.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/**
 * Returns the revision with the given build number, requiring that its package is
 * still stored; throws IllegalArgumentException otherwise.
 */
private RevisionId getRevision(Application application, long build) {
    for (ApplicationVersion version : application.revisions().withPackage()) {
        if (version.id().number() == build) {
            if (controller.applications().applicationStore().hasBuild(application.id().tenant(),
                                                                      application.id().application(),
                                                                      build))
                return version.id();
            break; // known build, but its package is gone — treat as not found
        }
    }
    throw new IllegalArgumentException("Build number '" + build + "' was not found");
}
/** Marks the given build as non-deployable, and cancels any instance change currently targeting it. */
private HttpResponse cancelBuild(String tenantName, String applicationName, String build){
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
    controller.applications().lockApplicationOrThrow(id, application -> {
        controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
        application.get().instances().values().forEach(instance -> {
            if (instance.change().revision().equals(Optional.of(revision)))
                controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION);
        });
    });
    return new MessageResponse("Marked build '" + build + "' as non-deployable");
}
/** Cancels the ongoing change, or the chosen subset of it (e.g. {"cancel":"all"}), for the given instance. */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            message.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        controller.applications().deploymentTrigger().cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
        message.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // "clusterId" and "documentType" are optional comma-separated lists; blank entries are ignored.
    List<String> clusterNames = Stream.ofNullable(request.getProperty("clusterId"))
                                      .flatMap(value -> Stream.of(value.split(",")))
                                      .filter(cluster -> ! cluster.isBlank())
                                      .toList();
    List<String> documentTypes = Stream.ofNullable(request.getProperty("documentType"))
                                       .flatMap(value -> Stream.of(value.split(",")))
                                       .filter(type -> ! type.isBlank())
                                       .toList();
    Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
    boolean indexedOnly = request.getBooleanProperty("indexedOnly");
    controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed);

    StringBuilder message = new StringBuilder("Requested reindexing of " + id + " in " + zone);
    if ( ! clusterNames.isEmpty()) message.append(", on clusters ").append(String.join(", ", clusterNames));
    if ( ! documentTypes.isEmpty()) message.append(", for types ").append(String.join(", ", documentTypes));
    if (indexedOnly) message.append(", for indexed types");
    if (speed != null) message.append(", with speed ").append(speed);
    return new MessageResponse(message.toString());
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);

    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    Cursor clustersArray = root.setArray("clusters");
    // Clusters — and the document types within each cluster — are listed in name order for stable output.
    for (var cluster : reindexing.clusters().entrySet().stream().sorted(comparingByKey()).toList()) {
        Cursor clusterObject = clustersArray.addObject();
        clusterObject.setString("name", cluster.getKey());

        Cursor pendingArray = clusterObject.setArray("pending");
        for (var pending : cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()).toList()) {
            Cursor pendingObject = pendingArray.addObject();
            pendingObject.setString("type", pending.getKey());
            pendingObject.setLong("requiredGeneration", pending.getValue());
        }

        Cursor readyArray = clusterObject.setArray("ready");
        for (var ready : cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()).toList()) {
            Cursor readyObject = readyArray.addObject();
            readyObject.setString("type", ready.getKey());
            setStatus(readyObject, ready.getValue());
        }
    }
    return new SlimeJsonResponse(slime);
}
/** Serializes the given reindexing status, emitting only the fields that are present. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().map(Instant::toEpochMilli).ifPresent(millis -> statusObject.setLong("readyAtMillis", millis));
    status.startedAt().map(Instant::toEpochMilli).ifPresent(millis -> statusObject.setLong("startedAtMillis", millis));
    status.endedAt().map(Instant::toEpochMilli).ifPresent(millis -> statusObject.setLong("endedAtMillis", millis));
    status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
    status.message().ifPresent(message -> statusObject.setString("message", message));
    status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
    status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
}
/** Maps a reindexing state to its wire name. Arrow form keeps this consistent with the other switch expressions in this handler. */
private static String toString(ApplicationReindexing.State state) {
    return switch (state) {
        case PENDING -> "pending";
        case RUNNING -> "running";
        case FAILED -> "failed";
        case SUCCESSFUL -> "successful";
    };
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(applicationId, requireZone(environment, region));
    // All filter parameters are optional; an empty filter restarts the whole deployment.
    Optional<HostName> hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::of);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deploymentId, new RestartFilter().withHostName(hostName)
                                                                       .withClusterType(clusterType)
                                                                       .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(applicationId, requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    String verb = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(verb + " orchestration of " + deploymentId);
}
/**
 * Deploys the application package in the request directly to the given job's zone.
 * Only manually deployed environments are allowed, unless the caller is an operator.
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the shared constant for both the presence check and the lookup, so the two cannot drift apart.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part '" + EnvironmentResource.APPLICATION_ZIP + "'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone()),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // Parse the optional "deployOptions" JSON part once, instead of once per option read from it.
    Optional<Inspector> deployOptions = Optional.ofNullable(dataParts.get("deployOptions"))
                                                .map(json -> SlimeUtils.jsonToSlime(json).get());
    Optional<Version> version = deployOptions.flatMap(options -> optional("vespaVersion", options))
                                             .map(Version::fromString);
    boolean dryRun = deployOptions.flatMap(options -> optional("dryRun", options))
                                  .map(Boolean::valueOf)
                                  .orElse(false);

    ensureApplicationExists(TenantAndApplicationId.from(id), request);
    controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/** Deploys a system application (with its own package) to the given zone, always on the current system version. */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

    // Only system applications that carry their own application package may be deployed here.
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if ( ! systemApplication.map(SystemApplication::hasApplicationPackage).orElse(false))
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");

    // System applications always deploy on the current system version; an explicit version is rejected.
    String vespaVersion = deployOptions.field("vespaVersion").asString();
    if ( ! (vespaVersion.isEmpty() || vespaVersion.equals("null")))
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");

    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading())
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    Version target = versionStatus.systemVersion()
                                  .map(VespaVersion::versionNumber)
                                  .orElseThrow(() -> new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"));
    ActivateResult result = controller.applications()
                                      .deploySystemApplicationPackage(systemApplication.get(), zone, target);
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant; "forget" additionally erases all traces, and is restricted to operators. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    boolean forget = request.getBooleanProperty("forget");
    if (forget && ! isOperator(request))
        return ErrorResponse.forbidden("Only operators can forget a tenant");
    TenantName tenant = TenantName.from(tenantName); // Compute once; used both for deletion and for credentials.
    controller.tenants().delete(tenant,
                                Optional.of(accessControlRequests.credentials(tenant,
                                                                              toSlime(request.getData()).get(),
                                                                              request.getJDiscRequest())),
                                forget);
    return new MessageResponse("Deleted tenant " + tenantName);
}
/** Deletes the given application, using the caller's credentials for access control clean-up. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(),
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance, and the whole application if this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    ApplicationId instanceId = id.instance(instanceName);
    controller.applications().deleteInstance(instanceId);
    // Removing the application itself requires the caller's credentials.
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(),
                                                                    toSlime(request.getData()).get(),
                                                                    request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + instanceId.toFullString());
}
/** Deactivates the given deployment, and aborts any deployment job still running against that zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId id = new DeploymentId(applicationId, requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    // Attribute the abort of any still-running job to the caller.
    controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
              .filter(run -> ! run.hasEnded())
              .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
// Fall back to the default instance when the given instance is not declared in the deployment spec.
ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
? id : TenantAndApplicationId.from(id).defaultInstance();
// All production deployments of the chosen instance; these are what the tests may talk to.
HashSet<DeploymentId> deployments = controller.applications()
.getInstance(prodInstanceId).stream()
.flatMap(instance -> instance.productionDeployments().keySet().stream())
.map(zone -> new DeploymentId(prodInstanceId, zone))
.collect(Collectors.toCollection(HashSet::new));
// For non-production jobs, also include the zone under test itself.
ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
if ( ! type.isProduction())
deployments.add(new DeploymentId(toTest, type.zone()));
// The instance under test must actually be deployed in the job's zone.
Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone());
if (deployment == null)
throw new NotExistsException(toTest + " is not deployed in " + type.zone());
return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
type,
false,
deployment.version(),
deployment.revision(),
deployment.at(),
controller.routing().readTestRunnerEndpointsOf(deployments),
controller.applications().reachableContentClustersByZone(deployments)));
}
// Requests a service dump from a node by writing a "serviceDump" report on it in the node repository;
// the node agent picks this up and produces the requested artifacts. With wait=true, polls until done.
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
String region, String hostname, HttpRequest request) {
NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
ZoneId zone = requireZone(environment, region);
Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
if (report != null) {
Cursor cursor = report.get();
// Refuse to overwrite a dump that is still in progress (neither failed nor completed), unless force=true.
boolean force = request.getBooleanProperty("force");
if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
}
}
Slime requestPayload;
try {
requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
} catch (Exception e) {
throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
}
Cursor requestPayloadCursor = requestPayload.get();
String configId = requestPayloadCursor.field("configId").asString();
long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
if (configId.isEmpty()) {
throw new IllegalArgumentException("Missing configId");
}
// At least one artifact (e.g. a dump type) must be requested.
Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
int artifactEntries = artifactsCursor.entries();
if (artifactEntries == 0) {
throw new IllegalArgumentException("Missing or empty 'artifacts'");
}
// Build the report written to the node: creation time, config id, artifacts, optional expiry and options.
Slime dumpRequest = new Slime();
Cursor dumpRequestCursor = dumpRequest.setObject();
dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
dumpRequestCursor.setString("configId", configId);
Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
for (int i = 0; i < artifactEntries; i++) {
dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
}
if (expiresAt > 0) {
dumpRequestCursor.setLong("expiresAt", expiresAt);
}
// Copy opaque dump options through verbatim, if any were given.
Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
if (dumpOptionsCursor.children() > 0) {
SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
}
var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
nodeRepository.updateReports(zone, hostname, reportsUpdate);
boolean wait = request.getBooleanProperty("wait");
if (!wait) return new MessageResponse("Request created");
// wait=true: block (with polling) until the node reports completion or failure.
return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
}
/** Returns the current "serviceDump" report for the given node, or 404 if it has none. */
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment,
                                    String region, String hostname, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    Optional<Slime> report = getReport(nodeRepository, zone, tenant, application, instance, hostname);
    if (report.isEmpty())
        throw new NotExistsException("No service dump for node " + hostname);
    return new SlimeJsonResponse(report.get());
}
/** Polls the node's "serviceDump" report until it records completion or failure, then returns it. */
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                              String application, String instance, String hostname) {
    int pollInterval = 2; // seconds between polls
    while (true) {
        Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get();
        Cursor cursor = report.get();
        boolean done = cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0;
        if (done)
            return new SlimeJsonResponse(report);
        Slime snapshot = report; // effectively-final alias for lambda capture
        log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(snapshot))));
        log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
        controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
    }
}
/** Returns the node's "serviceDump" report, after verifying the node exists and is owned by the given application. */
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                  String application, String instance, String hostname) {
    Node node;
    try {
        node = nodeRepository.getNode(zone, hostname);
    } catch (IllegalArgumentException e) {
        // Unknown host names surface as 404, not 400.
        throw new NotExistsException(hostname);
    }
    ApplicationId expectedOwner = ApplicationId.from(tenant, application, instance);
    ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
    if ( ! owner.equals(expectedOwner))
        throw new IllegalArgumentException("Node is not owned by " + expectedOwner.toFullString());
    String json = node.reports().get("serviceDump");
    return json == null ? Optional.empty() : Optional.of(SlimeUtils.jsonToSlimeOrThrow(json));
}
/** Parses a source revision from JSON; all of "repository", "branch" and "commit" are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException (404) if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        throw new NotExistsException(new TenantId(tenantName));
    return tenant.get();
}
// Serializes a tenant, with type-specific metadata and its applications; recursion flags on the request
// control whether applications are expanded with full deployment status or just listed by id.
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
// Contact info is optional; when present it includes urls and a list of contact-person lists.
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
Cursor contactsArray = object.setArray("contacts");
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
case cloud: {
CloudTenant cloudTenant = (CloudTenant) tenant;
cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
cloudTenant.developerKeys().forEach((key, user) -> {
Cursor keyObject = pemDeveloperKeysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", user.getName());
});
toSlime(object, cloudTenant.tenantSecretStores());
toSlime(object.setObject("integrations").setObject("aws"),
controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
cloudTenant.tenantSecretStores());
// Quota is best-effort: a failure here must not break tenant serialization.
try {
var usedQuota = applications.stream()
.map(Application::quotaUsage)
.reduce(QuotaUsage.none, QuotaUsage::add);
toSlime(object.setObject("quota"), usedQuota);
} catch (Exception e) {
log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e)));
}
cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role));
toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess"));
break;
}
case deleted: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
Cursor applicationArray = object.setArray("applications");
for (Application application : applications) {
// Deployment status is computed lazily, only when recursing over applications.
DeploymentStatus status = null;
Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values();
// Applications with no (matching) instances are still listed, unless only active instances were requested.
if (instances.isEmpty() && !showOnlyActiveInstances(request))
toSlime(application.id(), applicationArray.addObject(), request);
for (Instance instance : instances) {
if (showOnlyActiveInstances(request) && instance.deployments().isEmpty())
continue;
if (recurseOverApplications(request)) {
if (status == null) status = controller.jobController().deploymentStatus(application);
toSlime(applicationArray.addObject(), instance, status, request);
} else {
toSlime(instance.id(), applicationArray.addObject(), request);
}
}
}
tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Serializes archive access roles/members; absent entries are simply omitted. */
private void toSlime(ArchiveAccess archiveAccess, Cursor target) {
    archiveAccess.awsRole().ifPresent(role -> target.setString("awsRole", role));
    archiveAccess.gcpMember().ifPresent(member -> target.setString("gcpMember", member));
}
/** Serializes quota usage; only the consumption rate is exposed, as "budgetUsed". */
private void toSlime(Cursor target, QuotaUsage usage) {
    target.setDouble("budgetUsed", usage.rate());
}
/** Serializes cluster resources, including the system-dependent cost of this allocation. */
private void toSlime(ClusterResources resources, Cursor target) {
    target.setLong("nodes", resources.nodes());
    target.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), target.setObject("nodeResources"));
    // Cost depends on the system we are running in.
    target.setDouble("cost", ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()));
}
/** Serializes cluster utilization: actual, ideal, current and peak values for cpu, memory and disk. */
private void utilizationToSlime(Cluster.Utilization util, Cursor target) {
    // cpu
    target.setDouble("cpu", util.cpu());
    target.setDouble("idealCpu", util.idealCpu());
    target.setDouble("currentCpu", util.currentCpu());
    target.setDouble("peakCpu", util.peakCpu());
    // memory
    target.setDouble("memory", util.memory());
    target.setDouble("idealMemory", util.idealMemory());
    target.setDouble("currentMemory", util.currentMemory());
    target.setDouble("peakMemory", util.peakMemory());
    // disk
    target.setDouble("disk", util.disk());
    target.setDouble("idealDisk", util.idealDisk());
    target.setDouble("currentDisk", util.currentDisk());
    target.setDouble("peakDisk", util.peakDisk());
}
/** Serializes scaling events: resources before and after, start time, and completion time when finished. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    scalingEvents.forEach(event -> {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
        event.completion().ifPresent(completion -> eventObject.setLong("completion", completion.toEpochMilli()));
    });
}
/** Serializes per-node resources. */
private void toSlime(NodeResources resources, Cursor target) {
    target.setDouble("vcpu", resources.vcpu());
    target.setDouble("memoryGb", resources.memoryGb());
    target.setDouble("diskGb", resources.diskGb());
    target.setDouble("bandwidthGbps", resources.bandwidthGbps());
    target.setString("diskSpeed", valueOf(resources.diskSpeed()));
    target.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a brief tenant entry for the tenant list: name, type-specific metadata, and its url. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz -> {
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
        }
        case cloud, deleted -> { } // No extra metadata for these tenant types.
        default -> throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
// Serializes tenant meta data: creation/deletion times, last dev deployment, last prod submission, and last logins.
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
// Last dev activity: prefer the start of the last deployment to a still-existing dev deployment;
// fall back to the start of the last run of any dev job, for instances whose deployment is gone.
Optional<Instant> lastDev = applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> instance.deployments().values().stream()
.filter(deployment -> deployment.zone().environment() == Environment.dev)
.map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment)))
.max(Comparator.naturalOrder())
.or(() -> applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream()
.filter(job -> job.environment() == Environment.dev)
.flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
.map(Run::start)
.max(Comparator.naturalOrder()));
// Last submission to prod: build time of the latest revision across the tenant's applications.
Optional<Instant> lastSubmission = applications.stream()
.flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream())
.max(Comparator.naturalOrder());
object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
if (tenant.type() == Tenant.Type.deleted)
object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli());
lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
.ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
.ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
.ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
try {
// Fragment is deliberately dropped (last argument null).
return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
}
catch (URISyntaxException e) {
// All components come from an already-valid URI, so this cannot occur in practice.
throw new RuntimeException("Will not happen", e);
}
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
// Note: any query string on the given URI is dropped.
return withPathAndQuery(newPath, null, uri);
}
/** Returns the application/v4 API path for the given deployment. */
private String toPath(DeploymentId id) {
    ApplicationId app = id.applicationId();
    ZoneId zone = id.zoneId();
    return path("/application", "v4",
                "tenant", app.tenant(),
                "application", app.application(),
                "instance", app.instance(),
                "environment", zone.environment(),
                "region", zone.region());
}
/**
 * Parses the given value as a long, returning the default when the value is null.
 *
 * @throws IllegalArgumentException if the value is non-null but not a valid integer
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Preserve the cause so the original parse failure is not lost.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/** Reads the given stream (capped at 1 MB) and parses it as JSON into a Slime structure. */
private Slime toSlime(InputStream jsonStream) {
    try {
        // 1 MB cap guards against oversized request bodies.
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Keep the cause and a message — the original threw a bare RuntimeException, losing both.
        throw new RuntimeException("Failed reading request body", e);
    }
}
/** Returns the authenticated user of the request, or throws IllegalArgumentException if there is none. */
private static Principal requireUserPrincipal(HttpRequest request) {
    return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal())
                   .orElseThrow(() -> new IllegalArgumentException("Expected a user principal"));
}
/** Returns the given field of the object, or throws IllegalArgumentException if it is missing. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
// Returns the given string field if present and valid, and empty otherwise.
private Optional<String> optional(String key, Inspector object) {
return SlimeUtils.optionalString(object.field(key));
}
// Joins the string forms of the given elements with '/'; elements must be non-null (Joiner rejects nulls).
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
/** Serializes a brief application reference: tenant, application, and its API url. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String apiPath = "/application/v4/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/** Serializes a brief instance reference: tenant, application, instance, and its API url. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String apiPath = "/application/v4/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value() +
                     "/instance/" + id.instance().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
// Serializes the result of activating a deployment: revision, package size, prepare log,
// and the config change actions (restarts and refeeds) reported by the config server.
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
Cursor logArray = object.setArray("prepareMessages");
// The prepare log may be absent; the array is still emitted, just empty.
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
// Clusters that must be restarted for the new config to take effect.
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
// Document types that must be re-fed due to incompatible schema changes.
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
/** Adds one object per service — name, type, config id and host — to the given Slime array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(info -> {
        Cursor entry = array.addObject();
        entry.setString("serviceName", info.serviceName);
        entry.setString("serviceType", info.serviceType);
        entry.setString("configId", info.configId);
        entry.setString("hostName", info.hostName);
    });
}
/** Appends each string to the given Slime array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Writes the given secret stores under a "secretStores" array on the given object. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor stores = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(stores.addObject(), store);
}
/** Writes the tenant's container role and its secret stores (under "accounts") to the given object. */
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    Cursor accounts = object.setArray("accounts");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(accounts.addObject(), store);
}
/** Serializes a single tenant secret store: name, AWS account id and IAM role. */
private void toSlime(Cursor object, TenantSecretStore secretStore) {
object.setString("name", secretStore.getName());
object.setString("awsId", secretStore.getAwsId());
object.setString("role", secretStore.getRole());
}
/**
 * Reads the entire stream into a string, or returns null if the stream is empty.
 *
 * The "\\A" delimiter makes the scanner return the whole remaining input as one token.
 * The charset is given explicitly as UTF-8: the no-charset Scanner constructor decodes
 * with the platform default, which is wrong for HTTP request bodies on pre-18 JVMs.
 */
private String readToString(InputStream stream) {
    Scanner scanner = new Scanner(stream, "UTF-8").useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}
/** Returns whether the request asks for recursion at tenant level (or deeper). */
private static boolean recurseOverTenants(HttpRequest request) {
    if ("tenant".equals(request.getProperty("recursive"))) return true;
    return recurseOverApplications(request);
}
/** Returns whether the request asks for recursion at application level (or deeper). */
private static boolean recurseOverApplications(HttpRequest request) {
    if ("application".equals(request.getProperty("recursive"))) return true;
    return recurseOverDeployments(request);
}
/**
 * Returns whether the request asks for recursion down to deployment level.
 * Compares the property directly instead of allocating an immutable set on every call;
 * the equals chain is null-safe, since the "recursive" property may be absent.
 */
private static boolean recurseOverDeployments(HttpRequest request) {
    String recursive = request.getProperty("recursive");
    return "all".equals(recursive) || "true".equals(recursive) || "deployment".equals(recursive);
}
/** Returns whether only production instances should be shown. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    String production = request.getProperty("production");
    return "true".equals(production);
}
/** Returns whether only instances with active deployments should be shown. */
private static boolean showOnlyActiveInstances(HttpRequest request) {
    String activeInstances = request.getProperty("activeInstances");
    return "true".equals(activeInstances);
}
/** Returns whether deleted entities should be included in the response. */
private static boolean includeDeleted(HttpRequest request) {
    String includeDeleted = request.getProperty("includeDeleted");
    return "true".equals(includeDeleted);
}
/** Maps the tenant's type to its wire name. */
private static String tenantType(Tenant tenant) {
    return switch (tenant.type()) {
        case athenz -> "ATHENS";
        case cloud -> "CLOUD";
        case deleted -> "DELETED";
    };
}
/** Builds an application id from the tenant, application and instance path segments. */
private static ApplicationId appIdFromPath(Path path) {
    String tenant = path.get("tenant");
    String application = path.get("application");
    String instance = path.get("instance");
    return ApplicationId.from(tenant, application, instance);
}
/** Resolves the job type named in the path against the zones known to this controller. */
private JobType jobTypeFromPath(Path path) {
    String jobName = path.get("jobtype");
    return JobType.fromJobName(jobName, controller.zoneRegistry());
}
/** Builds a run id from the application, job type and run number in the path. */
private RunId runIdFromPath(Path path) {
    long runNumber = Long.parseLong(path.get("number"));
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), runNumber);
}
/**
 * Handles submission of a new application revision: parses the multipart payload,
 * validates the submit options, verifies the application's identity configuration,
 * ensures the application exists, and registers the submission with the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
long projectId = submitOptions.field("projectId").asLong();
// 0 means "not set" (asLong defaults to 0 for a missing field); fall back to 1
projectId = projectId == 0 ? 1 : projectId;
Optional<String> repository = optional("repository", submitOptions);
Optional<String> branch = optional("branch", submitOptions);
Optional<String> commit = optional("commit", submitOptions);
// A source revision is only recorded when all three parts are given
Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
: Optional.empty();
Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
Optional<String> authorEmail = optional("authorEmail", submitOptions);
Optional<String> description = optional("description", submitOptions);
int risk = (int) submitOptions.field("risk").asLong();
// Reject relative or scheme-less source URLs early
sourceUrl.map(URI::create).ifPresent(url -> {
if (url.getHost() == null || url.getScheme() == null)
throw new IllegalArgumentException("Source URL must include scheme and host");
});
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP);
Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
Optional.empty(),
Optional.empty(),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
ensureApplicationExists(id, request);
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
}
/** Submits a deployment-removal package, which removes all production deployments of the application. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
    Submission removal = new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
                                        Optional.empty(), Optional.empty(), Optional.empty(), 0);
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, removal, 0);
    return new MessageResponse("All deployments removed");
}
/** Parses the environment and region path segments into a zone, and validates it. */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    return requireZone(zone);
}
/** Returns the given zone if it exists in this system; the synthetic prod "controller" zone is always accepted. */
private ZoneId requireZone(ZoneId zone) {
    boolean isControllerZone = zone.environment() == Environment.prod
                               && zone.region().value().equals("controller");
    if ( ! isControllerZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the request's multipart payload into named parts.
 * If an X-Content-Hash header is present, the body is SHA-256-digested while being read,
 * and the digest must match the base64-decoded header value — guarding against truncated
 * or corrupted uploads.
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
String contentHash = request.getHeader("X-Content-Hash");
if (contentHash == null)
return new MultipartParser().parse(request);
DigestInputStream digester = Signatures.sha256Digester(request.getData());
var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
return dataParts;
}
/**
 * Finds the rotation to operate on: the one with the given endpoint id when specified,
 * otherwise the instance's single rotation.
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    if (instance.rotations().isEmpty())
        throw new NotExistsException("global rotation does not exist for " + instance);
    if (endpointId.isPresent()) {
        for (AssignedRotation rotation : instance.rotations())
            if (rotation.endpointId().id().equals(endpointId.get()))
                return rotation.rotationId();
        throw new NotExistsException("endpoint " + endpointId.get() +
                                     " does not exist for " + instance);
    }
    if (instance.rotations().size() > 1)
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    return instance.rotations().get(0).rotationId();
}
/** Maps a rotation state to its wire name. */
private static String rotationStateString(RotationState state) {
    return switch (state) {
        case in -> "IN";
        case out -> "OUT";
        case unknown -> "UNKNOWN";
    };
}
/** Maps an endpoint scope to its wire name. */
private static String endpointScopeString(Endpoint.Scope scope) {
    return switch (scope) {
        case weighted -> "weighted";
        case application -> "application";
        case global -> "global";
        case zone -> "zone";
    };
}
/** Maps a routing method to its wire name. */
private static String routingMethodString(RoutingMethod method) {
    return switch (method) {
        case exclusive -> "exclusive";
        case sharedLayer4 -> "sharedLayer4";
    };
}
/** Returns the request context attribute with the given name, cast to the given type, or throws if absent or of the wrong type. */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    if (cls.isInstance(value)) return cls.cast(value);
    throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
    var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    for (Role role : securityContext.roles())
        if (role.definition() == RoleDefinition.hostedOperator)
            return true;
    return false;
}
/**
 * Ensures the application exists, creating it implicitly in public systems or when the
 * request carries Okta credentials; otherwise requires it to have been created up front.
 */
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
    if (controller.applications().getApplication(id).isPresent()) return;
    if ( ! controller.system().isPublic() && ! hasOktaContext(request)) {
        log.fine("Application does not exist in hosted, failing: " + id);
        throw new IllegalArgumentException("Application does not exist. Create application in Console first.");
    }
    log.fine("Application does not exist in public, creating: " + id);
    var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest());
    controller.applications().createApplication(id, credentials);
}
/**
 * Returns whether the request carries an Okta context.
 * Probes by attempting to construct the credentials: construction throws
 * IllegalArgumentException when the Okta attributes are absent from the request context.
 */
private boolean hasOktaContext(HttpRequest request) {
try {
OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context());
return true;
} catch (IllegalArgumentException e) {
// Not an Okta request — the attributes are simply missing
return false;
}
}
/**
 * Returns the given deployments sorted in the order their zones are declared in the
 * instance's deployment spec. Deployments in zones not declared in the spec get
 * indexOf == -1 and therefore sort first. The result is an unmodifiable list;
 * Stream.toList() is used for consistency with the zone list above (and the rest of
 * this class) instead of the verbose collectingAndThen/unmodifiableList collector.
 */
private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) {
    List<ZoneId> productionZones = spec.zones().stream()
                                       .filter(z -> z.region().isPresent())
                                       .map(z -> ZoneId.from(z.environment(), z.region().get()))
                                       .toList();
    return deployments.stream()
                      .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone())))
                      .toList();
}
} | class ApplicationApiHandler extends AuditLoggingRequestHandler {
// Shared JSON mapper; ObjectMapper is thread-safe once configured
private static final ObjectMapper jsonMapper = new ObjectMapper();
private final Controller controller;
// Creates access-control credentials from incoming requests
private final AccessControlRequests accessControlRequests;
// Serializes test configuration for deployment test runs
private final TestConfigSerializer testConfigSerializer;
@Inject
public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
// Pass the controller's audit logger up so mutating requests are audit logged
super(parentCtx, controller.auditLogger());
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
@Override
public Duration getTimeout() {
// Generous timeout: some operations (e.g. deployments) can take a long time
return Duration.ofMinutes(20);
}
/** Routes the request by HTTP method, and maps known exception types to appropriate error responses. */
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
    try {
        Path path = new Path(request.getUri());
        return switch (request.getMethod()) {
            case GET -> handleGET(path, request);
            case PUT -> handlePUT(path, request);
            case POST -> handlePOST(path, request);
            case PATCH -> handlePATCH(path, request);
            case DELETE -> handleDELETE(path, request);
            case OPTIONS -> handleOPTIONS();
            default -> ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
        };
    }
    catch (RestApiException.Forbidden e) {
        return ErrorResponse.forbidden(Exceptions.toMessageString(e));
    }
    catch (RestApiException.Unauthorized e) {
        return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
    }
    catch (NotExistsException e) {
        return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
    }
    catch (IllegalArgumentException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
    catch (ConfigServerException e) {
        // Translate config server error codes to the matching HTTP status
        return switch (e.code()) {
            case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e));
            case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
            case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e);
            default -> new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
        };
    }
    catch (RuntimeException e) {
        return ErrorResponses.logThrowing(request, log, e);
    }
}
/**
 * Routes GET requests. Patterns are matched in declaration order; the first match wins.
 * Fix: removed an unreachable duplicate of the
 * ".../environment/{environment}/region/{region}/instance/{instance}" route, which was
 * listed twice in a row with the identical handler and could never match.
 */
private HttpResponse handleGET(Path path, HttpRequest request) {
    if (path.matches("/application/v4/")) return root(request);
    if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
    if (path.matches("/application/v4/tenant")) return tenants(request);
    if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
    if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
    if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
    if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    // Legacy paths with the instance segment last
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PUT requests: tenant updates, access management, archive access, secret stores and rotation overrides. First matching pattern wins. */
private HttpResponse handlePUT(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
if (path.matches("/application/v4/tenant/{tenant}/info/resend-mail-verification")) return withCloudTenant(path.get("tenant"), request, this::resendEmailVerification);
// Bare archive-access path is the legacy AWS variant
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes POST requests: creation, keys, submissions, deployments, jobs and per-zone operations. First matching pattern wins. */
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
// Legacy paths with the instance segment last
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests to their handler; returns 404 for unknown paths. */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
// Both the application and the instance path patch application-level fields only.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes DELETE requests to their handler; returns 404 for unknown paths.
 * Routes are matched in order, from tenant-level down to deployment-level resources.
 */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
// Tenant-level resources.
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
// "/archive-access" without a cloud suffix is the legacy alias for the AWS variant.
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
// Application-level resources; deploy cancellations without an instance use the "default" instance.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
// Instance-level resources.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
// Deployment-level resources; the ".../environment/.../region/.../instance/..." forms are path-order aliases.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS preflight requests by advertising the verbs this handler accepts; no body. */
private HttpResponse handleOPTIONS() {
    var allowed = new EmptyResponse();
    allowed.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return allowed;
}
/** Lists every tenant, with the applications belonging to each, as one JSON array. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    List<Application> allApplications = controller.applications().asList();
    for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) {
        // Pick out just the applications owned by this tenant.
        List<Application> ownedByTenant = allApplications.stream()
                                                         .filter(application -> application.id().tenant().equals(tenant.name()))
                                                         .collect(toList());
        toSlime(tenantArray.addObject(), tenant, ownedByTenant, request);
    }
    return new SlimeJsonResponse(slime);
}
/** Root of the API: recurses over all tenants when requested, else lists the tenant resource link. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Lists all tenants (optionally including deleted ones) as a JSON array. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList(includeDeleted(request))
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Returns the named tenant, or 404 if it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName), includeDeleted(request));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Renders the given tenant, together with its applications, as JSON. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, tenant, controller.applications().asList(tenant.name()), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the access-request state for the given cloud tenant: whether operator
 * access is managed, any pending membership request, and the audit log of
 * earlier decisions. 400 for non-cloud tenants.
 */
private HttpResponse accessRequests(String tenantName, HttpRequest request) {
var tenant = TenantName.from(tenantName);
// Access requests only exist for cloud tenants.
if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
var accessControlService = controller.serviceRegistry().accessControlService();
var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
var managedAccess = accessControlService.getManagedAccess(tenant);
var slime = new Slime();
var cursor = slime.setObject();
cursor.setBool("managedAccess", managedAccess);
// At most one request is pending at a time; omit the object entirely when none is.
accessRoleInformation.getPendingRequest()
.ifPresent(membershipRequest -> {
var requestCursor = cursor.setObject("pendingRequest");
requestCursor.setString("requestTime", membershipRequest.getCreationTime());
requestCursor.setString("reason", membershipRequest.getReason());
});
var auditLogCursor = cursor.setArray("auditLog");
accessRoleInformation.getAuditLog()
.forEach(auditLogEntry -> {
var entryCursor = auditLogCursor.addObject();
entryCursor.setString("created", auditLogEntry.getCreationTime());
entryCursor.setString("approver", auditLogEntry.getApprover());
entryCursor.setString("reason", auditLogEntry.getReason());
entryCursor.setString("status", auditLogEntry.getAction());
});
return new SlimeJsonResponse(slime);
}
/**
 * Lets an operator request ssh access to the given tenant's resources.
 *
 * @return 403 unless the caller is an operator, 400 unless the tenant is a cloud tenant
 */
private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
    if (!isOperator(request)) {
        return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
    }
    TenantName tenant = TenantName.from(tenantName); // parse once, reuse below
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only request access for cloud tenants");
    controller.serviceRegistry().accessControlService().requestSshAccess(tenant);
    return new MessageResponse("OK");
}
/**
 * Approves or rejects a pending ssh access request for the given cloud tenant.
 * The request body may carry "expiry" (epoch millis) and "approve" (the decision).
 */
private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
var tenant = TenantName.from(tenantName);
if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
var inspector = toSlime(request.getData()).get();
// Missing expiry defaults to one day from now.
var expiry = inspector.field("expiry").valid() ?
Instant.ofEpochMilli(inspector.field("expiry").asLong()) :
Instant.now().plus(1, ChronoUnit.DAYS);
var approve = inspector.field("approve").asBool();
controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
return new MessageResponse("OK");
}
/** Turns on managed (approval-gated) operator access for the given tenant. */
private HttpResponse addManagedAccess(String tenantName) {
return setManagedAccess(tenantName, true);
}
/** Turns off managed (approval-gated) operator access for the given tenant. */
private HttpResponse removeManagedAccess(String tenantName) {
return setManagedAccess(tenantName, false);
}
/**
 * Sets whether operator access to the given tenant requires approval;
 * cloud tenants only. Echoes the new state back to the caller.
 */
private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only set access privileges for cloud tenants"); // fixed typo: was "access privel"
    controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
    var slime = new Slime();
    slime.setObject().setBool("managedAccess", managedAccess);
    return new SlimeJsonResponse(slime);
}
/** Returns the stored info of the named tenant; 404 unless it exists and is a cloud tenant. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> cloudTenant = controller.tenants().get(TenantName.from(tenantName))
                                             .filter(tenant -> tenant.type() == Tenant.Type.cloud);
    return cloudTenant.map(tenant -> tenantInfo(((CloudTenant) tenant).info(), request))
                      .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/** Applies the given handler to the named tenant if it exists and is a cloud tenant; 404 otherwise. */
private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
    return controller.tenants().get(TenantName.from(tenantName))
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(CloudTenant.class::cast)
                     .map(handler)
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/**
 * Renders the given tenant info as JSON; empty info yields an empty object.
 * NOTE(review): the request parameter is unused in this body — confirm whether it can be dropped from callers.
 */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
Slime slime = new Slime();
Cursor infoCursor = slime.setObject();
if (!info.isEmpty()) {
infoCursor.setString("name", info.name());
infoCursor.setString("email", info.email());
infoCursor.setString("website", info.website());
infoCursor.setString("contactName", info.contact().name());
infoCursor.setString("contactEmail", info.contact().email().getEmailAddress());
infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified());
// Nested structures are appended by the type-specific toSlime overloads.
toSlime(info.address(), infoCursor);
toSlime(info.billingContact(), infoCursor);
toSlime(info.contacts(), infoCursor);
}
return new SlimeJsonResponse(slime);
}
/** Renders the "profile" view of the given cloud tenant's info: contact, company and address. */
private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
var slime = new Slime();
var root = slime.setObject();
var info = cloudTenant.info();
if (!info.isEmpty()) {
var contact = root.setObject("contact");
contact.setString("name", info.contact().name());
contact.setString("email", info.contact().email().getEmailAddress());
contact.setBool("emailVerified", info.contact().email().isVerified());
var tenant = root.setObject("tenant");
// In the profile view the tenant's name field is presented as "company".
tenant.setString("company", info.name());
tenant.setString("website", info.website());
toSlime(info.address(), root);
}
return new SlimeJsonResponse(slime);
}
/**
 * Applies the given handler to the named tenant and the parsed request body; 404
 * if the tenant does not exist or is not a cloud tenant.
 *
 * Filters on tenant type before the cast: without the filter a non-cloud tenant
 * would trigger a ClassCastException (500) instead of a 404, unlike the
 * Function-based overload of this method.
 */
private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
    return controller.tenants().get(tenantName)
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get()))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/**
 * Updates the profile part of the given cloud tenant's info from the request body.
 * A changed contact email triggers a verification mail and is stored unverified;
 * an unchanged email keeps its stored verification state. Fields missing from the
 * request keep their stored values.
 *
 * @throws IllegalArgumentException if the merged info fails validation
 */
private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
var info = cloudTenant.info();
// Only send a verification mail when the address actually changes.
var mergedEmail = optional("email", inspector.field("contact"))
.filter(address -> !address.equals(info.contact().email().getEmailAddress()))
.map(address -> {
controller.mailVerifier().sendMailVerification(cloudTenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT);
return new Email(address, false);
})
.orElse(info.contact().email());
var mergedContact = TenantContact.empty()
.withName(getString(inspector.field("contact").field("name"), info.contact().name()))
.withEmail(mergedEmail);
var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address());
var mergedInfo = info
.withName(getString(inspector.field("tenant").field("name"), info.name()))
.withWebsite(getString(inspector.field("tenant").field("website"), info.website()))
.withContact(mergedContact)
.withAddress(mergedAddress);
validateMergedTenantInfo(mergedInfo);
// Persist under the tenant lock so concurrent updates are not clobbered.
controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
/** Renders the billing contact (name, email, phone and address) of the given cloud tenant. */
private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) {
var slime = new Slime();
var root = slime.setObject();
var info = cloudTenant.info();
if (!info.isEmpty()) {
var billingContact = info.billingContact();
var contact = root.setObject("contact");
contact.setString("name", billingContact.contact().name());
contact.setString("email", billingContact.contact().email().getEmailAddress());
contact.setString("phone", billingContact.contact().phone());
toSlime(billingContact.address(), root);
}
return new SlimeJsonResponse(slime);
}
/**
 * Updates the billing contact and address of the given cloud tenant from the
 * request body. Fields missing from the request keep their stored values.
 */
private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    var contact = info.billingContact().contact();
    var address = info.billingContact().address();
    // Merge request fields over the stored contact and address.
    var mergedContact = updateTenantInfoContact(inspector.field("contact"), cloudTenant.name(), contact, false);
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), address); // use the local; it was declared but left unused
    var mergedBilling = info.billingContact()
                            .withContact(mergedContact)
                            .withAddress(mergedAddress);
    var mergedInfo = info.withBilling(mergedBilling);
    // Persist under the tenant lock so concurrent updates are not clobbered.
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/** Lists the notification contacts registered for the given cloud tenant. */
private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
    Slime slime = new Slime();
    toSlime(cloudTenant.info().contacts(), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/**
 * Replaces the notification contacts of the given cloud tenant with those in the
 * request body; new email addresses trigger verification mails.
 */
private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
var mergedInfo = cloudTenant.info()
.withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.name(), cloudTenant.info().contacts()));
// Persist under the tenant lock so concurrent updates are not clobbered.
controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
/**
 * Validates tenant info after merging a request with the stored values.
 *
 * @throws IllegalArgumentException if the contact name or email is blank, the email
 *         lacks '@', or a non-blank website is not a parseable URL
 */
private void validateMergedTenantInfo(TenantInfo mergedInfo) {
    if (mergedInfo.contact().name().isBlank()) {
        throw new IllegalArgumentException("'contactName' cannot be empty");
    }
    if (mergedInfo.contact().email().getEmailAddress().isBlank()) {
        throw new IllegalArgumentException("'contactEmail' cannot be empty");
    }
    if (! mergedInfo.contact().email().getEmailAddress().contains("@")) {
        throw new IllegalArgumentException("'contactEmail' needs to be an email address");
    }
    if (! mergedInfo.website().isBlank()) {
        try {
            new URL(mergedInfo.website());
        } catch (MalformedURLException e) {
            // Keep the cause so logs show exactly why the URL was rejected.
            throw new IllegalArgumentException("'website' needs to be a valid address", e);
        }
    }
}
/** Writes the given address as an "address" object under parentCursor; writes nothing when empty. */
private void toSlime(TenantAddress address, Cursor parentCursor) {
    if (address.isEmpty())
        return;
    // Field order matters for the rendered JSON, so keep it fixed.
    Cursor cursor = parentCursor.setObject("address");
    cursor.setString("addressLines", address.address());
    cursor.setString("postalCodeOrZip", address.code());
    cursor.setString("city", address.city());
    cursor.setString("stateRegionProvince", address.region());
    cursor.setString("country", address.country());
}
/** Writes the given billing contact as a "billingContact" object under parentCursor; nothing when empty. */
private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty())
        return;
    Cursor cursor = parentCursor.setObject("billingContact");
    cursor.setString("name", billingContact.contact().name());
    cursor.setString("email", billingContact.contact().email().getEmailAddress());
    cursor.setString("phone", billingContact.contact().phone());
    toSlime(billingContact.address(), cursor);
}
/** Writes the given contacts as a "contacts" array under parentCursor. */
private void toSlime(TenantContacts contacts, Cursor parentCursor) {
    Cursor contactsCursor = parentCursor.setArray("contacts");
    for (var contact : contacts.all()) {
        Cursor contactCursor = contactsCursor.addObject();
        Cursor audiencesArray = contactCursor.setArray("audiences");
        contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
        switch (contact.type()) {
            case EMAIL -> {
                var email = (TenantContacts.EmailContact) contact;
                contactCursor.setString("email", email.email().getEmailAddress());
                contactCursor.setBool("emailVerified", email.email().isVerified());
            }
            default -> throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
        }
    }
}
/** Maps the wire name of a contact audience to its enum value. */
private static TenantContacts.Audience fromAudience(String value) {
    return switch (value) {
        case "tenant" -> TenantContacts.Audience.TENANT;
        case "notifications" -> TenantContacts.Audience.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
    };
}
/** Maps a contact audience to its wire name; exhaustive over the enum, so no default. */
private static String toAudience(TenantContacts.Audience audience) {
    return switch (audience) {
        case TENANT -> "tenant";
        case NOTIFICATIONS -> "notifications";
    };
}
/** Updates the stored info of the named tenant from the request; 404 unless a cloud tenant. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    return controller.tenants().get(TenantName.from(tenantName))
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(CloudTenant.class::cast)
                     .map(cloudTenant -> updateTenantInfo(cloudTenant, request))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/** Returns the trimmed string value of the given field, or the default if the field is missing. */
private String getString(Inspector field, String defaultValue) { // renamed parameter: was the typo "defaultVale"
    return field.valid() ? field.asString().trim() : defaultValue;
}
/**
 * Merges the request body over the tenant's stored info and persists the result.
 * A changed contact email triggers a verification mail and is stored unverified;
 * an unchanged email keeps its stored verification state. Fields missing from
 * the request keep their stored values.
 *
 * @throws IllegalArgumentException if the merged info fails validation
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
TenantInfo oldInfo = tenant.info();
Inspector insp = toSlime(request.getData()).get();
// Only send a verification mail when the address actually changes.
var mergedEmail = optional("contactEmail", insp)
.filter(address -> !address.equals(oldInfo.contact().email().getEmailAddress()))
.map(address -> {
controller.mailVerifier().sendMailVerification(tenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT);
return new Email(address, false);
})
.orElse(oldInfo.contact().email());
TenantContact mergedContact = TenantContact.empty()
.withName(getString(insp.field("contactName"), oldInfo.contact().name()))
.withEmail(mergedEmail);
TenantInfo mergedInfo = TenantInfo.empty()
.withName(getString(insp.field("name"), oldInfo.name()))
.withEmail(getString(insp.field("email"), oldInfo.email()))
.withWebsite(getString(insp.field("website"), oldInfo.website()))
.withContact(mergedContact)
.withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
.withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), tenant.name(), oldInfo.billingContact()))
.withContacts(updateTenantInfoContacts(insp.field("contacts"), tenant.name(), oldInfo.contacts()));
validateMergedTenantInfo(mergedInfo);
// Persist under the tenant lock so concurrent updates are not clobbered.
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
/**
 * Merges the given address fields over the old address. The result must either
 * have every field set or every field blank (no address at all); anything in
 * between is rejected.
 *
 * @throws IllegalArgumentException if only some of the address fields are set
 */
private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
    if ( ! insp.valid()) return oldAddress;
    TenantAddress merged = TenantAddress.empty()
            .withCountry(getString(insp.field("country"), oldAddress.country()))
            .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
            .withCity(getString(insp.field("city"), oldAddress.city()))
            .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
            .withAddress(getString(insp.field("addressLines"), oldAddress.address()));
    List<String> fields = List.of(merged.address(), merged.code(), merged.country(), merged.city(), merged.region());
    long blankFields = fields.stream().filter(String::isBlank).count();
    if (blankFields != 0 && blankFields != fields.size())
        throw new IllegalArgumentException("All address fields must be set");
    return merged;
}
/** Merges billing contact fields from the request over the old billing contact. */
private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantName tenantName, TenantBilling oldContact) {
    if ( ! insp.valid()) return oldContact;
    TenantContact mergedContact = updateTenantInfoContact(insp, tenantName, oldContact.contact(), true);
    TenantAddress mergedAddress = updateTenantInfoAddress(insp.field("address"), oldContact.address());
    return TenantBilling.empty().withContact(mergedContact).withAddress(mergedAddress);
}
/**
 * Builds the contact list from the request. Addresses already present among the
 * stored contacts keep their stored Email (and verification state); new
 * addresses get a verification mail and start out unverified.
 */
private TenantContacts updateTenantInfoContacts(Inspector insp, TenantName tenantName, TenantContacts oldContacts) {
if (!insp.valid()) return oldContacts;
List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> {
String email = inspector.field("email").asString().trim();
List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences"))
.map(audience -> fromAudience(audience.asString()))
.toList();
// Reuse the stored Email object when the address is unchanged, to keep its verified flag.
return oldContacts.ofType(TenantContacts.EmailContact.class)
.stream()
.filter(contact -> contact.email().getEmailAddress().equals(email))
.findAny()
.map(emailContact -> new TenantContacts.EmailContact(audiences, emailContact.email()))
.orElseGet(() -> {
controller.mailVerifier().sendMailVerification(tenantName, email, PendingMailVerification.MailType.NOTIFICATIONS);
return new TenantContacts.EmailContact(audiences, new Email(email, false));
});
}).toList();
return new TenantContacts(contacts);
}
/**
 * Lists notifications, optionally restricted to one tenant and filtered by the
 * request properties "application", "instance", "zone", "job", "type" and "level".
 * When tenant is empty, all tenants with notifications are included.
 *
 * @param includeTenantFieldInResponse whether each entry should carry the tenant name
 */
private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
boolean productionOnly = showOnlyProductionInstances(request);
boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
Slime slime = new Slime();
Cursor notificationsArray = slime.setObject().setArray("notifications");
tenant.map(t -> Stream.of(TenantName.from(t)))
.orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
.flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
// Each propertyEquals is a no-op when the corresponding request property is absent.
.filter(notification ->
propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
.forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
return new SlimeJsonResponse(slime);
}
/**
 * Returns whether the request either lacks the given property, or has it equal
 * (after mapping) to the given value. A present property never matches an absent value.
 */
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
    String requested = request.getProperty(property);
    if (requested == null) return true; // property not given: no constraint
    return value.isPresent() && mapper.apply(requested).equals(value.get());
}
/** Renders one notification; optional source fields are emitted only when present. */
private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
cursor.setLong("at", notification.at().toEpochMilli());
cursor.setString("level", notificationLevelAsString(notification.level()));
cursor.setString("type", notificationTypeAsString(notification.type()));
if (!excludeMessages) {
Cursor messagesArray = cursor.setArray("messages");
notification.messages().forEach(messagesArray::addString);
}
if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
notification.source().zoneId().ifPresent(zoneId -> {
cursor.setString("environment", zoneId.environment().value());
cursor.setString("region", zoneId.region().value());
});
notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
/** Returns the wire name for the given notification type; submission and applicationPackage share one. */
private static String notificationTypeAsString(Notification.Type type) {
    return switch (type) {
        case submission, applicationPackage -> "applicationPackage";
        case testPackage -> "testPackage";
        case deployment -> "deployment";
        case feedBlock -> "feedBlock";
        case reindex -> "reindex";
    };
}
/** Returns the wire name for the given notification level. */
private static String notificationLevelAsString(Notification.Level level) {
    return switch (level) {
        case info -> "info";
        case warning -> "warning";
        case error -> "error";
    };
}
/**
 * Lists applications of the given tenant — all of them, or just the named one —
 * with their instances and API urls.
 *
 * @throws NotExistsException if a named application does not exist
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
getTenantOrThrow(tenantName); // fail early if the tenant itself is unknown
List<Application> applications = applicationName.isEmpty() ?
controller.applications().asList(tenant) :
controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
.map(List::of)
.orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (Application application : applications) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
// Optionally hide non-production instances, controlled by a request property.
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
return new SlimeJsonResponse(slime);
}
/**
 * Returns, as a zip, the application package last submitted to the given dev/perf job's zone.
 *
 * @throws NotExistsException if the job has never run for this application
 */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    ZoneId zone = type.zone();
    // orElseThrow instead of Optional.get(): a missing run should yield a 404, not a 500.
    RevisionId revision = controller.jobController().last(id, type)
                                    .orElseThrow(() -> new NotExistsException("no run of " + type.jobName() + " for " + id.toFullString()))
                                    .versions().targetRevision();
    byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
    return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
}
/** Returns the stored diff of the application package used by the given dev run, or 404 when absent. */
private HttpResponse devApplicationPackageDiff(RunId runId) {
    DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone());
    var diff = controller.applications().applicationStore().getDevDiff(deploymentId, runId.number());
    return diff.map(ByteArrayResponse::new)
               .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId));
}
/**
 * Returns an application package (or its tester package, with the "tests"
 * property) as a zip. The build is chosen by the "build" request property:
 * a positive build number, "latestDeployed" for the newest production-deployed
 * build, or, when absent, the newest submitted build.
 *
 * @throws IllegalArgumentException if "build" is not a number or below 1
 * @throws NotExistsException if no matching package exists
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    final long build;
    String requestedBuild = request.getProperty("build");
    if (requestedBuild != null) {
        if (requestedBuild.equals("latestDeployed")) {
            build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision()
                              .map(RevisionId::number)
                              .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication));
        } else {
            try {
                // Use the already-read property value rather than fetching it again.
                build = Validation.requireAtLeast(Long.parseLong(requestedBuild), "build number", 1L);
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
            }
        }
    } else {
        build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
                          .map(version -> version.id().number())
                          .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
    }
    RevisionId revision = RevisionId.forProduction(build);
    boolean tests = request.getBooleanProperty("tests");
    byte[] applicationPackage = tests ?
            controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
            controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
    String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
    return new ZipResponse(filename, applicationPackage);
}
/**
 * Returns the stored diff of the given build's application package against the previous build.
 *
 * @throws IllegalArgumentException if the build number is not a number — a 400, rather
 *         than the 500 a bare NumberFormatException would cause; matches the handling
 *         in applicationPackage and compileVersion
 * @throws NotExistsException if no diff is stored for the build
 */
private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
    long build;
    try {
        build = Long.parseLong(number);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("invalid build number '" + number + "'", e);
    }
    return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), build)
                     .map(ByteArrayResponse::new)
                     .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
}
/** Renders the named application as JSON. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the Vespa version the application should compile against,
 * optionally restricted to a given major version.
 *
 * @throws IllegalArgumentException if allowMajorParam is present but not a number
 */
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
    OptionalInt allowMajor = OptionalInt.empty();
    if (allowMajorParam != null) {
        try {
            allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
        }
    }
    Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
    Slime slime = new Slime();
    slime.setObject().setString("compileVersion", compileVersion.toFullString());
    return new SlimeJsonResponse(slime);
}
/** Renders the named instance, together with its deployment status, as a JSON response. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Slime response = new Slime();
    Cursor root = response.setObject();
    Instance instance = getInstance(tenantName, applicationName, instanceName);
    toSlime(root, instance, controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
    return new SlimeJsonResponse(response);
}
/**
 * Registers the PEM-encoded public key in the request body as a developer key
 * for the requesting user. Responds with the tenant's full set of developer keys.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Principal user = request.getJDiscRequest().getUserPrincipal();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime response = new Slime();
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, locked -> {
        locked = locked.withDeveloperKey(developerKey, user);
        toSlime(response.setObject().setArray("keys"), locked.get().developerKeys());
        controller.tenants().store(locked);
    });
    return new SlimeJsonResponse(response);
}
/**
 * Validates that the named secret store is reachable from the given deployment,
 * relaying the config server's validation result to the caller as JSON.
 */
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
    if ( ! applicationId.tenant().equals(TenantName.from(tenantName)))
        return ErrorResponse.badRequest("Invalid application id");
    DeploymentId deploymentId = new DeploymentId(applicationId, requireZone(ZoneId.from(request.getProperty("zone"))));
    CloudTenant tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
    Optional<TenantSecretStore> secretStore = tenant.tenantSecretStores().stream()
                                                    .filter(store -> store.getName().equals(secretStoreName))
                                                    .findFirst();
    if (secretStore.isEmpty())
        return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
    var validationResult = controller.serviceRegistry().configServer().validateSecretStore(deploymentId,
                                                                                           secretStore.get(),
                                                                                           request.getProperty("aws-region"),
                                                                                           request.getProperty("parameter-name"));
    try {
        Slime response = new Slime();
        Cursor root = response.setObject();
        root.setString("target", deploymentId.toString());
        // Embed the config server's JSON verbatim under "result".
        SlimeUtils.copyObject(SlimeUtils.jsonToSlime(validationResult).get(), root.setObject("result"));
        return new SlimeJsonResponse(response);
    }
    catch (JsonParseException e) {
        return ErrorResponses.logThrowing(request, log, e);
    }
}
/**
 * Removes the PEM-encoded public key in the request body from the tenant's developer keys.
 * Responds with the tenant's remaining developer keys.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // Removed unused local: the principal owning the key was looked up but never used.
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/** Appends one { "key", "user" } object per entry to the given array, with the key PEM-encoded. */
private void toSlime(Cursor keysArray, Map<PublicKey, ? extends Principal> keys) {
    for (Map.Entry<PublicKey, ? extends Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/**
 * Adds the PEM-encoded public key in the request body as a deploy key of the application.
 * Responds with the application's full set of deploy keys.
 */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime response = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), locked -> {
        locked = locked.withDeployKey(deployKey);
        Cursor keysArray = response.setObject().setArray("keys");
        for (PublicKey key : locked.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(locked);
    });
    return new SlimeJsonResponse(response);
}
/**
 * Removes the PEM-encoded public key in the request body from the application's deploy keys.
 * Responds with the application's remaining deploy keys.
 */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime response = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), locked -> {
        locked = locked.withoutDeployKey(deployKey);
        Cursor keysArray = response.setObject().setArray("keys");
        for (PublicKey key : locked.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(locked);
    });
    return new SlimeJsonResponse(response);
}
/**
 * Registers a new AWS secret store for the tenant: creates the tenant policy,
 * registers the store with the secret service, then persists it on the tenant.
 * Responds with the tenant's resulting set of secret stores.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Inspector data = toSlime(request.getData()).get();
    String awsId = mandatory("awsId", data).asString();
    String externalId = mandatory("externalId", data).asString();
    String role = mandatory("role", data).asString();
    CloudTenant cloudTenant = controller.tenants().require(tenant, CloudTenant.class);
    TenantSecretStore secretStore = new TenantSecretStore(name, awsId, role);
    if ( ! secretStore.isValid())
        return ErrorResponse.badRequest("Secret store " + secretStore + " is invalid");
    if (cloudTenant.tenantSecretStores().contains(secretStore))
        return ErrorResponse.badRequest("Secret store " + secretStore + " is already configured");
    controller.serviceRegistry().roleService().createTenantPolicy(tenant, name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(cloudTenant.name(), secretStore, externalId);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, locked -> {
        locked = locked.withSecretStore(secretStore);
        controller.tenants().store(locked);
    });
    // Re-read the tenant so the response reflects the stored state.
    Slime response = new Slime();
    toSlime(response.setObject(), controller.tenants().require(tenant, CloudTenant.class).tenantSecretStores());
    return new SlimeJsonResponse(response);
}
/**
 * Deletes the named secret store from the tenant, removing the secret service
 * registration and the tenant policy first. Responds with the remaining stores.
 */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
    CloudTenant tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    Optional<TenantSecretStore> match = tenant.tenantSecretStores().stream()
                                              .filter(store -> store.getName().equals(name))
                                              .findFirst();
    if (match.isEmpty())
        return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
    TenantSecretStore secretStore = match.get();
    controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), secretStore);
    controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), secretStore.getName(), secretStore.getRole());
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, locked -> {
        locked = locked.withoutSecretStore(secretStore);
        controller.tenants().store(locked);
    });
    // Re-read the tenant so the response reflects the stored state.
    Slime response = new Slime();
    toSlime(response.setObject(), controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).tenantSecretStores());
    return new SlimeJsonResponse(response);
}
/**
 * Grants the AWS IAM role in the request body access to the tenant's archive.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String role = mandatory("role", toSlime(request.getData()).get()).asString();
    if (role.isBlank())
        return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, locked -> {
        controller.tenants().store(locked.withArchiveAccess(locked.get().archiveAccess().withAWSRole(role)));
    });
    return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
/**
 * Revokes AWS archive access for the tenant.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse removeAwsArchiveAccess(String tenantName) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, locked -> {
        controller.tenants().store(locked.withArchiveAccess(locked.get().archiveAccess().removeAWSRole()));
    });
    return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
}
/**
 * Grants the GCP member in the request body access to the tenant's archive.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String member = mandatory("member", toSlime(request.getData()).get()).asString();
    if (member.isBlank())
        return ErrorResponse.badRequest("GCP archive access role can't be whitespace only");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, locked -> {
        controller.tenants().store(locked.withArchiveAccess(locked.get().archiveAccess().withGCPMember(member)));
    });
    return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}
/**
 * Revokes GCP archive access for the tenant.
 *
 * @throws IllegalArgumentException if the tenant is not a cloud tenant
 */
private HttpResponse removeGcpArchiveAccess(String tenantName) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, locked -> {
        controller.tenants().store(locked.withArchiveAccess(locked.get().archiveAccess().removeGCPMember()));
    });
    return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}
/**
 * Applies a partial update to the application: optionally sets (or clears, with value 0)
 * the pinned major version, and optionally adds a deploy key.
 * Responds with a summary of the changes applied.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messages = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), locked -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // A major version of 0 clears the pin.
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            locked = locked.withMajorVersion(majorVersion);
            messages.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }
        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            locked = locked.withDeployKey(KeyUtils.fromPemEncodedPublicKey(pemDeployKey));
            messages.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(locked);
    });
    return new MessageResponse(messages.toString());
}
/** Returns the application with the given id, or throws NotExistsException if it does not exist. */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    Optional<Application> application = controller.applications().getApplication(applicationId);
    if (application.isEmpty())
        throw new NotExistsException(applicationId + " not found");
    return application.get();
}
/** Returns the instance with the given id, or throws NotExistsException if it does not exist. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Optional<Instance> instance = controller.applications().getInstance(applicationId);
    if (instance.isEmpty())
        throw new NotExistsException(applicationId + " not found");
    return instance.get();
}
/**
 * Lists the nodes allocated to the given instance in the given zone,
 * with state, orchestration, version, resources and generation details per node.
 * Field emission order defines the JSON layout, so it is kept as-is.
 */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));
Slime slime = new Slime();
Cursor nodesArray = slime.setObject().setArray("nodes");
for (Node node : nodes) {
Cursor nodeObject = nodesArray.addObject();
nodeObject.setString("hostname", node.hostname().value());
nodeObject.setString("state", valueOf(node.state()));
// Only hosts reserved to a tenant carry this field.
node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
nodeObject.setString("orchestration", valueOf(node.serviceState()));
nodeObject.setString("version", node.currentVersion().toString());
node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
toSlime(node.resources(), nodeObject);
nodeObject.setString("clusterId", node.clusterId());
nodeObject.setString("clusterType", valueOf(node.clusterType()));
nodeObject.setBool("down", node.down());
// A node is reported retired as soon as retirement is requested, not only when completed.
nodeObject.setBool("retired", node.retired() || node.wantToRetire());
// Pending restart/reboot is signalled by the wanted generation being ahead of the current one.
nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
nodeObject.setString("group", node.group());
nodeObject.setLong("index", node.index());
}
return new SlimeJsonResponse(slime);
}
/**
 * Describes the clusters of the given deployment: configured min/max and current
 * resources, autoscaling target and suggestion, utilization, and scaling history.
 */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
Slime slime = new Slime();
Cursor clustersObject = slime.setObject().setObject("clusters");
for (Cluster cluster : application.clusters().values()) {
Cursor clusterObject = clustersObject.setObject(cluster.id().value());
clusterObject.setString("type", cluster.type().name());
toSlime(cluster.min(), clusterObject.setObject("min"));
toSlime(cluster.max(), clusterObject.setObject("max"));
toSlime(cluster.current(), clusterObject.setObject("current"));
// Only report a target when it differs from the current resources (ignoring non-numeric fields).
if (cluster.target().isPresent()
&& ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
toSlime(cluster.target().get(), clusterObject.setObject("target"));
cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
}
return new SlimeJsonResponse(slime);
}
/** Returns the wire name of the given node state; throws on states not part of the API. */
private static String valueOf(Node.State state) {
    return switch (state) {
        case failed -> "failed";
        case parked -> "parked";
        case dirty -> "dirty";
        case ready -> "ready";
        case active -> "active";
        case inactive -> "inactive";
        case reserved -> "reserved";
        case provisioned -> "provisioned";
        case breakfixed -> "breakfixed";
        case deprovisioned -> "deprovisioned";
        default -> throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    };
}
/** Returns the wire name of the given orchestration state; unrecognized states map to "unknown". */
static String valueOf(Node.ServiceState state) {
    return switch (state) {
        case expectedUp -> "expectedUp";
        case allowedDown -> "allowedDown";
        case permanentlyDown -> "permanentlyDown";
        case unorchestrated -> "unorchestrated";
        default -> "unknown";
    };
}
/** Returns the wire name of the given cluster type; "unknown" is not part of the API and throws. */
private static String valueOf(Node.ClusterType type) {
    return switch (type) {
        case admin -> "admin";
        case content -> "content";
        case container -> "container";
        case combined -> "combined";
        case unknown -> throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    };
}
/** Returns the wire name of the given disk speed. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    return switch (diskSpeed) {
        case fast -> "fast";
        case slow -> "slow";
        case any -> "any";
    };
}
/** Returns the wire name of the given storage type. */
private static String valueOf(NodeResources.StorageType storageType) {
    return switch (storageType) {
        case remote -> "remote";
        case local -> "local";
        case any -> "any";
    };
}
/**
 * Streams Vespa logs for the given deployment, passing the query parameters
 * through to the config server. The upstream log stream is closed when
 * rendering completes or fails.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
// Pipe the config server's stream straight to the client; try-with-resources closes upstream.
try (logStream) {
logStream.transferTo(outputStream);
}
}
@Override
public long maxPendingBytes() {
return 1 << 26; // Allow up to 64 MiB of buffered response data.
}
};
}
/** Returns the current support access state of the given deployment. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    SupportAccess state = controller.supportAccess().forDeployment(deployment);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(state, controller.clock().instant()));
}
/** Grants support access to the given deployment for 7 days, on behalf of the requesting user. */
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    String requestor = requireUserPrincipal(request).getName();
    Instant now = controller.clock().instant();
    SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), requestor);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}
/**
 * Revokes support access for the given deployment and re-triggers (or queues) a deployment
 * so the revocation takes effect on the nodes.
 */
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    // Reuse the validated principal instead of re-reading it from the request, which may be null.
    controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}
/** Returns proton (content node) metrics for the given deployment as JSON. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment);
    return buildResponseFromProtonMetrics(protonMetrics);
}
/**
 * Returns scaling events per cluster of the given deployment, limited to the
 * window given by the optional "from" and "until" epoch-second parameters.
 */
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    Instant from = Optional.ofNullable(request.getProperty("from"))
                           .map(Long::valueOf)
                           .map(Instant::ofEpochSecond)
                           .orElse(Instant.EPOCH);
    Instant until = Optional.ofNullable(request.getProperty("until"))
                            .map(Long::valueOf)
                            .map(Instant::ofEpochSecond)
                            .orElse(Instant.now(controller.clock()));
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
    Slime response = new Slime();
    Cursor root = response.setObject();
    events.forEach((cluster, clusterEvents) -> scalingEventsToSlime(clusterEvents, root.setArray(cluster.clusterId().value())));
    return new SlimeJsonResponse(response);
}
/**
 * Wraps the given proton metrics in a pretty-printed { "metrics": [ ... ] } JSON response.
 * Serialization failures are logged and reported as an empty 500 response.
 */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var metricsArray = jsonMapper.createArrayNode();
        protonMetrics.forEach(metrics -> metricsArray.add(metrics.toJson()));
        var root = jsonMapper.createObjectNode();
        root.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root));
    }
    catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Triggers the given job for the given instance. The request body may set
 * "reTrigger" to re-run the currently scheduled run, "skipTests" to omit test jobs,
 * and "skipRevision"/"skipUpgrade" to suppress revision/platform changes.
 * Responds with a message naming the triggered job(s) and any suppressed upgrades.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
boolean requireTests = ! requestObject.field("skipTests").asBool();
boolean reTrigger = requestObject.field("reTrigger").asBool();
boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
// Re-trigger yields a single job; force-trigger may yield several (joined by ", ") or none.
String triggered = reTrigger
? controller.applications().deploymentTrigger()
.reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
: controller.applications().deploymentTrigger()
.forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
.stream().map(job -> job.type().jobName()).collect(joining(", "));
// Builds e.g. ", without revision upgrade", ", without platform upgrade",
// or ", without revision and platform upgrade"; empty when nothing is suppressed.
String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
(upgradeRevision ? "" : "revision") +
( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
(upgradePlatform ? "" : "platform") +
( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
: "Triggered " + triggered + " for " + id + suppressedUpgrades);
}
/** Pauses the given job for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant now = controller.clock().instant();
    controller.applications().deploymentTrigger().pauseJob(id, type, now.plus(DeploymentTrigger.maxPause));
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes a previously paused job for the given instance. */
private HttpResponse resume(ApplicationId id, JobType type) {
controller.applications().deploymentTrigger().resumeJob(id, type);
return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/**
 * Re-sends a pending verification mail of the given type ("contact" or "notifications")
 * to the given address; 404 if no verification is pending for that address.
 *
 * @throws IllegalArgumentException on an unknown mail type
 */
private SlimeJsonResponse resendEmailVerification(CloudTenant tenant, Inspector inspector) {
    String mail = mandatory("mail", inspector).asString();
    String type = mandatory("mailType", inspector).asString();
    PendingMailVerification.MailType mailType;
    switch (type) {
        case "contact": mailType = PendingMailVerification.MailType.TENANT_CONTACT; break;
        case "notifications": mailType = PendingMailVerification.MailType.NOTIFICATIONS; break;
        default: throw new IllegalArgumentException("Unknown mail type " + type);
    }
    if (controller.mailVerifier().resendMailVerification(tenant.name(), mail, mailType).isPresent())
        return new MessageResponse("Re-sent verification mail to " + mail);
    return ErrorResponse.notFoundError("No pending mail verification found for " + mail);
}
/**
 * Serializes an application overview: ids, latest version, current and outstanding
 * changes, instances (optionally production only), deploy keys, metrics, activity,
 * and issue references. Field emission order defines the JSON layout.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
object.setString("tenant", application.id().tenant().value());
object.setString("application", application.id().application().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/job/",
request.getUri()).toString());
DeploymentStatus status = controller.jobController().deploymentStatus(application);
application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
application.projectId().ifPresent(id -> object.setLong("projectId", id));
// Change information is reported from the first instance only — NOTE(review): presumably
// representative for the whole application; confirm against the deployment model.
application.instances().values().stream().findFirst().ifPresent(instance -> {
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change(), application);
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
});
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
Cursor instancesArray = object.setArray("instances");
// The request may ask to restrict the listing to production instances.
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes an instance summary: current and outstanding changes, change blockers
 * from the deployment spec, rotation id, and one entry per deployment — full detail
 * when the request asks to recurse, otherwise a reference with a resource URL.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // Removed unused local: status.instanceJobs(...) was computed but never used.
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), status.application());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    addRotationId(object, instance);
    // Order deployments by the spec's declared order when the instance is in the spec.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        // Rotation status is only relevant for production deployments with assigned rotations.
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request)) // Include full deployment information.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else { // Otherwise, emit only a reference to the deployment resource.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Sets "rotationId" on the object to the instance's first assigned rotation, if it has any. */
private void addRotationId(Cursor object, Instance instance) {
    var rotation = instance.rotations().stream()
                           .map(AssignedRotation::rotationId)
                           .findFirst();
    rotation.ifPresent(id -> object.setString("rotationId", id.asString()));
}
    /**
     * Serializes an overview of the given instance to the given cursor: ids, a link to its jobs,
     * latest revision info, ongoing/outstanding changes and change blockers (only when the instance
     * is declared in the deployment spec), its deployments — recursively when requested — deploy
     * keys, service quality metrics, activity, and ownership/issue references.
     */
    private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
        Application application = status.application();
        object.setString("tenant", instance.id().tenant().value());
        object.setString("application", instance.id().application().value());
        object.setString("instance", instance.id().instance().value());
        // Link to the job listing for this instance.
        object.setString("deployments", withPath("/application/v4" +
                                                 "/tenant/" + instance.id().tenant().value() +
                                                 "/application/" + instance.id().application().value() +
                                                 "/instance/" + instance.id().instance().value() + "/job/",
                                                 request.getUri()).toString());
        // Source metadata of the latest known revision, when available.
        application.revisions().last().ifPresent(version -> {
            version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
            version.commit().ifPresent(commit -> object.setString("commit", commit));
        });
        application.projectId().ifPresent(id -> object.setLong("projectId", id));
        // Change and blocker info only applies to instances declared in deployment.xml.
        if (application.deploymentSpec().instance(instance.name()).isPresent()) {
            Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();
            if ( ! instance.change().isEmpty())
                toSlime(object.setObject("deploying"), instance.change(), application);
            if ( ! status.outstandingChange(instance.name()).isEmpty())
                toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
            Cursor changeBlockers = object.setArray("changeBlockers");
            application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
                Cursor changeBlockerObject = changeBlockers.addObject();
                changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
                changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
                changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
                Cursor days = changeBlockerObject.setArray("days");
                changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
                Cursor hours = changeBlockerObject.setArray("hours");
                changeBlocker.window().hours().forEach(hours::addLong);
            }));
        }
        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
        addRotationId(object, instance);
        // Deployments are ordered by the spec when the instance is declared; insertion order otherwise.
        List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
                                                  .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                                  .orElse(List.copyOf(instance.deployments().values()));
        Cursor instancesArray = object.setArray("instances");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = instancesArray.addObject();
            if (deployment.zone().environment() == Environment.prod) {
                // Rotation status: a single rotation is reported as "bcpStatus"; endpoint status is
                // reported only in the non-recursive case (the recursive toSlime covers it itself).
                if (instance.rotations().size() == 1) {
                    toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                            deploymentObject);
                }
                if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                    toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
                }
            }
            if (recurseOverDeployments(request))
                toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
            else {
                // Shallow form: just enough to identify and link to the deployment.
                deploymentObject.setString("environment", deployment.zone().environment().value());
                deploymentObject.setString("region", deployment.zone().region().value());
                deploymentObject.setString("instance", instance.id().instance().value());
                deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                           "/environment/" + deployment.zone().environment().value() +
                                                           "/region/" + deployment.zone().region().value(),
                                                           request.getUri()).toString());
            }
        }
        // Also list zones with a planned or ongoing job but no deployment yet, with environment/region only.
        Stream.concat(status.jobSteps().keySet().stream()
                            .filter(job -> job.application().instance().equals(instance.name()))
                            .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                      controller.jobController().active(instance.id()).stream()
                                .map(run -> run.id().job())
                                .filter(job -> job.type().environment().isManuallyDeployed()))
              .map(job -> job.type().zone())
              .filter(zone -> ! instance.deployments().containsKey(zone))
              .forEach(zone -> {
                  Cursor deploymentObject = instancesArray.addObject();
                  deploymentObject.setString("environment", zone.environment().value());
                  deploymentObject.setString("region", zone.region().value());
              });
        // "pemDeployKey" (singular) is kept for backwards compatibility; "pemDeployKeys" lists all.
        application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
        application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
    }
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
Instance instance = controller.applications().getInstance(id)
.orElseThrow(() -> new NotExistsException(id + " not found"));
DeploymentId deploymentId = new DeploymentId(instance.id(),
requireZone(environment, region));
Deployment deployment = instance.deployments().get(deploymentId.zoneId());
if (deployment == null)
throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
Slime slime = new Slime();
toSlime(slime.setObject(), deploymentId, deployment, request);
return new SlimeJsonResponse(slime);
}
private void toSlime(Cursor object, Change change, Application application) {
change.platform().ifPresent(version -> object.setString("version", version.toString()));
change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision)));
}
    /** Serializes the given endpoint to the given cursor. The statement order fixes the JSON field order. */
    private void toSlime(Endpoint endpoint, Cursor object) {
        object.setString("cluster", endpoint.cluster().value());
        object.setBool("tls", endpoint.tls());
        object.setString("url", endpoint.url().toString());
        object.setString("scope", endpointScopeString(endpoint.scope()));
        object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
        object.setBool("legacy", endpoint.legacy());
    }
    /**
     * Serializes the given deployment to the given cursor: ids, its zone and declared endpoints,
     * links to clusters/nodes/monitoring, platform and revision versions, deploy time and expiry,
     * rotation status, job status, quota/cost, archive URI, activity and metrics.
     */
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        response.setString("tenant", deploymentId.applicationId().tenant().value());
        response.setString("application", deploymentId.applicationId().application().value());
        response.setString("instance", deploymentId.applicationId().instance().value());
        response.setString("environment", deploymentId.zoneId().environment().value());
        response.setString("region", deploymentId.zoneId().region().value());
        var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
        boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
        var endpointArray = response.setArray("endpoints");
        // Zone-scoped endpoints; legacy and non-direct ones are omitted unless explicitly requested.
        EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
                                               .scope(Endpoint.Scope.zone);
        if (!legacyEndpoints) {
            zoneEndpoints = zoneEndpoints.not().legacy().direct();
        }
        for (var endpoint : zoneEndpoints) {
            toSlime(endpoint, endpointArray.addObject());
        }
        // Declared (global/application-level) endpoints targeting this deployment, same filtering.
        EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
                                                  .targets(deploymentId);
        if (!legacyEndpoints) {
            declaredEndpoints = declaredEndpoints.not().legacy().direct();
        }
        for (var endpoint : declaredEndpoints) {
            toSlime(endpoint, endpointArray.addObject());
        }
        response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
        response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", application.revisions().get(deployment.revision()).stringId());
        response.setLong("build", deployment.revision().number());
        Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
        response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
        // Expiry only applies to zones with a configured deployment TTL (e.g., dev/perf).
        controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
                  .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));
        application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
        var instance = application.instances().get(deploymentId.applicationId().instance());
        if (instance != null) {
            // Rotation status is only relevant for prod deployments with assigned rotations.
            if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
            if (!deployment.zone().environment().isManuallyDeployed()) {
                // Orchestrated deployment: report progress of the zone's deployment job step.
                DeploymentStatus status = controller.jobController().deploymentStatus(application);
                JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
                Optional.ofNullable(status.jobSteps().get(jobId))
                        .ifPresent(stepStatus -> {
                            JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
                            if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
                                response.setString("status", "complete");
                            else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                                response.setString("status", "pending");
                            else
                                response.setString("status", "running");
                        });
            } else {
                // Manual (dev/perf) deployment: derive status from the last run alone.
                var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
                deploymentRun.ifPresent(run -> {
                    response.setString("status", run.hasEnded() ? "complete" : "running");
                });
            }
        }
        response.setDouble("quota", deployment.quota().rate());
        deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
        controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)
                  .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));
        Cursor activity = response.setObject("activity");
        deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                                 instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                                  instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
        metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
    }
private void toSlime(RotationState state, Cursor object) {
Cursor bcpStatus = object.setObject("bcpStatus");
bcpStatus.setString("rotationStatus", rotationStateString(state));
}
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
var array = object.setArray("endpointStatus");
for (var rotation : rotations) {
var statusObject = array.addObject();
var targets = status.of(rotation.rotationId());
statusObject.setString("endpointId", rotation.endpointId().id());
statusObject.setString("rotationId", rotation.rotationId().asString());
statusObject.setString("clusterId", rotation.clusterId().value());
statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
}
}
    /** Returns the monitoring system ("yamas") URI for the given deployment, from the zone registry. */
    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
    }
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
ZoneId zone = requireZone(environment, region);
Deployment deployment = instance.deployments().get(zone);
if (deployment == null) {
throw new NotExistsException(instance + " has no deployment in " + zone);
}
DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
controller.routing().of(deploymentId).setRoutingStatus(status, agent);
return new MessageResponse(Text.format("Successfully set %s in %s %s service",
instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
requireZone(environment, region));
Slime slime = new Slime();
Cursor array = slime.setObject().setArray("globalrotationoverride");
Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
.requiresRotation()
.primary();
if (primaryEndpoint.isPresent()) {
DeploymentRoutingContext context = controller.routing().of(deploymentId);
RoutingStatus status = context.routingStatus();
array.addString(primaryEndpoint.get().upstreamName(deploymentId));
Cursor statusObject = array.addObject();
statusObject.setString("status", status.value().name());
statusObject.setString("reason", "");
statusObject.setString("agent", status.agent().name());
statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
}
return new SlimeJsonResponse(slime);
}
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
Instance instance = controller.applications().requireInstance(applicationId);
ZoneId zone = requireZone(environment, region);
RotationId rotation = findRotationId(instance, endpointId);
Deployment deployment = instance.deployments().get(zone);
if (deployment == null) {
throw new NotExistsException(instance + " has no deployment in " + zone);
}
Slime slime = new Slime();
Cursor response = slime.setObject();
toSlime(instance.rotationStatus().of(rotation, deployment), response);
return new SlimeJsonResponse(slime);
}
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
Slime slime = new Slime();
Cursor root = slime.setObject();
if ( ! instance.change().isEmpty()) {
instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString()));
root.setBool("pinned", instance.change().isPinned());
}
return new SlimeJsonResponse(slime);
}
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
requireZone(environment, region));
boolean suspended = controller.applications().isSuspended(deploymentId);
Slime slime = new Slime();
Cursor response = slime.setObject();
response.setBool("suspended", suspended);
return new SlimeJsonResponse(slime);
}
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId,
serviceName,
DomainName.of(host),
HttpURL.Path.parse("/status").append(restPath),
Query.empty().add(request.getJDiscRequest().parameters()));
}
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
return controller.serviceRegistry().configServer().getServiceNodes(deploymentId);
}
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Query query = Query.empty().add(request.getJDiscRequest().parameters());
query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString());
return controller.serviceRegistry().configServer().getServiceNodePage(
deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
}
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
}
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
getTenantOrThrow(tenantName);
TenantName tenant = TenantName.from(tenantName);
Inspector requestObject = toSlime(request.getData()).get();
controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}
private HttpResponse createTenant(String tenantName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
Inspector requestObject = toSlime(request.getData()).get();
controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
if (controller.system().isPublic()) {
User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
.info()
.withContact(TenantContact.from(user.name(), new Email(user.email(), true)));
controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(info);
controller.tenants().store(lockedTenant);
});
}
return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
Application application = controller.applications().createApplication(id, credentials);
Slime slime = new Slime();
toSlime(id, slime.setObject(), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
if (controller.applications().getApplication(applicationId).isEmpty())
createApplication(tenantName, applicationName, request);
controller.applications().createInstance(applicationId.instance(instanceName), Tags.empty());
Slime slime = new Slime();
toSlime(applicationId.instance(instanceName), slime.setObject(), request);
return new SlimeJsonResponse(slime);
}
    /**
     * Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9".
     * An empty version means the current system version. Non-active versions may only be
     * forced by operators. With {@code pin}, the change is pinned to the version.
     */
    private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
        String versionString = readToString(request.getData());
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            Version version = Version.fromString(versionString);
            VersionStatus versionStatus = controller.readVersionStatus();
            // Empty version in the request body means "deploy the current system version".
            if (version.equals(Version.emptyVersion))
                version = controller.systemVersion(versionStatus);
            if ( ! versionStatus.isActive(version) && ! isOperator(request))
                throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                                   "Version is not active in this system. " +
                                                   "Active versions: " + versionStatus.versions()
                                                                                      .stream()
                                                                                      .map(VespaVersion::versionNumber)
                                                                                      .map(Version::toString)
                                                                                      .collect(joining(", ")));
            Change change = Change.of(version);
            if (pin)
                change = change.withPin();
            controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
Inspector buildField = toSlime(request.getData()).get().field("build");
long build = buildField.valid() ? buildField.asLong() : -1;
StringBuilder response = new StringBuilder();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
RevisionId revision = build == -1 ? application.get().revisions().last().get().id()
: getRevision(application.get(), build);
Change change = Change.of(revision);
controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
response.append("Triggered ").append(change).append(" for ").append(id);
});
return new MessageResponse(response.toString());
}
private RevisionId getRevision(Application application, long build) {
return application.revisions().withPackage().stream()
.map(ApplicationVersion::id)
.filter(version -> version.number() == build)
.findFirst()
.filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(),
application.id().application(),
build))
.orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
}
private HttpResponse cancelBuild(String tenantName, String applicationName, String build){
TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
controller.applications().lockApplicationOrThrow(id, application -> {
controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
for (Instance instance : application.get().instances().values())
if (instance.change().revision().equals(Optional.of(revision)))
controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION);
});
return new MessageResponse("Marked build '" + build + "' as non-deployable");
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
StringBuilder response = new StringBuilder();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
Change change = application.get().require(id.instance()).change();
if (change.isEmpty()) {
response.append("No deployment in progress for ").append(id).append(" at this time");
return;
}
ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
controller.applications().deploymentTrigger().cancelChange(id, cancel);
response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
});
return new MessageResponse(response.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
.flatMap(clusters -> Stream.of(clusters.split(",")))
.filter(cluster -> ! cluster.isBlank())
.toList();
List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
.flatMap(types -> Stream.of(types.split(",")))
.filter(type -> ! type.isBlank())
.toList();
Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
boolean indexedOnly = request.getBooleanProperty("indexedOnly");
controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed);
return new MessageResponse("Requested reindexing of " + id + " in " + zone +
(clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
(documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) +
(indexedOnly ? ", for indexed types" : "") +
(speed != null ? ", with speed " + speed : ""));
}
    /** Gets reindexing status of an application in a zone. */
    private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        root.setBool("enabled", reindexing.enabled());
        Cursor clustersArray = root.setArray("clusters");
        // Clusters — and document types within each cluster — are sorted by name for stable output.
        reindexing.clusters().entrySet().stream().sorted(comparingByKey())
                  .forEach(cluster -> {
                      Cursor clusterObject = clustersArray.addObject();
                      clusterObject.setString("name", cluster.getKey());
                      // Types still waiting for reindexing, with the config generation that requires it.
                      Cursor pendingArray = clusterObject.setArray("pending");
                      cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                             .forEach(pending -> {
                                 Cursor pendingObject = pendingArray.addObject();
                                 pendingObject.setString("type", pending.getKey());
                                 pendingObject.setLong("requiredGeneration", pending.getValue());
                             });
                      // Types whose reindexing is ready/underway/done, with detailed status per type.
                      Cursor readyArray = clusterObject.setArray("ready");
                      cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                             .forEach(ready -> {
                                 Cursor readyObject = readyArray.addObject();
                                 readyObject.setString("type", ready.getKey());
                                 setStatus(readyObject, ready.getValue());
                             });
                  });
        return new SlimeJsonResponse(slime);
    }
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
status.message().ifPresent(message -> statusObject.setString("message", message));
status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
}
private static String toString(ApplicationReindexing.State state) {
return switch (state) {
case PENDING: yield "pending";
case RUNNING: yield "running";
case FAILED: yield "failed";
case SUCCESSFUL: yield "successful";
};
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
controller.applications().enableReindexing(id, zone);
return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
controller.applications().disableReindexing(id, zone);
return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
requireZone(environment, region));
RestartFilter restartFilter = new RestartFilter()
.withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of))
.withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
.withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
controller.applications().restart(deploymentId, restartFilter);
return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(application, requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    String verb = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(verb + " orchestration of " + deploymentId);
}
/**
 * Deploys the application package in the request directly to the given job's zone.
 * Only manually deployed environments (dev/perf) are allowed, unless the caller is an operator.
 *
 * @throws IllegalArgumentException if the environment is not manually deployed (for non-operators),
 *                                  or the 'applicationZip' form part is missing
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the same key constant for the presence check and the lookup: the original code checked the
    // literal "applicationZip" but read EnvironmentResource.APPLICATION_ZIP, which had to agree by coincidence.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone()),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // Parse the optional 'deployOptions' JSON part once, then read both fields from it
    // (it was previously deserialized twice).
    Optional<Cursor> deployOptions = Optional.ofNullable(dataParts.get("deployOptions"))
                                             .map(json -> SlimeUtils.jsonToSlime(json).get());
    Optional<Version> version = deployOptions.flatMap(options -> optional("vespaVersion", options))
                                             .map(Version::fromString);
    boolean dryRun = deployOptions.flatMap(options -> optional("dryRun", options))
                                  .map(Boolean::valueOf)
                                  .orElse(false);

    ensureApplicationExists(TenantAndApplicationId.from(id), request);

    controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys a system application to the given zone. Regular applications are rejected; their deployments
 * go through the job-based APIs. The version is always the current system version — requesting an
 * explicit version is an error.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
// Multipart form data; the 'deployOptions' part is mandatory.
Map<String, byte[]> dataParts = parseDataParts(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
// Only system applications which carry an application package may be deployed through this API.
Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) {
return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
}
// An explicit version (anything other than empty or the literal string "null") is rejected.
String vespaVersion = deployOptions.field("vespaVersion").asString();
if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) {
return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
}
// Refuse deployment while the system itself is upgrading, or before a system version is known.
VersionStatus versionStatus = controller.readVersionStatus();
if (versionStatus.isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
// Deploy at the current system version.
ActivateResult result = controller.applications()
.deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant; 'forget' (hard delete) is restricted to operators. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    boolean forget = request.getBooleanProperty("forget");
    if (forget && !isOperator(request))
        return ErrorResponse.forbidden("Only operators can forget a tenant");
    TenantName tenant = TenantName.from(tenantName);
    Credentials credentials = accessControlRequests.credentials(tenant,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(tenant, Optional.of(credentials), forget);
    return new MessageResponse("Deleted tenant " + tenantName);
}
/** Deletes the given application, authorizing with credentials from the request body. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Cursor requestData = toSlime(request.getData()).get();
    controller.applications().deleteApplication(id, accessControlRequests.credentials(id.tenant(),
                                                                                      requestData,
                                                                                      request.getJDiscRequest()));
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance, and the whole application if this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    // Removing the last instance also removes the application itself, which requires credentials.
    boolean noInstancesLeft = controller.applications().requireApplication(id).instances().isEmpty();
    if (noInstancesLeft) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}
/** Deactivates the given deployment, aborting any still-running deployment job towards its zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId id = new DeploymentId(application, requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    // If a deployment job towards this zone is still running, abort it, recording who triggered this.
    controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
              .filter(run -> ! run.hasEnded())
              .ifPresent(run -> controller.jobController().abort(run.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
// Fall back to the default instance when the requested instance is not declared in deployment.xml.
ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
? id : TenantAndApplicationId.from(id).defaultInstance();
// Start from all production deployments of the chosen instance.
HashSet<DeploymentId> deployments = controller.applications()
.getInstance(prodInstanceId).stream()
.flatMap(instance -> instance.productionDeployments().keySet().stream())
.map(zone -> new DeploymentId(prodInstanceId, zone))
.collect(Collectors.toCollection(HashSet::new));
// Production tests target the (possibly defaulted) production instance; other tests target the given instance,
// and its own zone is added to the deployments to consider.
ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
if ( ! type.isProduction())
deployments.add(new DeploymentId(toTest, type.zone()));
Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone());
if (deployment == null)
throw new NotExistsException(toTest + " is not deployed in " + type.zone());
return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
type,
false,
deployment.version(),
deployment.revision(),
deployment.at(),
controller.routing().readTestRunnerEndpointsOf(deployments),
controller.applications().reachableContentClustersByZone(deployments)));
}
/**
 * Requests a service dump from the given node by writing a "serviceDump" report to the node repository,
 * which the node's agent acts on. Refuses to overwrite an in-progress dump unless 'force' is given.
 * With 'wait', blocks until the dump completes or fails and returns the final report.
 */
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
String region, String hostname, HttpRequest request) {
NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
ZoneId zone = requireZone(environment, region);
// An existing report with neither failedAt nor completedAt set means a dump is still in progress.
Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
if (report != null) {
Cursor cursor = report.get();
boolean force = request.getBooleanProperty("force");
if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
}
}
// The request body must be JSON with at least 'configId' and a non-empty 'artifacts' array.
Slime requestPayload;
try {
requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
} catch (Exception e) {
throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
}
Cursor requestPayloadCursor = requestPayload.get();
String configId = requestPayloadCursor.field("configId").asString();
long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
if (configId.isEmpty()) {
throw new IllegalArgumentException("Missing configId");
}
Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
int artifactEntries = artifactsCursor.entries();
if (artifactEntries == 0) {
throw new IllegalArgumentException("Missing or empty 'artifacts'");
}
// Build the report to store: createdMillis + configId + artifacts, and optionally expiresAt and dumpOptions.
Slime dumpRequest = new Slime();
Cursor dumpRequestCursor = dumpRequest.setObject();
dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
dumpRequestCursor.setString("configId", configId);
Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
for (int i = 0; i < artifactEntries; i++) {
dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
}
if (expiresAt > 0) {
dumpRequestCursor.setLong("expiresAt", expiresAt);
}
Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
if (dumpOptionsCursor.children() > 0) {
SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
}
// Store the new report on the node; this replaces any previous "serviceDump" report.
var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
nodeRepository.updateReports(zone, hostname, reportsUpdate);
boolean wait = request.getBooleanProperty("wait");
if (!wait) return new MessageResponse("Request created");
return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
}
/** Returns the current service dump report for the given node, or 404 if it has none. */
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment,
                                    String region, String hostname, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    Optional<Slime> report = getReport(nodeRepository, zone, tenant, application, instance, hostname);
    if (report.isEmpty())
        throw new NotExistsException("No service dump for node " + hostname);
    return new SlimeJsonResponse(report.get());
}
/**
 * Polls the node repository until the node's service dump report has either completed or failed,
 * then returns it. Blocks the handler thread, polling every 2 seconds, with no explicit upper bound
 * — NOTE(review): presumably bounded by the request timeout; confirm.
 */
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
String application, String instance, String hostname) {
int pollInterval = 2;
Slime report;
while (true) {
// Optional.get(): the caller has just written the report, so it is expected to exist here.
report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get();
Cursor cursor = report.get();
// Terminal when either completedAt or failedAt is set (non-zero).
if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
break;
}
final Slime copyForLambda = report;
log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda))));
log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
}
return new SlimeJsonResponse(report);
}
/**
 * Returns the "serviceDump" report of the given node as slime, or empty if the node has no such report.
 * The node must exist and be owned by the given application.
 */
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                  String application, String instance, String hostname) {
    Node node;
    try {
        node = nodeRepository.getNode(zone, hostname);
    } catch (IllegalArgumentException e) {
        throw new NotExistsException(hostname);
    }
    ApplicationId expectedOwner = ApplicationId.from(tenant, application, instance);
    ApplicationId actualOwner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
    if ( ! expectedOwner.equals(actualOwner))
        throw new IllegalArgumentException("Node is not owned by " + expectedOwner.toFullString());
    return Optional.ofNullable(node.reports().get("serviceDump"))
                   .map(SlimeUtils::jsonToSlimeOrThrow);
}
/** Reads a source revision from the given object; all of repository, branch and commit must be present. */
private static SourceRevision toSourceRevision(Inspector object) {
    boolean complete =    object.field("repository").valid()
                       && object.field("branch").valid()
                       && object.field("commit").valid();
    if ( ! complete)
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(object.field("repository").asString(),
                              object.field("branch").asString(),
                              object.field("commit").asString());
}
/** Returns the tenant with the given name, or throws NotExistsException. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    return tenant.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/**
 * Serializes the given tenant, with type-specific details and a summary of each of its applications,
 * to the given object. Query parameters on the request control which instances are listed and how
 * much detail each gets.
 */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
// Contact info is optional; when present, contacts are serialized as an array of arrays of names.
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
Cursor contactsArray = object.setArray("contacts");
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
case cloud: {
CloudTenant cloudTenant = (CloudTenant) tenant;
cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
cloudTenant.developerKeys().forEach((key, user) -> {
Cursor keyObject = pemDeveloperKeysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", user.getName());
});
toSlime(object, cloudTenant.tenantSecretStores());
toSlime(object.setObject("integrations").setObject("aws"),
controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
cloudTenant.tenantSecretStores());
// Quota lookup is best-effort: on failure we log a warning and leave out the "quota" field.
try {
var usedQuota = applications.stream()
.map(Application::quotaUsage)
.reduce(QuotaUsage.none, QuotaUsage::add);
toSlime(object.setObject("quota"), usedQuota);
} catch (Exception e) {
log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e)));
}
// NOTE(review): "archiveAccessRole" duplicates archiveAccess.awsRole — presumably kept for
// backwards compatibility with older clients; confirm before removing.
cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role));
toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess"));
break;
}
case deleted: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
Cursor applicationArray = object.setArray("applications");
for (Application application : applications) {
DeploymentStatus status = null;
// 'production' limits the listing to production instances; 'activeInstances' skips instances without deployments.
Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values();
if (instances.isEmpty() && !showOnlyActiveInstances(request))
toSlime(application.id(), applicationArray.addObject(), request);
for (Instance instance : instances) {
if (showOnlyActiveInstances(request) && instance.deployments().isEmpty())
continue;
if (recurseOverApplications(request)) {
// Deployment status is computed lazily, at most once per application.
if (status == null) status = controller.jobController().deploymentStatus(application);
toSlime(applicationArray.addObject(), instance, status, request);
} else {
toSlime(instance.id(), applicationArray.addObject(), request);
}
}
}
tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Serializes archive access, writing only the members which are actually present. */
private void toSlime(ArchiveAccess archiveAccess, Cursor object) {
    Optional<String> awsRole = archiveAccess.awsRole();
    Optional<String> gcpMember = archiveAccess.gcpMember();
    awsRole.ifPresent(value -> object.setString("awsRole", value));
    gcpMember.ifPresent(value -> object.setString("gcpMember", value));
}
/** Serializes quota usage as the consumed budget rate. */
private void toSlime(Cursor object, QuotaUsage usage) {
    double budgetUsed = usage.rate();
    object.setDouble("budgetUsed", budgetUsed);
}
/** Serializes cluster resources, including the cost computed for this system. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    object.setDouble("cost", ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()));
}
/** Serializes cluster utilization: actual, ideal, current and peak values for cpu, memory and disk. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
// cpu
utilizationObject.setDouble("cpu", utilization.cpu());
utilizationObject.setDouble("idealCpu", utilization.idealCpu());
utilizationObject.setDouble("currentCpu", utilization.currentCpu());
utilizationObject.setDouble("peakCpu", utilization.peakCpu());
// memory
utilizationObject.setDouble("memory", utilization.memory());
utilizationObject.setDouble("idealMemory", utilization.idealMemory());
utilizationObject.setDouble("currentMemory", utilization.currentMemory());
utilizationObject.setDouble("peakMemory", utilization.peakMemory());
// disk
utilizationObject.setDouble("disk", utilization.disk());
utilizationObject.setDouble("idealDisk", utilization.idealDisk());
utilizationObject.setDouble("currentDisk", utilization.currentDisk());
utilizationObject.setDouble("peakDisk", utilization.peakDisk());
}
/** Serializes each scaling event with its from/to resources, start time, and completion time if finished. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    scalingEvents.forEach(event -> {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
        event.completion().ifPresent(completedAt -> eventObject.setLong("completion", completedAt.toEpochMilli()));
    });
}
/** Serializes per-node resources: compute, memory, disk, bandwidth, and disk/storage qualifiers. */
private void toSlime(NodeResources resources, Cursor object) {
object.setDouble("vcpu", resources.vcpu());
object.setDouble("memoryGb", resources.memoryGb());
object.setDouble("diskGb", resources.diskGb());
object.setDouble("bandwidthGbps", resources.bandwidthGbps());
object.setString("diskSpeed", valueOf(resources.diskSpeed()));
object.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a brief entry for the given tenant in a tenant listing: name, metadata, and its URL. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz -> {
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
        }
        case cloud, deleted -> { }
        default -> throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Serializes tenant metadata: creation (and deletion) time, time of the last dev deployment and the
 * last production submission across all the tenant's applications, and last-login times per user level.
 */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
// Last dev deployment: prefer the start time recorded on current dev deployments; if there are none,
// fall back to the start of the most recent dev job run.
Optional<Instant> lastDev = applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> instance.deployments().values().stream()
.filter(deployment -> deployment.zone().environment() == Environment.dev)
.map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment)))
.max(Comparator.naturalOrder())
.or(() -> applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream()
.filter(job -> job.environment() == Environment.dev)
.flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
.map(Run::start)
.max(Comparator.naturalOrder()));
// Last submission: build time of the latest submitted revision, across all applications.
Optional<Instant> lastSubmission = applications.stream()
.flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream())
.max(Comparator.naturalOrder());
object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
if (tenant.type() == Tenant.Type.deleted)
object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli());
lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
.ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
.ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
.ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
try {
// Fragment is always dropped; scheme, user info, host and port are carried over unchanged.
return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
}
catch (URISyntaxException e) {
// Inputs come from an already-parsed URI plus paths we construct, so this cannot occur in practice.
throw new RuntimeException("Will not happen", e);
}
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
// Convenience overload: same as withPathAndQuery with no query component.
return withPathAndQuery(newPath, null, uri);
}
/** Returns the API v4 path for the given deployment. */
private String toPath(DeploymentId id) {
    ApplicationId application = id.applicationId();
    ZoneId zone = id.zoneId();
    return path("/application", "v4",
                "tenant", application.tenant(),
                "application", application.application(),
                "instance", application.instance(),
                "environment", zone.environment(),
                "region", zone.region());
}
/**
 * Parses the given string as a long, returning the given default when the string is null.
 *
 * @throws IllegalArgumentException if the string is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Chain the cause so the original parse failure is not lost.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/**
 * Reads the given stream (at most 1 MB) as JSON and returns it as slime.
 *
 * @throws RuntimeException wrapping any IOException from reading the stream
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Previously thrown without the cause, which made read failures impossible to diagnose.
        throw new RuntimeException(e);
    }
}
/** Returns the user principal of the given request, or throws if the request has none. */
private static Principal requireUserPrincipal(HttpRequest request) {
    return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal())
                   .orElseThrow(() -> new IllegalArgumentException("Expected a user principal"));
}
/** Returns the field with the given key, or throws if it is missing. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the string value of the field with the given key, or empty if it is missing. */
private Optional<String> optional(String key, Inspector object) {
    Inspector field = object.field(key);
    return SlimeUtils.optionalString(field);
}
/** Joins the string forms of the given elements with '/' separators. */
private static String path(Object... elements) {
    return Arrays.stream(elements)
                 .map(Object::toString)
                 .collect(Collectors.joining("/"));
}
/** Serializes a tenant-and-application reference, with its API URL. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String apiPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/** Serializes an application instance reference, with its API URL. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String apiPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value() +
                     "/instance/" + id.instance().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/**
 * Serializes an activation result: the revision id, package size, config server prepare log,
 * and the config change actions (restarts and refeeds) required by the deployment.
 */
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
// Prepare log messages from the config server, if any.
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
// Services which must be restarted for the new config to take effect.
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
// Document types which must be re-fed due to incompatible schema changes.
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
/** Serializes each service info as an object with its name, type, config id and host. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(service -> {
        Cursor object = array.addObject();
        object.setString("serviceName", service.serviceName);
        object.setString("serviceType", service.serviceType);
        object.setString("configId", service.configId);
        object.setString("hostName", service.hostName);
    });
}
/** Adds each of the given strings to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Serializes the tenant's secret stores under a "secretStores" array. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor secretStores = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(secretStores.addObject(), store);
}
/** Serializes the tenant's container role and its secret store accounts. */
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    Cursor accounts = object.setArray("accounts");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(accounts.addObject(), store);
}
/** Serializes a single tenant secret store: its name, AWS account id and role. */
private void toSlime(Cursor object, TenantSecretStore secretStore) {
object.setString("name", secretStore.getName());
object.setString("awsId", secretStore.getAwsId());
object.setString("role", secretStore.getRole());
}
/** Reads the entire stream as a single UTF-8 string, or returns null if the stream is empty. */
private String readToString(InputStream stream) {
    // "\\A" makes the scanner return the whole remaining input as one token.
    // The charset is given explicitly: the no-charset Scanner constructor uses the platform
    // default, which made the result environment-dependent.
    Scanner scanner = new Scanner(stream, java.nio.charset.StandardCharsets.UTF_8).useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}
/** Returns whether the 'recursive' parameter asks for recursion at tenant level or deeper. */
private static boolean recurseOverTenants(HttpRequest request) {
    return "tenant".equals(request.getProperty("recursive")) || recurseOverApplications(request);
}
/** Returns whether the 'recursive' parameter asks for recursion at application level or deeper. */
private static boolean recurseOverApplications(HttpRequest request) {
    return "application".equals(request.getProperty("recursive")) || recurseOverDeployments(request);
}
/** Returns whether the 'recursive' parameter asks for recursion down to deployment level. */
private static boolean recurseOverDeployments(HttpRequest request) {
    // The property may be absent (null), so compare with equals on the literals rather than
    // using a set lookup: this drops the Guava ImmutableSet dependency, and Set.of(...).contains(null)
    // would throw NPE.
    String recursive = request.getProperty("recursive");
    return "all".equals(recursive) || "true".equals(recursive) || "deployment".equals(recursive);
}
/** Returns whether only production instances should be listed. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    String production = request.getProperty("production");
    return "true".equals(production);
}
/** Returns whether instances without deployments should be omitted. */
private static boolean showOnlyActiveInstances(HttpRequest request) {
    String activeInstances = request.getProperty("activeInstances");
    return "true".equals(activeInstances);
}
/** Returns whether deleted entities should be included in the response. */
private static boolean includeDeleted(HttpRequest request) {
    String includeDeleted = request.getProperty("includeDeleted");
    return "true".equals(includeDeleted);
}
/** Returns the API string form of the given tenant's type. */
private static String tenantType(Tenant tenant) {
    return switch (tenant.type()) {
        case athenz -> "ATHENS";
        case cloud -> "CLOUD";
        case deleted -> "DELETED";
    };
}
/** Reads the application instance id from the tenant/application/instance path segments. */
private static ApplicationId appIdFromPath(Path path) {
    String tenant = path.get("tenant");
    String application = path.get("application");
    String instance = path.get("instance");
    return ApplicationId.from(tenant, application, instance);
}
/** Reads the job type from the 'jobtype' path segment. */
private JobType jobTypeFromPath(Path path) {
    String jobName = path.get("jobtype");
    return JobType.fromJobName(jobName, controller.zoneRegistry());
}
/** Reads a run id — application, job type and run number — from the path. */
private RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), Long.parseLong(path.get("number")));
}
/**
 * Accepts a submission of application (and test) packages for automated deployment: validates the
 * submission options and identity configuration, ensures the application exists, and registers the
 * submission with the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
// A missing/zero project id is normalized to 1.
long projectId = submitOptions.field("projectId").asLong();
projectId = projectId == 0 ? 1 : projectId;
// A source revision is only recorded when all three of repository, branch and commit are given.
Optional<String> repository = optional("repository", submitOptions);
Optional<String> branch = optional("branch", submitOptions);
Optional<String> commit = optional("commit", submitOptions);
Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
: Optional.empty();
Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
Optional<String> authorEmail = optional("authorEmail", submitOptions);
Optional<String> description = optional("description", submitOptions);
int risk = (int) submitOptions.field("risk").asLong();
// A given source URL must be absolute.
sourceUrl.map(URI::create).ifPresent(url -> {
if (url.getHost() == null || url.getScheme() == null)
throw new IllegalArgumentException("Source URL must include scheme and host");
});
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP);
Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
Optional.empty(),
Optional.empty(),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
ensureApplicationExists(id, request);
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
}
/** Submits an empty deployment-removal package, which removes all production deployments. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    Submission removal = new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
                                        Optional.empty(), Optional.empty(), Optional.empty(), 0);
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                 TenantAndApplicationId.from(tenant, application),
                                                 removal,
                                                 0);
    return new MessageResponse("All deployments removed");
}
/** Parses and validates the zone given by the environment and region strings. */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    return requireZone(zone);
}
/** Returns the given zone if it exists in this system; the synthetic prod.controller zone is always accepted. */
private ZoneId requireZone(ZoneId zone) {
    boolean isControllerZone = zone.environment() == Environment.prod && zone.region().value().equals("controller");
    if ( ! isControllerZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart form data of the given request. When an X-Content-Hash header is present,
 * the SHA-256 digest of the raw request body must match its base64-decoded value, guarding against
 * corruption or tampering in transit.
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
String contentHash = request.getHeader("X-Content-Hash");
if (contentHash == null)
return new MultipartParser().parse(request);
// Digest the body while the multipart parser consumes it, then compare with the declared hash.
DigestInputStream digester = Signatures.sha256Digester(request.getData());
var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
return dataParts;
}
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
if (instance.rotations().isEmpty()) {
throw new NotExistsException("global rotation does not exist for " + instance);
}
if (endpointId.isPresent()) {
return instance.rotations().stream()
.filter(r -> r.endpointId().id().equals(endpointId.get()))
.map(AssignedRotation::rotationId)
.findFirst()
.orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
" does not exist for " + instance));
} else if (instance.rotations().size() > 1) {
throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
}
return instance.rotations().get(0).rotationId();
}
private static String rotationStateString(RotationState state) {
return switch (state) {
case in: yield "IN";
case out: yield "OUT";
case unknown: yield "UNKNOWN";
};
}
private static String endpointScopeString(Endpoint.Scope scope) {
return switch (scope) {
case weighted: yield "weighted";
case application: yield "application";
case global: yield "global";
case zone: yield "zone";
};
}
private static String routingMethodString(RoutingMethod method) {
return switch (method) {
case exclusive: yield "exclusive";
case sharedLayer4: yield "sharedLayer4";
};
}
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
.filter(cls::isInstance)
.map(cls::cast)
.orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
return securityContext.roles().stream()
.map(Role::definition)
.anyMatch(definition -> definition == RoleDefinition.hostedOperator);
}
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
if (controller.applications().getApplication(id).isEmpty()) {
if (controller.system().isPublic() || hasOktaContext(request)) {
log.fine("Application does not exist in public, creating: " + id);
var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest());
controller.applications().createApplication(id, credentials);
} else {
log.fine("Application does not exist in hosted, failing: " + id);
throw new IllegalArgumentException("Application does not exist. Create application in Console first.");
}
}
}
private boolean hasOktaContext(HttpRequest request) {
try {
OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context());
return true;
} catch (IllegalArgumentException e) {
return false;
}
}
private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) {
List<ZoneId> productionZones = spec.zones().stream()
.filter(z -> z.region().isPresent())
.map(z -> ZoneId.from(z.environment(), z.region().get()))
.toList();
return deployments.stream()
.sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone())))
.collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
} |
Simplify the below, with cc flavours, as well then ;) | public NodeResources defaultNodeResources(ClusterSpec clusterSpec, ApplicationId applicationId, boolean exclusive) {
if (clusterSpec.type() == ClusterSpec.Type.admin) {
Architecture architecture = adminClusterArchitecture(applicationId);
if (clusterSpec.id().value().equals("cluster-controllers")) {
return clusterControllerResources(clusterSpec, exclusive)
.with(architecture);
}
return (requiresExclusiveHost(clusterSpec.type(), exclusive)
? versioned(clusterSpec, Map.of(new Version(0), smallestExclusiveResources()))
: versioned(clusterSpec, Map.of(new Version(0), smallestSharedResources())))
.with(architecture);
}
if (clusterSpec.type() == ClusterSpec.Type.content) {
return zone.getCloud().dynamicProvisioning()
? versioned(clusterSpec, Map.of(new Version(0), new NodeResources(2.0, 8, 50, 0.3),
new Version(8, 75), new NodeResources(2, 16, 300, 0.3)))
: versioned(clusterSpec, Map.of(new Version(0), new NodeResources(1.5, 8, 50, 0.3),
new Version(8, 75), new NodeResources(2, 16, 300, 0.3)));
}
else {
return zone.getCloud().dynamicProvisioning()
? versioned(clusterSpec, Map.of(new Version(0), new NodeResources(2.0, 8, 50, 0.3)))
: versioned(clusterSpec, Map.of(new Version(0), new NodeResources(1.5, 8, 50, 0.3)));
}
} | public NodeResources defaultNodeResources(ClusterSpec clusterSpec, ApplicationId applicationId, boolean exclusive) {
if (clusterSpec.type() == ClusterSpec.Type.admin) {
Architecture architecture = adminClusterArchitecture(applicationId);
if (clusterSpec.id().value().equals("cluster-controllers")) {
return clusterControllerResources(clusterSpec, exclusive)
.with(architecture);
}
return (requiresExclusiveHost(clusterSpec.type(), exclusive)
? versioned(clusterSpec, Map.of(new Version(0), smallestExclusiveResources()))
: versioned(clusterSpec, Map.of(new Version(0), smallestSharedResources())))
.with(architecture);
}
if (clusterSpec.type() == ClusterSpec.Type.content) {
return zone.getCloud().dynamicProvisioning()
? versioned(clusterSpec, Map.of(new Version(0), new NodeResources(2.0, 8, 50, 0.3),
new Version(8, 75), new NodeResources(2, 16, 300, 0.3)))
: versioned(clusterSpec, Map.of(new Version(0), new NodeResources(1.5, 8, 50, 0.3),
new Version(8, 75), new NodeResources(2, 16, 300, 0.3)));
}
else {
return zone.getCloud().dynamicProvisioning()
? versioned(clusterSpec, Map.of(new Version(0), new NodeResources(2.0, 8, 50, 0.3)))
: versioned(clusterSpec, Map.of(new Version(0), new NodeResources(1.5, 8, 50, 0.3)));
}
} | class CapacityPolicies {
private final Zone zone;
private final JacksonFlag<SharedHost> sharedHosts;
private final StringFlag adminClusterNodeArchitecture;
public CapacityPolicies(NodeRepository nodeRepository) {
this.zone = nodeRepository.zone();
this.sharedHosts = PermanentFlags.SHARED_HOST.bindTo(nodeRepository.flagSource());
this.adminClusterNodeArchitecture = PermanentFlags.ADMIN_CLUSTER_NODE_ARCHITECTURE.bindTo(nodeRepository.flagSource());
}
public Capacity applyOn(Capacity capacity, ApplicationId application, boolean exclusive) {
return capacity.withLimits(applyOn(capacity.minResources(), capacity, application, exclusive),
applyOn(capacity.maxResources(), capacity, application, exclusive));
}
private ClusterResources applyOn(ClusterResources resources, Capacity capacity, ApplicationId application, boolean exclusive) {
int nodes = decideSize(resources.nodes(), capacity.isRequired(), application.instance().isTester());
int groups = Math.min(resources.groups(), nodes);
var nodeResources = decideNodeResources(resources.nodeResources(), capacity.isRequired(), exclusive);
return new ClusterResources(nodes, groups, nodeResources);
}
private int decideSize(int requested, boolean required, boolean isTester) {
if (isTester) return 1;
if (required) return requested;
switch(zone.environment()) {
case dev : case test : return 1;
case perf : return Math.min(requested, 3);
case staging: return requested <= 1 ? requested : Math.max(2, requested / 10);
case prod : return requested;
default : throw new IllegalArgumentException("Unsupported environment " + zone.environment());
}
}
private NodeResources decideNodeResources(NodeResources target, boolean required, boolean exclusive) {
if (required || exclusive) return target;
if (target.isUnspecified()) return target;
if (zone.environment() == Environment.dev && !zone.getCloud().dynamicProvisioning())
target = target.withVcpu(0.1).withBandwidthGbps(0.1);
if (zone.system().isCd() || zone.environment() == Environment.dev || zone.environment() == Environment.test)
target = target.with(NodeResources.DiskSpeed.any).with(NodeResources.StorageType.any).withBandwidthGbps(0.1);
return target;
}
private NodeResources clusterControllerResources(ClusterSpec clusterSpec, boolean exclusive) {
if (requiresExclusiveHost(clusterSpec.type(), exclusive)) {
return versioned(clusterSpec, Map.of(new Version(0), smallestExclusiveResources()));
}
return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(0.25, 1.14, 10, 0.3),
new Version(7, 586, 50), new NodeResources(0.25, 1.333, 10, 0.3),
new Version(7, 586, 54), new NodeResources(0.25, 1.14, 10, 0.3)));
}
private Architecture adminClusterArchitecture(ApplicationId instance) {
return Architecture.valueOf(adminClusterNodeArchitecture.with(APPLICATION_ID, instance.serializedForm()).value());
}
/** Returns whether an exclusive host is required for given cluster type and exclusivity requirement */
private boolean requiresExclusiveHost(ClusterSpec.Type type, boolean exclusive) {
return zone.getCloud().dynamicProvisioning() && (exclusive || !sharedHosts.value().isEnabled(type.name()));
}
/** Returns the resources for the newest version not newer than that requested in the cluster spec. */
static NodeResources versioned(ClusterSpec spec, Map<Version, NodeResources> resources) {
return requireNonNull(new TreeMap<>(resources).floorEntry(spec.vespaVersion()),
"no default resources applicable for " + spec + " among: " + resources)
.getValue();
}
private NodeResources smallestExclusiveResources() {
return (zone.getCloud().name().equals(CloudName.GCP))
? new NodeResources(1, 4, 50, 0.3)
: new NodeResources(0.5, 4, 50, 0.3);
}
private NodeResources smallestSharedResources() {
return (zone.getCloud().name().equals(CloudName.GCP))
? new NodeResources(1, 4, 50, 0.3)
: new NodeResources(0.5, 2, 50, 0.3);
}
/** Returns whether the nodes requested can share physical host with other applications */
public boolean decideExclusivity(Capacity capacity, boolean requestedExclusivity) {
if (capacity.cloudAccount().isPresent()) return true;
return requestedExclusivity && (capacity.isRequired() || zone.environment() == Environment.prod);
}
} | class CapacityPolicies {
private final Zone zone;
private final JacksonFlag<SharedHost> sharedHosts;
private final StringFlag adminClusterNodeArchitecture;
public CapacityPolicies(NodeRepository nodeRepository) {
this.zone = nodeRepository.zone();
this.sharedHosts = PermanentFlags.SHARED_HOST.bindTo(nodeRepository.flagSource());
this.adminClusterNodeArchitecture = PermanentFlags.ADMIN_CLUSTER_NODE_ARCHITECTURE.bindTo(nodeRepository.flagSource());
}
public Capacity applyOn(Capacity capacity, ApplicationId application, boolean exclusive) {
return capacity.withLimits(applyOn(capacity.minResources(), capacity, application, exclusive),
applyOn(capacity.maxResources(), capacity, application, exclusive));
}
private ClusterResources applyOn(ClusterResources resources, Capacity capacity, ApplicationId application, boolean exclusive) {
int nodes = decideSize(resources.nodes(), capacity.isRequired(), application.instance().isTester());
int groups = Math.min(resources.groups(), nodes);
var nodeResources = decideNodeResources(resources.nodeResources(), capacity.isRequired(), exclusive);
return new ClusterResources(nodes, groups, nodeResources);
}
private int decideSize(int requested, boolean required, boolean isTester) {
if (isTester) return 1;
if (required) return requested;
switch(zone.environment()) {
case dev : case test : return 1;
case perf : return Math.min(requested, 3);
case staging: return requested <= 1 ? requested : Math.max(2, requested / 10);
case prod : return requested;
default : throw new IllegalArgumentException("Unsupported environment " + zone.environment());
}
}
private NodeResources decideNodeResources(NodeResources target, boolean required, boolean exclusive) {
if (required || exclusive) return target;
if (target.isUnspecified()) return target;
if (zone.environment() == Environment.dev && !zone.getCloud().dynamicProvisioning())
target = target.withVcpu(0.1).withBandwidthGbps(0.1);
if (zone.system().isCd() || zone.environment() == Environment.dev || zone.environment() == Environment.test)
target = target.with(NodeResources.DiskSpeed.any).with(NodeResources.StorageType.any).withBandwidthGbps(0.1);
return target;
}
private NodeResources clusterControllerResources(ClusterSpec clusterSpec, boolean exclusive) {
if (requiresExclusiveHost(clusterSpec.type(), exclusive)) {
return versioned(clusterSpec, Map.of(new Version(0), smallestExclusiveResources()));
}
return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(0.25, 1.14, 10, 0.3),
new Version(7, 586, 50), new NodeResources(0.25, 1.333, 10, 0.3),
new Version(7, 586, 54), new NodeResources(0.25, 1.14, 10, 0.3)));
}
private Architecture adminClusterArchitecture(ApplicationId instance) {
return Architecture.valueOf(adminClusterNodeArchitecture.with(APPLICATION_ID, instance.serializedForm()).value());
}
/** Returns whether an exclusive host is required for given cluster type and exclusivity requirement */
private boolean requiresExclusiveHost(ClusterSpec.Type type, boolean exclusive) {
return zone.getCloud().dynamicProvisioning() && (exclusive || !sharedHosts.value().isEnabled(type.name()));
}
/** Returns the resources for the newest version not newer than that requested in the cluster spec. */
static NodeResources versioned(ClusterSpec spec, Map<Version, NodeResources> resources) {
return requireNonNull(new TreeMap<>(resources).floorEntry(spec.vespaVersion()),
"no default resources applicable for " + spec + " among: " + resources)
.getValue();
}
private NodeResources smallestExclusiveResources() {
return (zone.getCloud().name().equals(CloudName.GCP))
? new NodeResources(1, 4, 50, 0.3)
: new NodeResources(0.5, 4, 50, 0.3);
}
private NodeResources smallestSharedResources() {
return (zone.getCloud().name().equals(CloudName.GCP))
? new NodeResources(1, 4, 50, 0.3)
: new NodeResources(0.5, 2, 50, 0.3);
}
/** Returns whether the nodes requested can share physical host with other applications */
public boolean decideExclusivity(Capacity capacity, boolean requestedExclusivity) {
if (capacity.cloudAccount().isPresent()) return true;
return requestedExclusivity && (capacity.isRequired() || zone.environment() == Environment.prod);
}
} | |
If these calls are slow, we could also just use a composite value of (class, method), and compare class for identity. | public Method get(Object object, Consumer<String> onPut) {
Method m = cache.get(object.getClass().getName());
if ( m != null && ! m.getDeclaringClass().isAssignableFrom(object.getClass())) {
cache.clear();
m = null;
}
if (m == null) {
m = lookupMethod(object);
if (m != null) {
if (onPut != null)
onPut.accept(object.getClass().getName());
cache.put(object.getClass().getName(), m);
}
}
return m;
} | if ( m != null && ! m.getDeclaringClass().isAssignableFrom(object.getClass())) { | public Method get(Object object, Consumer<String> onPut) {
Pair<Class<?>, Method> pair = cache.get(object.getClass().getName());
if (pair != null && pair.getFirst() != object.getClass()) {
cache.clear();
pair = null;
}
Method method = pair == null ? null : pair.getSecond();
if (pair == null) {
method = lookupMethod(object);
cache.put(object.getClass().getName(), new Pair<>(object.getClass(), method));
if (onPut != null)
onPut.accept(object.getClass().getName());
}
return method;
} | class MethodCache {
private final String methodName;
private final CopyOnWriteHashMap<String, Method> cache = new CopyOnWriteHashMap<>();
public MethodCache(String methodName) {
this.methodName = methodName;
}
/*
Clear all cached methods. Might be a wise thing to do, if you have cached some methods
that have changed due to new bundles being reloaded.
*/
public void clear() {
cache.clear();
}
public Method get(Object object) {
return get(object, null);
}
private Method lookupMethod(Object object) {
try {
return object.getClass().getMethod(methodName);
} catch (NoSuchMethodException e) {
return null;
}
}
} | class MethodCache {
private final String methodName;
private final CopyOnWriteHashMap<String, Pair<Class<?>, Method>> cache = new CopyOnWriteHashMap<>();
public MethodCache(String methodName) {
this.methodName = methodName;
}
/*
Clear all cached methods. Might be a wise thing to do, if you have cached some methods
that have changed due to new bundles being reloaded.
*/
public void clear() {
cache.clear();
}
public Method get(Object object) {
return get(object, null);
}
private Method lookupMethod(Object object) {
try {
return object.getClass().getMethod(methodName);
} catch (NoSuchMethodException e) {
return null;
}
}
} |
I think this response generating code would look nice pulled out into its own function to have a less hefty catch-block | public ContentChannel handleRequest(Request req, ResponseHandler responseHandler) {
var capabilityRequiringHandler =
DelegatedRequestHandler.resolve(CapabilityRequiringRequestHandler.class, wrapped).orElse(null);
var requiredCapabilities = capabilityRequiringHandler != null
? capabilityRequiringHandler.requiredCapabilities(new View(req))
: CapabilityRequiringRequestHandler.DEFAULT_REQUIRED_CAPABILITIES;
var authCtx = Optional.ofNullable(req.context().get(RequestUtils.JDISC_REQUEST_SSLSESSION))
.flatMap(s -> TransportSecurityUtils.getConnectionAuthContext((SSLSession) s))
.orElse(null);
if (authCtx != null) {
var peer = Optional.ofNullable(((HttpRequest)req).getRemoteAddress())
.map(Object::toString).orElse("<unknown>");
String method = ((HttpRequest) req).getMethod().name();
try {
authCtx.verifyCapabilities(requiredCapabilities, method, req.getUri().getPath(), peer);
} catch (MissingCapabilitiesException e) {
int code = HttpResponse.Status.FORBIDDEN;
var resp = new Response(code);
resp.headers().add("Content-Type", "application/json");
ContentChannel ch = responseHandler.handleResponse(resp);
var slime = new Slime();
var root = slime.setObject();
root.setString("error-code", Integer.toString(code));
root.setString("message", "Missing required capabilities");
ch.write(ByteBuffer.wrap(uncheck(() -> SlimeUtils.toJsonBytes(slime))), null);
ch.close(null);
return null;
}
}
return wrapped.handleRequest(req, responseHandler);
} | ch.close(null); | public ContentChannel handleRequest(Request req, ResponseHandler responseHandler) {
var capabilityRequiringHandler =
DelegatedRequestHandler.resolve(CapabilityRequiringRequestHandler.class, wrapped).orElse(null);
var requiredCapabilities = capabilityRequiringHandler != null
? capabilityRequiringHandler.requiredCapabilities(new View(req))
: CapabilityRequiringRequestHandler.DEFAULT_REQUIRED_CAPABILITIES;
var authCtx = Optional.ofNullable(req.context().get(RequestUtils.JDISC_REQUEST_SSLSESSION))
.flatMap(s -> TransportSecurityUtils.getConnectionAuthContext((SSLSession) s))
.orElse(null);
if (authCtx != null) {
var peer = Optional.ofNullable(((HttpRequest)req).getRemoteAddress())
.map(Object::toString).orElse("<unknown>");
String method = ((HttpRequest) req).getMethod().name();
try {
authCtx.verifyCapabilities(requiredCapabilities, method, req.getUri().getPath(), peer);
} catch (MissingCapabilitiesException e) {
int code = HttpResponse.Status.FORBIDDEN;
var resp = new Response(code);
resp.headers().add("Content-Type", "application/json");
ContentChannel ch = responseHandler.handleResponse(resp);
var slime = new Slime();
var root = slime.setObject();
root.setString("error-code", Integer.toString(code));
root.setString("message", "Missing required capabilities");
ch.write(ByteBuffer.wrap(uncheck(() -> SlimeUtils.toJsonBytes(slime))), null);
ch.close(null);
return null;
}
}
return wrapped.handleRequest(req, responseHandler);
} | class CapabilityEnforcingRequestHandler implements DelegatedRequestHandler {
private final RequestHandler wrapped;
CapabilityEnforcingRequestHandler(RequestHandler wrapped) { this.wrapped = wrapped; }
@Override
@Override public void release() { wrapped.release(); }
@Override public RequestHandler getDelegate() { return wrapped; }
@Override public void handleTimeout(Request request, ResponseHandler handler) { wrapped.handleRequest(request, handler); }
@Override public ResourceReference refer() { return wrapped.refer(); }
@Override public ResourceReference refer(Object context) { return wrapped.refer(context); }
private static class View implements RequestView {
private final HttpRequest req;
View(Request req) { this.req = (HttpRequest) req; }
@Override public HttpRequest.Method method() { return req.getMethod(); }
@Override public URI uri() { return req.getUri(); }
}
} | class CapabilityEnforcingRequestHandler implements DelegatedRequestHandler {
private final RequestHandler wrapped;
CapabilityEnforcingRequestHandler(RequestHandler wrapped) { this.wrapped = wrapped; }
@Override
@Override public void release() { wrapped.release(); }
@Override public RequestHandler getDelegate() { return wrapped; }
@Override public void handleTimeout(Request request, ResponseHandler handler) { wrapped.handleRequest(request, handler); }
@Override public ResourceReference refer() { return wrapped.refer(); }
@Override public ResourceReference refer(Object context) { return wrapped.refer(context); }
private static class View implements RequestView {
private final HttpRequest req;
View(Request req) { this.req = (HttpRequest) req; }
@Override public HttpRequest.Method method() { return req.getMethod(); }
@Override public URI uri() { return req.getUri(); }
}
} |
You need a comment similar to the one above to explain the dropping of the cache. | public Method get(Object object, Consumer<String> onPut) {
Pair<Class<?>, Method> pair = cache.get(object.getClass().getName());
if (pair != null && pair.getFirst() != object.getClass()) {
cache.clear();
pair = null;
}
Method method = pair == null ? null : pair.getSecond();
if (pair == null) {
method = lookupMethod(object);
cache.put(object.getClass().getName(), new Pair<>(object.getClass(), method));
if (onPut != null)
onPut.accept(object.getClass().getName());
}
return method;
} | if (pair != null && pair.getFirst() != object.getClass()) { | public Method get(Object object, Consumer<String> onPut) {
Pair<Class<?>, Method> pair = cache.get(object.getClass().getName());
if (pair != null && pair.getFirst() != object.getClass()) {
cache.clear();
pair = null;
}
Method method = pair == null ? null : pair.getSecond();
if (pair == null) {
method = lookupMethod(object);
cache.put(object.getClass().getName(), new Pair<>(object.getClass(), method));
if (onPut != null)
onPut.accept(object.getClass().getName());
}
return method;
} | class MethodCache {
private final String methodName;
private final CopyOnWriteHashMap<String, Pair<Class<?>, Method>> cache = new CopyOnWriteHashMap<>();
public MethodCache(String methodName) {
this.methodName = methodName;
}
/*
Clear all cached methods. Might be a wise thing to do, if you have cached some methods
that have changed due to new bundles being reloaded.
*/
public void clear() {
cache.clear();
}
public Method get(Object object) {
return get(object, null);
}
private Method lookupMethod(Object object) {
try {
return object.getClass().getMethod(methodName);
} catch (NoSuchMethodException e) {
return null;
}
}
} | class MethodCache {
private final String methodName;
private final CopyOnWriteHashMap<String, Pair<Class<?>, Method>> cache = new CopyOnWriteHashMap<>();
public MethodCache(String methodName) {
this.methodName = methodName;
}
/*
Clear all cached methods. Might be a wise thing to do, if you have cached some methods
that have changed due to new bundles being reloaded.
*/
public void clear() {
cache.clear();
}
public Method get(Object object) {
return get(object, null);
}
private Method lookupMethod(Object object) {
try {
return object.getClass().getMethod(methodName);
} catch (NoSuchMethodException e) {
return null;
}
}
} |
```suggestion // When changing bundles, you might end up having cached the old method pointing to the old bundle. // That will then lead to a class cast exception when invoking the wrong clone method. // Whenever we detect a new class with the same name, we therefore drop the entire cache. // This is also the reason for caching the pair of method and original class—not just the method. if (pair != null && pair.getFirst() != object.getClass()) { ``` | public Method get(Object object, Consumer<String> onPut) {
Pair<Class<?>, Method> pair = cache.get(object.getClass().getName());
if (pair != null && pair.getFirst() != object.getClass()) {
cache.clear();
pair = null;
}
Method method = pair == null ? null : pair.getSecond();
if (pair == null) {
method = lookupMethod(object);
cache.put(object.getClass().getName(), new Pair<>(object.getClass(), method));
if (onPut != null)
onPut.accept(object.getClass().getName());
}
return method;
} | if (pair != null && pair.getFirst() != object.getClass()) { | public Method get(Object object, Consumer<String> onPut) {
Pair<Class<?>, Method> pair = cache.get(object.getClass().getName());
if (pair != null && pair.getFirst() != object.getClass()) {
cache.clear();
pair = null;
}
Method method = pair == null ? null : pair.getSecond();
if (pair == null) {
method = lookupMethod(object);
cache.put(object.getClass().getName(), new Pair<>(object.getClass(), method));
if (onPut != null)
onPut.accept(object.getClass().getName());
}
return method;
} | class MethodCache {
private final String methodName;
private final CopyOnWriteHashMap<String, Pair<Class<?>, Method>> cache = new CopyOnWriteHashMap<>();
public MethodCache(String methodName) {
this.methodName = methodName;
}
/*
Clear all cached methods. Might be a wise thing to do, if you have cached some methods
that have changed due to new bundles being reloaded.
*/
public void clear() {
cache.clear();
}
public Method get(Object object) {
return get(object, null);
}
private Method lookupMethod(Object object) {
try {
return object.getClass().getMethod(methodName);
} catch (NoSuchMethodException e) {
return null;
}
}
} | class MethodCache {
private final String methodName;
private final CopyOnWriteHashMap<String, Pair<Class<?>, Method>> cache = new CopyOnWriteHashMap<>();
public MethodCache(String methodName) {
this.methodName = methodName;
}
/*
Clear all cached methods. Might be a wise thing to do, if you have cached some methods
that have changed due to new bundles being reloaded.
*/
public void clear() {
cache.clear();
}
public Method get(Object object) {
return get(object, null);
}
private Method lookupMethod(Object object) {
try {
return object.getClass().getMethod(methodName);
} catch (NoSuchMethodException e) {
return null;
}
}
} |
Call it `athenzDomain`? | private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
response.setString("tenant", deploymentId.applicationId().tenant().value());
response.setString("application", deploymentId.applicationId().application().value());
response.setString("instance", deploymentId.applicationId().instance().value());
response.setString("environment", deploymentId.zoneId().environment().value());
response.setString("region", deploymentId.zoneId().region().value());
var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
var endpointArray = response.setArray("endpoints");
EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
.scope(Endpoint.Scope.zone);
if (!legacyEndpoints) {
zoneEndpoints = zoneEndpoints.not().legacy().direct();
}
for (var endpoint : zoneEndpoints) {
toSlime(endpoint, endpointArray.addObject());
}
EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
.targets(deploymentId);
if (!legacyEndpoints) {
declaredEndpoints = declaredEndpoints.not().legacy().direct();
}
for (var endpoint : declaredEndpoints) {
toSlime(endpoint, endpointArray.addObject());
}
response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
response.setString("version", deployment.version().toFullString());
response.setString("revision", application.revisions().get(deployment.revision()).stringId());
response.setLong("build", deployment.revision().number());
Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));
application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> {
Cursor enclave = response.setObject("enclave");
enclave.setString("cloudAccount", cloudAccount.value());
enclave.setString("athensDomain", controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).value());
});
var instance = application.instances().get(deploymentId.applicationId().instance());
if (instance != null) {
if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
if (!deployment.zone().environment().isManuallyDeployed()) {
DeploymentStatus status = controller.jobController().deploymentStatus(application);
JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
Optional.ofNullable(status.jobSteps().get(jobId))
.ifPresent(stepStatus -> {
JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
response.setString("status", "complete");
else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
response.setString("status", "pending");
else
response.setString("status", "running");
});
} else {
var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
deploymentRun.ifPresent(run -> {
response.setString("status", run.hasEnded() ? "complete" : "running");
});
}
}
response.setDouble("quota", deployment.quota().rate());
deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)
.ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
} | enclave.setString("athensDomain", controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).value()); | private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
// Serializes a single deployment of an application instance onto the given response cursor:
// identity, endpoints, links, versions, enclave info, rotation/job status, quota, archive URI,
// activity, and metrics. Field names here are part of the public application/v4 API.
// Identity: application id and zone of the deployment.
response.setString("tenant", deploymentId.applicationId().tenant().value());
response.setString("application", deploymentId.applicationId().application().value());
response.setString("instance", deploymentId.applicationId().instance().value());
response.setString("environment", deploymentId.zoneId().environment().value());
response.setString("region", deploymentId.zoneId().region().value());
var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
// Legacy endpoints are only included when the caller explicitly asks for them.
boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
var endpointArray = response.setArray("endpoints");
// Zone-scoped endpoints of this deployment.
EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
.scope(Endpoint.Scope.zone);
if (!legacyEndpoints) {
zoneEndpoints = zoneEndpoints.not().legacy().direct();
}
for (var endpoint : zoneEndpoints) {
toSlime(endpoint, endpointArray.addObject());
}
// Declared (wider-scoped) endpoints which target this deployment.
EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
.targets(deploymentId);
if (!legacyEndpoints) {
declaredEndpoints = declaredEndpoints.not().legacy().direct();
}
for (var endpoint : declaredEndpoints) {
toSlime(endpoint, endpointArray.addObject());
}
// Links to related resources for this deployment.
response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
// Platform and application versions deployed here.
response.setString("version", deployment.version().toFullString());
response.setString("revision", application.revisions().get(deployment.revision()).stringId());
response.setLong("build", deployment.revision().number());
Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
// Expiry is only reported for zones which have a deployment time-to-live configured.
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));
application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
// Enclave info is present only when the deployment resolves to a tenant-owned cloud account.
controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> {
Cursor enclave = response.setObject("enclave");
enclave.setString("cloudAccount", cloudAccount.value());
enclave.setString("athensDomain", controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).value());
});
var instance = application.instances().get(deploymentId.applicationId().instance());
if (instance != null) {
// Rotation status is only rendered for prod deployments of instances with rotations assigned.
if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
if (!deployment.zone().environment().isManuallyDeployed()) {
DeploymentStatus status = controller.jobController().deploymentStatus(application);
JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
Optional.ofNullable(status.jobSteps().get(jobId))
.ifPresent(stepStatus -> {
JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
// "complete" when no further runs are scheduled for this job; "pending" until the
// step becomes ready (or readiness is unknown); otherwise "running".
if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
response.setString("status", "complete");
else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
response.setString("status", "pending");
else
response.setString("status", "running");
});
} else {
// Manually deployed zones: report the status of the most recent deployment run, if any.
var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
deploymentRun.ifPresent(run -> {
response.setString("status", run.hasEnded() ? "complete" : "running");
});
}
}
response.setDouble("quota", deployment.quota().rate());
deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
// Archive URI is only present when an archive bucket exists for this tenant in the zone.
controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)
.ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));
// Recent feed/query activity, each field present only when observed.
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
// Current deployment metrics snapshot; "lastUpdated" only when a snapshot time exists.
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
// Shared JSON mapper, reused across requests.
private static final ObjectMapper jsonMapper = new ObjectMapper();
// Entry point to controller state and operations; also supplies the audit logger.
private final Controller controller;
// Parses access-control related request payloads.
private final AccessControlRequests accessControlRequests;
// Serializes test configuration; bound to this controller's system.
private final TestConfigSerializer testConfigSerializer;
/**
 * Creates the application/v4 API handler.
 *
 * @param parentCtx             handler context supplied by the container
 * @param controller            controller providing state, operations, and the audit logger
 * @param accessControlRequests parser for access-control request payloads
 */
@Inject
public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
                             Controller controller,
                             AccessControlRequests accessControlRequests) {
    super(parentCtx, controller.auditLogger());
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
    this.accessControlRequests = accessControlRequests;
    this.controller = controller;
}
/** Returns the request timeout for this handler; generous, as some operations here can be long-running. */
@Override
public Duration getTimeout() {
    Duration handlerTimeout = Duration.ofMinutes(20);
    return handlerTimeout;
}
/**
 * Dispatches the request to the handler for its HTTP method, and translates thrown
 * exceptions into the corresponding HTTP error responses.
 */
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
    try {
        Path path = new Path(request.getUri());
        // Arrow-form switch, consistent with the other handlers in this module.
        return switch (request.getMethod()) {
            case GET -> handleGET(path, request);
            case PUT -> handlePUT(path, request);
            case POST -> handlePOST(path, request);
            case PATCH -> handlePATCH(path, request);
            case DELETE -> handleDELETE(path, request);
            case OPTIONS -> handleOPTIONS();
            default -> ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
        };
    }
    catch (RestApiException.Forbidden e) {
        return ErrorResponse.forbidden(Exceptions.toMessageString(e));
    }
    catch (RestApiException.Unauthorized e) {
        return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
    }
    catch (NotExistsException e) {
        return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
    }
    catch (IllegalArgumentException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
    catch (ConfigServerException e) {
        // Map config server error codes to matching HTTP status codes; unknown codes become 400.
        return switch (e.code()) {
            case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e));
            case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
            case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e);
            default -> new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
        };
    }
    catch (RuntimeException e) {
        // Anything else is unexpected: log with stack trace and respond with 500.
        return ErrorResponses.logThrowing(request, log, e);
    }
}
/**
 * Routes GET requests by path. The patterns below are matched in order, so do not reorder them.
 * A duplicated, unreachable match for the environment/region/instance deployment path has been removed.
 */
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
// Legacy path order (environment/region before instance), kept for backwards compatibility.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes PUT requests by path: tenant updates, operator access, tenant info,
 * archive access, secret stores, and global rotation overrides.
 * Patterns are matched in order.
 */
private HttpResponse handlePUT(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
// The bare archive-access path is a legacy alias for the aws variant.
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes POST requests by path: tenant/application/instance creation, keys,
 * deployment triggers, submission, jobs, and zone-level operations.
 * Patterns are matched in order.
 */
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
// Legacy path order (environment/region before instance), kept for backwards compatibility.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests; the application and instance resources accept the same patch payload. */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
    if (   path.matches("/application/v4/tenant/{tenant}/application/{application}")
        || path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}"))
        return patchApplication(path.get("tenant"), path.get("application"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes DELETE requests by path: tenant/application/instance deletion, keys,
 * archive access, secret stores, deployment cancellation, and zone-level teardown.
 * Patterns are matched in order.
 */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
// The bare archive-access path is a legacy alias for the aws variant.
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
// Legacy path order (environment/region before instance), kept for backwards compatibility.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Responds to OPTIONS requests by advertising the HTTP methods this handler supports. */
private HttpResponse handleOPTIONS() {
    var optionsResponse = new EmptyResponse();
    optionsResponse.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return optionsResponse;
}
/** Lists all tenants recursively: each tenant is rendered together with the applications it owns. */
private HttpResponse recursiveRoot(HttpRequest request) {
Slime slime = new Slime();
Cursor tenantArray = slime.setArray();
// Fetch all applications once up front, then bucket them per tenant in the loop below.
List<Application> applications = controller.applications().asList();
for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
toSlime(tenantArray.addObject(),
tenant,
applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).collect(toList()),
request);
return new SlimeJsonResponse(slime);
}
/** Entry point for the API root: a full recursive tenant dump when recursion is requested, otherwise a plain resource listing. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Returns a JSON array with a summary entry for each tenant visible to this request. */
private HttpResponse tenants(HttpRequest request) {
    var slime = new Slime();
    var tenantArray = slime.setArray();
    controller.tenants().asList(includeDeleted(request))
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Looks up the named tenant and renders it, or returns 404 if it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName), includeDeleted(request));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Renders a single tenant, including its applications, as JSON. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    var slime = new Slime();
    var root = slime.setObject();
    toSlime(root, tenant, controller.applications().asList(tenant.name()), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the ssh access-request state for a cloud tenant: whether access is managed,
 * any pending membership request, and the audit log of past decisions.
 * Responds 400 for non-cloud tenants.
 */
private HttpResponse accessRequests(String tenantName, HttpRequest request) {
var tenant = TenantName.from(tenantName);
if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
var accessControlService = controller.serviceRegistry().accessControlService();
var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
var managedAccess = accessControlService.getManagedAccess(tenant);
var slime = new Slime();
var cursor = slime.setObject();
cursor.setBool("managedAccess", managedAccess);
// "pendingRequest" is only present when there is an undecided membership request.
accessRoleInformation.getPendingRequest()
.ifPresent(membershipRequest -> {
var requestCursor = cursor.setObject("pendingRequest");
requestCursor.setString("requestTime", membershipRequest.getCreationTime());
requestCursor.setString("reason", membershipRequest.getReason());
});
// Audit log of previous approvals/denials, one object per entry.
var auditLogCursor = cursor.setArray("auditLog");
accessRoleInformation.getAuditLog()
.forEach(auditLogEntry -> {
var entryCursor = auditLogCursor.addObject();
entryCursor.setString("created", auditLogEntry.getCreationTime());
entryCursor.setString("approver", auditLogEntry.getApprover());
entryCursor.setString("reason", auditLogEntry.getReason());
entryCursor.setString("status", auditLogEntry.getAction());
});
return new SlimeJsonResponse(slime);
}
/** Requests ssh access to the given cloud tenant. Restricted to operators; 400 for non-cloud tenants. */
private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
    if ( ! isOperator(request))
        return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only request access for cloud tenants");
    controller.serviceRegistry().accessControlService().requestSshAccess(tenant);
    return new MessageResponse("OK");
}
/**
 * Approves or denies a pending ssh access request for a cloud tenant.
 * The request body may carry "expiry" (epoch millis; defaults to 24h from now) and "approve" (boolean).
 */
private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    var inspector = toSlime(request.getData()).get();
    Instant expiry;
    if (inspector.field("expiry").valid())
        expiry = Instant.ofEpochMilli(inspector.field("expiry").asLong());
    else
        expiry = Instant.now().plus(1, ChronoUnit.DAYS);
    boolean approve = inspector.field("approve").asBool();
    controller.serviceRegistry().accessControlService()
              .decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
    return new MessageResponse("OK");
}
/** Enables managed access for the given tenant. */
private HttpResponse addManagedAccess(String tenantName) {
return setManagedAccess(tenantName, true);
}
/** Disables managed access for the given tenant. */
private HttpResponse removeManagedAccess(String tenantName) {
return setManagedAccess(tenantName, false);
}
/**
 * Turns managed access on or off for the given cloud tenant and echoes the new state.
 * Responds 400 for non-cloud tenants.
 */
private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only set access privileges for cloud tenants"); // fixed typo: was "privel"
    controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
    var slime = new Slime();
    slime.setObject().setBool("managedAccess", managedAccess);
    return new SlimeJsonResponse(slime);
}
/** Renders the tenant info of the named cloud tenant, or 404 if the tenant is missing or not a cloud tenant. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    return controller.tenants().get(TenantName.from(tenantName))
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(CloudTenant.class::cast)
                     .map(cloudTenant -> tenantInfo(cloudTenant.info(), request))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/** Runs the given handler with the named cloud tenant, or returns 404 if the tenant is missing or not a cloud tenant. */
private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty() || tenant.get().type() != Tenant.Type.cloud)
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return handler.apply((CloudTenant) tenant.get());
}
/**
 * Renders the given tenant info as JSON; an empty object when no info has been set.
 * NOTE(review): 'request' is currently unused here — kept for signature symmetry with siblings.
 */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
Slime slime = new Slime();
Cursor infoCursor = slime.setObject();
if (!info.isEmpty()) {
infoCursor.setString("name", info.name());
infoCursor.setString("email", info.email());
infoCursor.setString("website", info.website());
infoCursor.setString("contactName", info.contact().name());
infoCursor.setString("contactEmail", info.contact().email());
// Nested structures: "address", "billingContact" and "contacts" objects/arrays.
toSlime(info.address(), infoCursor);
toSlime(info.billingContact(), infoCursor);
toSlime(info.contacts(), infoCursor);
}
return new SlimeJsonResponse(slime);
}
/**
 * Renders the "profile" view of a cloud tenant's info: contact person, company details and address.
 * Returns an empty object when no info has been set.
 */
private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
var slime = new Slime();
var root = slime.setObject();
var info = cloudTenant.info();
if (!info.isEmpty()) {
var contact = root.setObject("contact");
contact.setString("name", info.contact().name());
contact.setString("email", info.contact().email());
var tenant = root.setObject("tenant");
// "company" maps to TenantInfo.name() in this view.
tenant.setString("company", info.name());
tenant.setString("website", info.website());
toSlime(info.address(), root);
}
return new SlimeJsonResponse(slime);
}
/**
 * Runs the given handler with the named cloud tenant and the parsed request body,
 * or returns 404 if the tenant is missing or not a cloud tenant.
 */
private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
    return controller.tenants().get(tenantName)
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud) // without this, non-cloud tenants hit the cast below and blow up with ClassCastException
                     .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get()))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/**
 * Updates the profile part of a cloud tenant's info from the request body.
 * Fields present in the body overwrite the stored values; absent fields keep their old values.
 * Validates the merged result before storing it under the tenant lock.
 */
private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
var info = cloudTenant.info();
// Merge request fields over existing values; getString falls back to the old value when a field is absent.
var mergedContact = TenantContact.empty()
.withName(getString(inspector.field("contact").field("name"), info.contact().name()))
.withEmail(getString(inspector.field("contact").field("email"), info.contact().email()))
.withEmail(getString(inspector.field("contact").field("email"), info.contact().email()));
var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address());
var mergedInfo = info
.withName(getString(inspector.field("tenant").field("name"), info.name()))
.withWebsite(getString(inspector.field("tenant").field("website"), info.website()))
.withContact(mergedContact)
.withAddress(mergedAddress);
// Throws IllegalArgumentException (-> 400) on invalid contact name/email or website.
validateMergedTenantInfo(mergedInfo);
controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
/**
 * Renders the billing view of a cloud tenant's info: billing contact and billing address.
 * Returns an empty object when no info has been set.
 */
private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) {
var slime = new Slime();
var root = slime.setObject();
var info = cloudTenant.info();
if (!info.isEmpty()) {
var billingContact = info.billingContact();
var contact = root.setObject("contact");
contact.setString("name", billingContact.contact().name());
contact.setString("email", billingContact.contact().email());
contact.setString("phone", billingContact.contact().phone());
toSlime(billingContact.address(), root);
}
return new SlimeJsonResponse(slime);
}
/**
 * Updates the billing part of a cloud tenant's info from the request body.
 * Fields present in the body overwrite the stored values; absent fields keep their old values.
 * The merged result is stored under the tenant lock.
 */
private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    var contact = info.billingContact().contact();
    var address = info.billingContact().address();
    var mergedContact = updateTenantInfoContact(inspector.field("contact"), contact);
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), address); // use the local instead of re-fetching (was an unused variable)
    var mergedBilling = info.billingContact()
                            .withContact(mergedContact)
                            .withAddress(mergedAddress);
    var mergedInfo = info.withBilling(mergedBilling);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/** Renders the contact list of a cloud tenant's info as JSON. */
private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
    var responseSlime = new Slime();
    toSlime(cloudTenant.info().contacts(), responseSlime.setObject());
    return new SlimeJsonResponse(responseSlime);
}
/** Replaces the contact list of a cloud tenant's info with the one in the request body, storing under the tenant lock. */
private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
    var updatedContacts = updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.info().contacts());
    var mergedInfo = cloudTenant.info().withContacts(updatedContacts);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        controller.tenants().store(lockedTenant.withInfo(mergedInfo));
    });
    return new MessageResponse("Tenant info updated");
}
/**
 * Validates merged tenant info before it is stored.
 *
 * @throws IllegalArgumentException if the contact name or email is blank, the email lacks '@',
 *                                  or a non-blank website is not a well-formed URL
 */
private void validateMergedTenantInfo(TenantInfo mergedInfo) {
    if (mergedInfo.contact().name().isBlank()) {
        throw new IllegalArgumentException("'contactName' cannot be empty");
    }
    if (mergedInfo.contact().email().isBlank()) {
        throw new IllegalArgumentException("'contactEmail' cannot be empty");
    }
    if ( ! mergedInfo.contact().email().contains("@")) {
        throw new IllegalArgumentException("'contactEmail' needs to be an email address");
    }
    if ( ! mergedInfo.website().isBlank()) {
        try {
            new URL(mergedInfo.website());
        } catch (MalformedURLException e) {
            // Preserve the cause so the underlying parse failure is not lost (was dropped before).
            throw new IllegalArgumentException("'website' needs to be a valid address", e);
        }
    }
}
/** Serializes the given address into an "address" object under the parent cursor; writes nothing when the address is empty. */
private void toSlime(TenantAddress address, Cursor parentCursor) {
if (address.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("address");
addressCursor.setString("addressLines", address.address());
addressCursor.setString("postalCodeOrZip", address.code());
addressCursor.setString("city", address.city());
addressCursor.setString("stateRegionProvince", address.region());
addressCursor.setString("country", address.country());
}
/** Serializes the billing contact into a "billingContact" object under the parent cursor; writes nothing when it is empty. */
private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
if (billingContact.isEmpty()) return;
Cursor addressCursor = parentCursor.setObject("billingContact");
addressCursor.setString("name", billingContact.contact().name());
addressCursor.setString("email", billingContact.contact().email());
addressCursor.setString("phone", billingContact.contact().phone());
// Billing address is nested inside the billingContact object.
toSlime(billingContact.address(), addressCursor);
}
/**
 * Serializes the contact list into a "contacts" array under the parent cursor.
 * Each entry carries its audiences plus type-specific fields; only EMAIL contacts are supported here.
 */
private void toSlime(TenantContacts contacts, Cursor parentCursor) {
Cursor contactsCursor = parentCursor.setArray("contacts");
contacts.all().forEach(contact -> {
Cursor contactCursor = contactsCursor.addObject();
Cursor audiencesArray = contactCursor.setArray("audiences");
contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
switch (contact.type()) {
case EMAIL:
var email = (TenantContacts.EmailContact) contact;
contactCursor.setString("email", email.email());
// 'return' exits only this lambda iteration, not the enclosing method.
return;
default:
throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
}
});
}
/** Parses a wire-format audience string into its enum value; throws IllegalArgumentException for unknown values. */
private static TenantContacts.Audience fromAudience(String value) {
    return switch (value) {
        case "tenant"        -> TenantContacts.Audience.TENANT;
        case "notifications" -> TenantContacts.Audience.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
    };
}
/** Maps an audience enum value to its wire-format string. Inverse of {@code fromAudience}. */
private static String toAudience(TenantContacts.Audience audience) {
    return switch (audience) {
        case TENANT        -> "tenant";
        case NOTIFICATIONS -> "notifications";
    };
}
/** Updates the named cloud tenant's info from the request, or returns 404 if the tenant is missing or not a cloud tenant. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return updateTenantInfo((CloudTenant) tenant.get(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/** Returns the trimmed string value of the given field, or the given default when the field is absent/invalid. */
private String getString(Inspector field, String defaultValue) { // fixed parameter-name typo: was "defaultVale"
    return field.valid() ? field.asString().trim() : defaultValue;
}
/**
 * Updates all parts of a cloud tenant's info from the request body.
 * Fields present in the body overwrite stored values; absent fields keep their old values.
 * The merged result is validated and then stored under the tenant lock.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
TenantInfo oldInfo = tenant.info();
Inspector insp = toSlime(request.getData()).get();
// Merge request fields over existing values; getString falls back to the old value when a field is absent.
TenantContact mergedContact = TenantContact.empty()
.withName(getString(insp.field("contactName"), oldInfo.contact().name()))
.withEmail(getString(insp.field("contactEmail"), oldInfo.contact().email()));
TenantInfo mergedInfo = TenantInfo.empty()
.withName(getString(insp.field("name"), oldInfo.name()))
.withEmail(getString(insp.field("email"), oldInfo.email()))
.withWebsite(getString(insp.field("website"), oldInfo.website()))
.withContact(mergedContact)
.withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
.withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()))
.withContacts(updateTenantInfoContacts(insp.field("contacts"), oldInfo.contacts()));
// Throws IllegalArgumentException (-> 400) on invalid contact name/email or website.
validateMergedTenantInfo(mergedInfo);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
/**
 * Merges address fields from the request over the old address.
 * The result must be either entirely blank or entirely filled in; anything in between is rejected.
 *
 * @throws IllegalArgumentException if only some of the address fields are set
 */
private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
    if ( ! insp.valid()) return oldAddress;
    TenantAddress merged = TenantAddress.empty()
            .withCountry(getString(insp.field("country"), oldAddress.country()))
            .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
            .withCity(getString(insp.field("city"), oldAddress.city()))
            .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
            .withAddress(getString(insp.field("addressLines"), oldAddress.address()));
    long blankFields = Stream.of(merged.address(), merged.code(), merged.country(), merged.city(), merged.region())
                             .filter(String::isBlank)
                             .count();
    if (blankFields != 0 && blankFields != 5)
        throw new IllegalArgumentException("All address fields must be set");
    return merged;
}
/**
 * Merges contact fields from the request over the old contact.
 *
 * @throws IllegalArgumentException if a non-blank email does not contain '@'
 */
private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) {
    if ( ! insp.valid()) return oldContact;
    String email = getString(insp.field("email"), oldContact.email());
    if ( ! email.isBlank() && ! email.contains("@")) {
        throw new IllegalArgumentException("'email' needs to be an email address");
    }
    return TenantContact.empty()
            .withName(getString(insp.field("name"), oldContact.name()))
            .withEmail(email) // reuse the validated value instead of re-reading the field
            .withPhone(getString(insp.field("phone"), oldContact.phone()));
}
/** Merges billing-contact fields (contact and nested address) from the request over the old billing info. */
private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) {
    if ( ! insp.valid()) return oldContact;
    TenantContact mergedContact = updateTenantInfoContact(insp, oldContact.contact());
    TenantAddress mergedAddress = updateTenantInfoAddress(insp.field("address"), oldContact.address());
    return TenantBilling.empty().withContact(mergedContact).withAddress(mergedAddress);
}
/**
 * Parses the contact list from the request, replacing the old list entirely when present.
 *
 * @throws IllegalArgumentException if an entry has an unknown audience or an email without '@'
 */
private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) {
    if ( ! insp.valid()) return oldContacts;
    List<TenantContacts.EmailContact> parsedContacts = SlimeUtils.entriesStream(insp)
            .map(entry -> {
                String email = entry.field("email").asString().trim();
                List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(entry.field("audiences"))
                        .map(audience -> fromAudience(audience.asString()))
                        .toList();
                if ( ! email.contains("@")) {
                    throw new IllegalArgumentException("'email' needs to be an email address");
                }
                return new TenantContacts.EmailContact(audiences, email);
            })
            .toList();
    return new TenantContacts(parsedContacts);
}
/**
 * Lists notifications, optionally scoped to a single tenant, filtered by any of the request
 * properties "application", "instance", "zone", "job", "type" and "level".
 * With "excludeMessages=true" the message bodies are omitted from each entry.
 */
private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
boolean productionOnly = showOnlyProductionInstances(request);
boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
Slime slime = new Slime();
Cursor notificationsArray = slime.setObject().setArray("notifications");
// Either just the given tenant, or every tenant that currently has notifications.
tenant.map(t -> Stream.of(TenantName.from(t)))
.orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
.flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
// Each filter is a no-op when the corresponding request property is absent.
.filter(notification ->
propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
.forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
return new SlimeJsonResponse(slime);
}
/**
 * Returns true when the named request property is absent (no filtering), or when it is present,
 * maps to a value, and that value equals the given one.
 */
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
    String requested = request.getProperty(property);
    if (requested == null) return true; // property not given: no filter applies
    return value.isPresent() && mapper.apply(requested).equals(value.get());
}
/**
 * Serializes one notification into the given cursor. Optional source dimensions
 * (application, instance, zone, cluster, job, run) are only written when present.
 */
private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
cursor.setLong("at", notification.at().toEpochMilli());
cursor.setString("level", notificationLevelAsString(notification.level()));
cursor.setString("type", notificationTypeAsString(notification.type()));
if (!excludeMessages) {
Cursor messagesArray = cursor.setArray("messages");
notification.messages().forEach(messagesArray::addString);
}
// Tenant is omitted when the response is already scoped to one tenant.
if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
notification.source().zoneId().ifPresent(zoneId -> {
cursor.setString("environment", zoneId.environment().value());
cursor.setString("region", zoneId.region().value());
});
notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
/** Maps a notification type to its wire-format string; submission and applicationPackage share one value. */
private static String notificationTypeAsString(Notification.Type type) {
    return switch (type) {
        case submission, applicationPackage -> "applicationPackage";
        case testPackage                    -> "testPackage";
        case deployment                     -> "deployment";
        case feedBlock                      -> "feedBlock";
        case reindex                        -> "reindex";
    };
}
/** Maps a notification level to its wire-format string. */
private static String notificationLevelAsString(Notification.Level level) {
    return switch (level) {
        case info    -> "info";
        case warning -> "warning";
        case error   -> "error";
    };
}
/**
 * Lists applications for a tenant — all of them, or just the named one — each with
 * its instances and API urls. Throws NotExistsException (-> 404) when the named
 * application does not exist; the tenant itself is validated first.
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
// Fails fast with the proper error when the tenant does not exist.
getTenantOrThrow(tenantName);
List<Application> applications = applicationName.isEmpty() ?
controller.applications().asList(tenant) :
controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
.map(List::of)
.orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (Application application : applications) {
Cursor applicationObject = applicationArray.addObject();
applicationObject.setString("tenant", application.id().tenant().value());
applicationObject.setString("application", application.id().application().value());
applicationObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(),
request.getUri()).toString());
Cursor instanceArray = applicationObject.setArray("instances");
// Optionally restrict the instance list to production instances only.
for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
: application.instances().keySet()) {
Cursor instanceObject = instanceArray.addObject();
instanceObject.setString("instance", instance.value());
instanceObject.setString("url", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + instance.value(),
request.getUri()).toString());
}
}
return new SlimeJsonResponse(slime);
}
/**
 * Returns, as a zip, the application package most recently deployed to the dev zone of the given job.
 *
 * @throws NotExistsException if the job has never run for this application (previously this was an
 *                            unchecked Optional.get(), surfacing as a 500 instead of a 404)
 */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    ZoneId zone = type.zone();
    RevisionId revision = controller.jobController().last(id, type)
                                    .orElseThrow(() -> new NotExistsException("No run of " + type + " found for " + id))
                                    .versions().targetRevision();
    byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
    return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
}
/** Returns the stored application-package diff for the given dev run, or throws NotExistsException (-> 404). */
private HttpResponse devApplicationPackageDiff(RunId runId) {
    var deploymentId = new DeploymentId(runId.application(), runId.job().type().zone());
    var diff = controller.applications().applicationStore().getDevDiff(deploymentId, runId.number());
    if (diff.isEmpty())
        throw new NotExistsException("No application package diff found for " + runId);
    return new ByteArrayResponse(diff.get());
}
/**
 * Returns an application package as a zip.
 *
 * The revision is selected by the 'build' request parameter: a specific build number (>= 1),
 * "latestDeployed" for the latest revision deployed to production, or, when absent, the latest
 * submitted revision. With 'tests=true' the tester package for the revision is returned instead.
 *
 * @throws NotExistsException       if no matching revision exists
 * @throws IllegalArgumentException if 'build' is neither "latestDeployed" nor a valid number
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    final long build;
    String requestedBuild = request.getProperty("build");
    if (requestedBuild != null) {
        if (requestedBuild.equals("latestDeployed")) {
            build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision()
                              .map(RevisionId::number)
                              .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication));
        } else {
            try {
                // Use the value already read above instead of fetching the request property a second time.
                build = Validation.requireAtLeast(Long.parseLong(requestedBuild), "build number", 1L);
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
            }
        }
    } else {
        build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
                          .map(version -> version.id().number())
                          .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
    }
    RevisionId revision = RevisionId.forProduction(build);
    boolean tests = request.getBooleanProperty("tests");
    byte[] applicationPackage = tests ?
            controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
            controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
    String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
    return new ZipResponse(filename, applicationPackage);
}
/**
 * Returns the stored application-package diff for the given build number.
 *
 * @throws IllegalArgumentException if 'number' is not a valid long (previously a bare
 *                                  NumberFormatException escaped, unlike sibling endpoints which map to 400)
 * @throws NotExistsException       if no diff is stored for that build
 */
private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
    long buildNumber;
    try {
        buildNumber = Long.parseLong(number);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("invalid build number '" + number + "'", e);
    }
    return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber)
                     .map(ByteArrayResponse::new)
                     .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
}
/** Renders a single application as JSON; throws (-> 404) when it does not exist. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    var slime = new Slime();
    var root = slime.setObject();
    toSlime(root, getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the Vespa version to compile the given application against, optionally
 * restricted by an allowed major version given as a request parameter.
 *
 * @throws IllegalArgumentException if 'allowMajorParam' is present but not an integer
 */
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
    OptionalInt allowMajor = OptionalInt.empty();
    if (allowMajorParam != null) {
        try {
            allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
        }
    }
    Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
    var slime = new Slime();
    slime.setObject().setString("compileVersion", compileVersion.toFullString());
    return new SlimeJsonResponse(slime);
}
/** Renders a single instance, including its deployment status, as JSON. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    var slime = new Slime();
    var root = slime.setObject();
    var instance = getInstance(tenantName, applicationName, instanceName);
    var deploymentStatus = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
    toSlime(root, instance, deploymentStatus, request);
    return new SlimeJsonResponse(slime);
}
/**
 * Registers the PEM public key in the request body as a developer key for the
 * requesting user on the given cloud tenant, and returns the full key list.
 * Throws IllegalArgumentException (-> 400) for non-cloud tenants.
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
Principal user = request.getJDiscRequest().getUserPrincipal();
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
Slime root = new Slime();
// The response is built inside the lock callback so it reflects the stored state.
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withDeveloperKey(developerKey, user);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
/**
 * Validates a configured tenant secret store against a deployment by asking the config server
 * to read the given parameter in the given AWS region, and wraps the config server's answer
 * in a response object keyed by the deployment.
 */
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
var awsRegion = request.getProperty("aws-region");
var parameterName = request.getProperty("parameter-name");
var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
// The application in the query must belong to the tenant in the path.
if (!applicationId.tenant().equals(TenantName.from(tenantName)))
return ErrorResponse.badRequest("Invalid application id");
var zoneId = requireZone(ZoneId.from(request.getProperty("zone")));
var deploymentId = new DeploymentId(applicationId, zoneId);
var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
var tenantSecretStore = tenant.tenantSecretStores()
.stream()
.filter(secretStore -> secretStore.getName().equals(secretStoreName))
.findFirst();
if (tenantSecretStore.isEmpty())
return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
try {
// Re-wrap the config server's JSON under {"target": ..., "result": {...}}.
var responseRoot = new Slime();
var responseCursor = responseRoot.setObject();
responseCursor.setString("target", deploymentId.toString());
var responseResultCursor = responseCursor.setObject("result");
var responseSlime = SlimeUtils.jsonToSlime(response);
SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
return new SlimeJsonResponse(responseRoot);
} catch (JsonParseException e) {
// Config server returned something that is not JSON; log and report a server-side error.
return ErrorResponses.logThrowing(request, log, e);
}
}
/**
 * Removes the PEM public key in the request body from the developer keys of the given
 * cloud tenant, and returns the remaining key list.
 * Throws IllegalArgumentException (-> 400) for non-cloud tenants.
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
// NOTE(review): 'user' is looked up but never used below — presumably historical; confirm before removing.
Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
Slime root = new Slime();
// The response is built inside the lock callback so it reflects the stored state.
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
tenant = tenant.withoutDeveloperKey(developerKey);
toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
controller.tenants().store(tenant);
});
return new SlimeJsonResponse(root);
}
/** Serializes a key-to-principal map as an array of {"key": <pem>, "user": <name>} objects. */
private void toSlime(Cursor keysArray, Map<PublicKey, ? extends Principal> keys) {
    for (Map.Entry<PublicKey, ? extends Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/**
 * Adds the PEM public key in the request body as a deploy key for the given application,
 * and returns the full deploy-key list.
 */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
// The response is built inside the lock callback so it reflects the stored state.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
/**
 * Removes the PEM public key in the request body from the deploy keys of the given application,
 * and returns the remaining deploy-key list.
 */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
// The response is built inside the lock callback so it reflects the stored state.
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withoutDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
/**
 * Configures a new tenant secret store (AWS id + role) for a cloud tenant:
 * creates the tenant policy, registers the store with the secret service, persists it
 * on the tenant, and returns the resulting list of secret stores.
 * Throws IllegalArgumentException (-> 400) for non-cloud tenants; 400 for invalid or duplicate stores.
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var awsId = mandatory("awsId", data).asString();
var externalId = mandatory("externalId", data).asString();
var role = mandatory("role", data).asString();
var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var tenantSecretStore = new TenantSecretStore(name, awsId, role);
if (!tenantSecretStore.isValid()) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
}
if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
}
// External side effects first, then persist the store on the tenant under lock.
controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
controller.tenants().store(lockedTenant);
});
// Re-read the tenant so the response reflects the stored state.
tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
var slime = new Slime();
toSlime(slime.setObject(), tenant.tenantSecretStores());
return new SlimeJsonResponse(slime);
}
/**
 * Deletes the named secret store from a cloud tenant, tearing down the external
 * secret-store integration and tenant policy before removing it from the tenant.
 * Returns the tenant's remaining secret stores, or 404 if the store does not exist.
 */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
    CloudTenant tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    Optional<TenantSecretStore> match = tenant.tenantSecretStores().stream()
                                              .filter(store -> store.getName().equals(name))
                                              .findFirst();
    if (match.isEmpty())
        return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");

    TenantSecretStore store = match.get();
    controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), store);
    controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), store.getName(), store.getRole());
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withoutSecretStore(store);
        controller.tenants().store(lockedTenant);
    });

    CloudTenant updated = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    Slime slime = new Slime();
    toSlime(slime.setObject(), updated.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/** Sets the AWS role that may access this cloud tenant's archive bucket. */
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String role = mandatory("role", toSlime(request.getData()).get()).asString();
    if (role.isBlank())
        return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withArchiveAccess(lockedTenant.get().archiveAccess().withAWSRole(role));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
/** Clears the AWS archive access role for this cloud tenant. */
private HttpResponse removeAwsArchiveAccess(String tenantName) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withArchiveAccess(lockedTenant.get().archiveAccess().removeAWSRole());
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
}
/**
 * Sets the GCP member that may access this cloud tenant's archive bucket.
 *
 * Fix: the blank-value validation error previously referred to a "role", but the field being
 * validated here is "member" (the success message below already says "member").
 */
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var member = mandatory("member", data).asString();
    if (member.isBlank()) {
        return ErrorResponse.badRequest("GCP archive access member can't be whitespace only");
    }
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}
/** Clears the GCP archive access member for this cloud tenant. */
private HttpResponse removeGcpArchiveAccess(String tenantName) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withArchiveAccess(lockedTenant.get().archiveAccess().removeGCPMember());
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}
/**
 * PATCHes application-level settings: pinning (or clearing) the major version, and adding
 * a deploy key. Returns a message describing the changes applied, if any.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messages = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // A value of 0 means "clear the pinned major version"
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messages.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }
        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            application = application.withDeployKey(KeyUtils.fromPemEncodedPublicKey(pemDeployKey));
            messages.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(application);
    });
    return new MessageResponse(messages.toString());
}
/** Returns the application with the given id, or throws NotExistsException if absent. */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    Optional<Application> application = controller.applications().getApplication(applicationId);
    if (application.isEmpty())
        throw new NotExistsException(applicationId + " not found");
    return application.get();
}
/** Returns the instance with the given id, or throws NotExistsException if absent. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Optional<Instance> instance = controller.applications().getInstance(applicationId);
    if (instance.isEmpty())
        throw new NotExistsException(applicationId + " not found");
    return instance.get();
}
/** Lists the nodes allocated to the given deployment, with state, version and resource details. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));

    Slime slime = new Slime();
    Cursor nodeArray = slime.setObject().setArray("nodes");
    nodes.forEach(node -> {
        Cursor entry = nodeArray.addObject();
        entry.setString("hostname", node.hostname().value());
        entry.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> entry.setString("reservedTo", tenant.value()));
        entry.setString("orchestration", valueOf(node.serviceState()));
        entry.setString("version", node.currentVersion().toString());
        node.flavor().ifPresent(flavor -> entry.setString("flavor", flavor));
        toSlime(node.resources(), entry);
        entry.setString("clusterId", node.clusterId());
        entry.setString("clusterType", valueOf(node.clusterType()));
        entry.setBool("down", node.down());
        entry.setBool("retired", node.retired() || node.wantToRetire());
        entry.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
        entry.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
        entry.setString("group", node.group());
        entry.setLong("index", node.index());
    });
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the cluster resource/autoscaling info for a deployment, as reported by the node
 * repository: one object per cluster, keyed on cluster id, with min/max/current (and, when
 * different from current, target) resources, utilization and scaling history.
 */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        clusterObject.setString("type", cluster.type().name());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // Only report a target when one exists and its numeric resources actually differ from current
        if (cluster.target().isPresent()
            && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
            toSlime(cluster.target().get(), clusterObject.setObject("target"));
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
        utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
        clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
        clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
        clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
        clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the API string for the given node state; throws on states not exposed by this API. */
private static String valueOf(Node.State state) {
    return switch (state) {
        case failed -> "failed";
        case parked -> "parked";
        case dirty -> "dirty";
        case ready -> "ready";
        case active -> "active";
        case inactive -> "inactive";
        case reserved -> "reserved";
        case provisioned -> "provisioned";
        case breakfixed -> "breakfixed";
        case deprovisioned -> "deprovisioned";
        default -> throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    };
}
/** Returns the API string for the given orchestration state, defaulting to "unknown". */
static String valueOf(Node.ServiceState state) {
    return switch (state) {
        case expectedUp -> "expectedUp";
        case allowedDown -> "allowedDown";
        case permanentlyDown -> "permanentlyDown";
        case unorchestrated -> "unorchestrated";
        default -> "unknown";
    };
}
/** Returns the API string for the given cluster type; "unknown" is not a valid API value. */
private static String valueOf(Node.ClusterType type) {
    return switch (type) {
        case admin -> "admin";
        case content -> "content";
        case container -> "container";
        case combined -> "combined";
        case unknown -> throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    };
}
/** Returns the API string for the given disk speed. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    return switch (diskSpeed) {
        case fast -> "fast";
        case slow -> "slow";
        case any -> "any";
    };
}
/** Returns the API string for the given storage type. */
private static String valueOf(NodeResources.StorageType storageType) {
    return switch (storageType) {
        case remote -> "remote";
        case local -> "local";
        case any -> "any";
    };
}
/**
 * Streams Vespa logs for the given deployment back to the client.
 * Query parameters are passed through unmodified to the config server's log API.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    // The stream is consumed (and closed) lazily when the response is rendered
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // try-with-resources closes the upstream log stream even if the transfer fails midway
            try (logStream) {
                logStream.transferTo(outputStream);
            }
        }
        @Override
        public long maxPendingBytes() {
            return 1 << 26; // 64 MiB — presumably the buffering limit before back-pressure kicks in; confirm against jdisc docs
        }
    };
}
/** Returns the current support access state for the given deployment. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    SupportAccess state = controller.supportAccess().forDeployment(deployment);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(state, controller.clock().instant()));
}
/** Grants support access to the given deployment for a fixed 7-day window, on behalf of the requesting user. */
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    String grantor = requireUserPrincipal(request).getName();
    Instant now = controller.clock().instant();
    SupportAccess granted = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), grantor);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(granted, now));
}
/**
 * Revokes support access for the given deployment and re-triggers (or queues) a deployment
 * so the revocation takes effect on the running nodes.
 *
 * Fix: the re-trigger reason previously re-fetched the principal via the raw JDisc request
 * ({@code request.getJDiscRequest().getUserPrincipal()}), bypassing the validation done by
 * {@code requireUserPrincipal} and risking an NPE; reuse the already-validated principal.
 */
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}
/** Returns the proton (search backend) metrics for the given deployment. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    return buildResponseFromProtonMetrics(controller.serviceRegistry().configServer().getProtonMetrics(deployment));
}
/**
 * Returns scaling events for the given deployment, one array per cluster id.
 * Optional 'from'/'until' query properties are epoch seconds; they default to
 * the epoch and the current time, respectively.
 */
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    Instant from = Optional.ofNullable(request.getProperty("from"))
                           .map(Long::valueOf)
                           .map(Instant::ofEpochSecond)
                           .orElse(Instant.EPOCH);
    Instant until = Optional.ofNullable(request.getProperty("until"))
                            .map(Long::valueOf)
                            .map(Instant::ofEpochSecond)
                            .orElse(Instant.now(controller.clock()));
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment)
              .forEach((cluster, events) -> scalingEventsToSlime(events, root.setArray(cluster.clusterId().value())));
    return new SlimeJsonResponse(slime);
}
/**
 * Wraps the given proton metrics in a {"metrics": [...]} JSON response.
 * Returns an empty 500 response if JSON serialization fails.
 */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var metricsArray = jsonMapper.createArrayNode();
        protonMetrics.forEach(metrics -> metricsArray.add(metrics.toJson()));
        var wrapper = jsonMapper.createObjectNode();
        wrapper.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(wrapper));
    } catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Triggers (or re-triggers) the given job for the given instance.
 * Request body flags: skipTests, reTrigger, skipRevision, skipUpgrade — all default false.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
    boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
    // reTrigger restarts the one given job; forceTrigger may start several jobs, hence the joined names
    String triggered = reTrigger
                       ? controller.applications().deploymentTrigger()
                                   .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
                       : controller.applications().deploymentTrigger()
                                   .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                                   .stream().map(job -> job.type().jobName()).collect(joining(", "));
    // Builds e.g. ", without revision upgrade", ", without platform upgrade" or ", without revision and platform upgrade"
    String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                (upgradeRevision ? "" : "revision") +
                                ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                (upgradePlatform ? "" : "platform") +
                                ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
    return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                   : "Triggered " + triggered + " for " + id + suppressedUpgrades);
}
/** Pauses the given job for the given instance, for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant expiry = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, expiry);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes the given job for the given instance, clearing any pause set by {@link #pause}. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/**
 * Serializes an application (the application-level API response): identity, latest revision,
 * change status of the first instance, all (or only production) instances, deploy keys,
 * metrics, activity and issue-tracking ids.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // At the application level, deploying/outstanding change is reported for the first instance only
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
    });
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes an instance summary (as nested in the application response): name, change and
 * block-window info for declared instances, rotation id, and one entry per deployment
 * (recursing into full deployment details when requested).
 *
 * Fix: removed the unused local {@code Collection<JobStatus> jobStatus}; its value was
 * computed but never read.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    // Change and block-window info only exists for instances declared in deployment.xml
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), status.application());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    addRotationId(object, instance);
    // Declared instances get deployments in deployment-spec order; others in map order
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Adds the instance's first assigned rotation id, if any, as "rotationId". */
private void addRotationId(Cursor object, Instance instance) {
    if ( ! instance.rotations().isEmpty())
        object.setString("rotationId", instance.rotations().get(0).rotationId().asString());
}
/**
 * Serializes the full instance view (root of the instance API response): identity, latest
 * revision source info, change and block-window status, one entry per deployment — plus
 * declared-but-not-yet-deployed prod zones and active manual deployments — deploy keys,
 * metrics, activity and issue-tracking ids.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.revisions().last().ifPresent(version -> {
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // Change and block-window info only exists for instances declared in deployment.xml
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // NOTE(review): jobStatus is computed but never used below — candidate for removal
        Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    addRotationId(object, instance);
    // Declared instances get deployments in deployment-spec order; others in map order
    List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
                                              .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                              .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            // With exactly one rotation, its state is inlined; endpoint status is added unless recursing
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Also list zones this instance should (or is about to) run in, but has no deployment in yet:
    // declared production deployment jobs, plus active manual (dev/perf) runs
    Stream.concat(status.jobSteps().keySet().stream()
                        .filter(job -> job.application().instance().equals(instance.name()))
                        .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                  controller.jobController().active(instance.id()).stream()
                            .map(run -> run.id().job())
                            .filter(job -> job.type().environment().isManuallyDeployed()))
          .map(job -> job.type().zone())
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    // Legacy singular "pemDeployKey" is kept alongside the full "pemDeployKeys" array
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns the details of a single deployment, or 404 if the instance or deployment does not exist. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + zone);
    Slime slime = new Slime();
    toSlime(slime.setObject(), new DeploymentId(instance.id(), zone), deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes a change: its platform version, and/or its application revision, when present. */
private void toSlime(Cursor object, Change change, Application application) {
    if (change.platform().isPresent())
        object.setString("version", change.platform().get().toString());
    if (change.revision().isPresent())
        JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(change.revision().get()));
}
/** Serializes a single endpoint: cluster, TLS flag, URL, scope, routing method and legacy flag. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
    object.setBool("legacy", endpoint.legacy());
}
/** Writes the rotation state of a deployment as a nested "bcpStatus" object. */
private void toSlime(RotationState state, Cursor object) {
    Cursor bcpStatus = object.setObject("bcpStatus");
    bcpStatus.setString("rotationStatus", rotationStateString(state));
}
/** Serializes per-rotation endpoint status for a deployment into an "endpointStatus" array. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor statusArray = object.setArray("endpointStatus");
    for (AssignedRotation rotation : rotations) {
        Cursor entry = statusArray.addObject();
        var targets = status.of(rotation.rotationId());
        entry.setString("endpointId", rotation.endpointId().id());
        entry.setString("rotationId", rotation.rotationId().asString());
        entry.setString("clusterId", rotation.clusterId().value());
        entry.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
        entry.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring-system URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Sets the routing status of the given deployment in or out of service.
 * The acting agent is recorded as operator or tenant depending on the caller's role.
 */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(id);
    ZoneId zone = requireZone(environment, region);
    if (instance.deployments().get(zone) == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }
    RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(new DeploymentId(instance.id(), zone)).setRoutingStatus(status, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                           instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Returns the routing status override for the primary rotation endpoint of the given deployment, if any. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
              .requiresRotation()
              .primary()
              .ifPresent(endpoint -> {
                  RoutingStatus status = controller.routing().of(deploymentId).routingStatus();
                  array.addString(endpoint.upstreamName(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.value().name());
                  statusObject.setString("reason", ""); // reason is not tracked, but kept for API compatibility
                  statusObject.setString("agent", status.agent().name());
                  statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation (BCP) status of the given endpoint for the given deployment. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    // Resolve the rotation first, so that a missing rotation is reported before a missing deployment.
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the change (platform and/or application revision) currently rolling out for the given instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(platform -> root.setString("platform", platform.toString()));
        change.revision().ifPresent(revision -> root.setString("application", revision.toString()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether orchestration of the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Proxies the /status page of the given service on the given host in the given deployment, forwarding query parameters. */
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId,
serviceName,
DomainName.of(host),
HttpURL.Path.parse("/status").append(restPath),
Query.empty().add(request.getJDiscRequest().parameters()));
}
/** Returns the service nodes (orchestrator view) of the given deployment, via the config server. */
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
return controller.serviceRegistry().configServer().getServiceNodes(deploymentId);
}
/** Proxies the /state/v1 API of the given service on the given host, adding the original request URL as "forwarded-url". */
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    // Pass the caller's URL (sans query) along, so links in the proxied page can be rewritten.
    String forwardedUrl = HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString();
    Query query = Query.empty().add(request.getJDiscRequest().parameters())
                               .set("forwarded-url", forwardedUrl);
    return controller.serviceRegistry().configServer().getServiceNodePage(
            deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
}
/** Returns the content of the deployed application package at the given path, via the config server. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
}
/** Updates an existing tenant from the request body, and returns the updated tenant. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 if the tenant does not exist
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new tenant from the request body, and returns it. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // In public systems, record the creating user as the tenant's initial contact.
    if (controller.system().isPublic()) {
        User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
        TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
                                    .info()
                                    .withContact(TenantContact.from(user.name(), user.email()));
        controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, locked ->
                controller.tenants().store(locked.withInfo(info)));
    }
    return tenant(controller.tenants().require(tenant), request);
}
/**
 * Creates a new application under the given tenant, and returns its serialized id.
 * Credentials from the request body are verified by the access control layer.
 */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    // Invoked for its side effect (and access check); the returned Application is not needed for the response,
    // so the previously unused local variable has been removed.
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates a new instance of the given application, creating the application itself first if needed. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);

    ApplicationId instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId, Tags.empty());
    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9"; an empty version means the current system version. */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String requestedVersion = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        VersionStatus versionStatus = controller.readVersionStatus();
        Version version = Version.fromString(requestedVersion);
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        // Only operators may force a version which is not active in this system.
        if ( ! versionStatus.isActive(version) && ! isOperator(request)) {
            String activeVersions = versionStatus.versions()
                                                 .stream()
                                                 .map(VespaVersion::versionNumber)
                                                 .map(Version::toString)
                                                 .collect(joining(", "));
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + activeVersions);
        }
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        message.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Inspector buildField = toSlime(request.getData()).get().field("build");
    long build = buildField.valid() ? buildField.asLong() : -1;

    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        // No explicit build number means the latest known revision.
        RevisionId revision = build == -1 ? application.get().revisions().last().get().id()
                                          : getRevision(application.get(), build);
        Change change = Change.of(revision);
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        message.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/** Returns the revision with the given build number, requiring that its package is still stored. */
private RevisionId getRevision(Application application, long build) {
    Optional<RevisionId> revision = application.revisions().withPackage().stream()
                                               .map(ApplicationVersion::id)
                                               .filter(id -> id.number() == build)
                                               .findFirst()
                                               // The revision must also have its package present in the application store.
                                               .filter(id -> controller.applications().applicationStore()
                                                                       .hasBuild(application.id().tenant(),
                                                                                 application.id().application(),
                                                                                 build));
    return revision.orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
}
/** Marks the given build as non-deployable, and cancels it for any instance currently rolling it out. */
private HttpResponse cancelBuild(String tenantName, String applicationName, String build) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
    controller.applications().lockApplicationOrThrow(id, application -> {
        controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
        for (Instance instance : application.get().instances().values()) {
            if (instance.change().revision().equals(Optional.of(revision)))
                controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION);
        }
    });
    return new MessageResponse("Marked build '" + build + "' as non-deployable");
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change current = application.get().require(id.instance()).change();
        if (current.isEmpty()) {
            message.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        controller.applications().deploymentTrigger().cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
        message.append("Changed deployment from '").append(current)
               .append("' to '").append(controller.applications().requireInstance(id).change())
               .append("' for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // "clusterId" and "documentType" are optional comma-separated lists; blanks are ignored.
    List<String> clusterNames = Stream.ofNullable(request.getProperty("clusterId"))
                                      .flatMap(value -> Stream.of(value.split(",")))
                                      .filter(name -> ! name.isBlank())
                                      .toList();
    List<String> documentTypes = Stream.ofNullable(request.getProperty("documentType"))
                                       .flatMap(value -> Stream.of(value.split(",")))
                                       .filter(type -> ! type.isBlank())
                                       .toList();
    Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
    boolean indexedOnly = request.getBooleanProperty("indexedOnly");
    controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed);

    StringBuilder message = new StringBuilder("Requested reindexing of " + id + " in " + zone);
    if ( ! clusterNames.isEmpty()) message.append(", on clusters ").append(String.join(", ", clusterNames));
    if ( ! documentTypes.isEmpty()) message.append(", for types ").append(String.join(", ", documentTypes));
    if (indexedOnly) message.append(", for indexed types");
    if (speed != null) message.append(", with speed ").append(speed);
    return new MessageResponse(message.toString());
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setBool("enabled", reindexing.enabled());
// Clusters, and the entries within each cluster, are serialized in sorted-by-key order for stable output.
Cursor clustersArray = root.setArray("clusters");
reindexing.clusters().entrySet().stream().sorted(comparingByKey())
.forEach(cluster -> {
Cursor clusterObject = clustersArray.addObject();
clusterObject.setString("name", cluster.getKey());
// "pending" lists document types still waiting for reindexing, with the generation they require.
Cursor pendingArray = clusterObject.setArray("pending");
cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
.forEach(pending -> {
Cursor pendingObject = pendingArray.addObject();
pendingObject.setString("type", pending.getKey());
pendingObject.setLong("requiredGeneration", pending.getValue());
});
// "ready" lists document types whose reindexing is scheduled or underway, with detailed status.
Cursor readyArray = clusterObject.setArray("ready");
cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
.forEach(ready -> {
Cursor readyObject = readyArray.addObject();
readyObject.setString("type", ready.getKey());
setStatus(readyObject, ready.getValue());
});
});
return new SlimeJsonResponse(slime);
}
/** Serializes a reindexing status into the given cursor; only fields that are present are written. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(instant -> statusObject.setLong("readyAtMillis", instant.toEpochMilli()));
    status.startedAt().ifPresent(instant -> statusObject.setLong("startedAtMillis", instant.toEpochMilli()));
    status.endedAt().ifPresent(instant -> statusObject.setLong("endedAtMillis", instant.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(value -> statusObject.setString("state", value));
    status.message().ifPresent(value -> statusObject.setString("message", value));
    status.progress().ifPresent(value -> statusObject.setDouble("progress", value));
    status.speed().ifPresent(value -> statusObject.setDouble("speed", value));
}
/** Returns the wire name of the given reindexing state. */
private static String toString(ApplicationReindexing.State state) {
    return switch (state) {
        case PENDING -> "pending";
        case RUNNING -> "running";
        case FAILED -> "failed";
        case SUCCESSFUL -> "successful";
    };
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    // Each filter dimension is optional; an absent property matches everything in that dimension.
    Optional<HostName> hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::of);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deploymentId, new RestartFilter().withHostName(hostName)
                                                                       .withClusterType(clusterType)
                                                                       .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    String action = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(action + " orchestration of " + deploymentId);
}
/**
 * Deploys an application package directly to the given job's zone.
 * Only manually deployed environments may be targeted, unless the caller is an operator.
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the same key constant for both the presence check and the lookup; the original code
    // checked the literal "applicationZip" but read EnvironmentResource.APPLICATION_ZIP.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone()),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // Parse the optional 'deployOptions' JSON part once, then read individual fields from it.
    Optional<Inspector> deployOptions = Optional.ofNullable(dataParts.get("deployOptions"))
                                                .map(json -> SlimeUtils.jsonToSlime(json).get());
    Optional<Version> version = deployOptions.flatMap(options -> optional("vespaVersion", options))
                                             .map(Version::fromString);
    boolean dryRun = deployOptions.flatMap(options -> optional("dryRun", options))
                                  .map(Boolean::valueOf)
                                  .orElse(false);

    ensureApplicationExists(TenantAndApplicationId.from(id), request);
    controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys a system application (one with an application package, owned by the system itself) to the given zone,
 * always on the current system version. Explicit versions, and deployment during a system upgrade, are rejected.
 */
private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
Map<String, byte[]> dataParts = parseDataParts(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
// Only system applications which carry their own application package can be deployed this way.
Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage()) {
return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
}
// The version is always decided by the system; callers may not choose one.
String vespaVersion = deployOptions.field("vespaVersion").asString();
if ( ! vespaVersion.isEmpty()) {
return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
}
VersionStatus versionStatus = controller.readVersionStatus();
if (versionStatus.isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
DeploymentResult result = controller.applications()
.deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.get().versionNumber());
// Relay the config server's prepare log to the caller.
Cursor logArray = root.setArray("prepareMessages");
for (LogEntry logMessage : result.log()) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.epochMillis());
logObject.setString("level", logMessage.level().getName());
logObject.setString("message", logMessage.message());
}
return new SlimeJsonResponse(slime);
}
/** Deletes the given tenant; the "forget" option (operators only) also erases it from history. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    boolean forget = request.getBooleanProperty("forget");
    if (forget && ! isOperator(request))
        return ErrorResponse.forbidden("Only operators can forget a tenant");

    TenantName tenant = TenantName.from(tenantName);
    Credentials credentials = accessControlRequests.credentials(tenant, toSlime(request.getData()).get(), request.getJDiscRequest());
    controller.tenants().delete(tenant, Optional.of(credentials), forget);
    return new MessageResponse("Deleted tenant " + tenantName);
}
/** Deletes the given application, using credentials from the request body for the access check. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.applications().deleteApplication(id, accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()));
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance; if it was the last one, the application itself is deleted as well. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    ApplicationId instanceId = id.instance(instanceName);
    controller.applications().deleteInstance(instanceId);
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + instanceId.toFullString());
}
/** Deactivates the given deployment, and aborts any still-running deployment job towards its zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                       requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
              .filter(run -> ! run.hasEnded())
              .ifPresent(run -> controller.jobController().abort(run.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
// Fall back to the default instance when the requested instance is not declared in the deployment spec.
ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
? id : TenantAndApplicationId.from(id).defaultInstance();
// All production deployments of the chosen instance are part of the test config ...
HashSet<DeploymentId> deployments = controller.applications()
.getInstance(prodInstanceId).stream()
.flatMap(instance -> instance.productionDeployments().keySet().stream())
.map(zone -> new DeploymentId(prodInstanceId, zone))
.collect(Collectors.toCollection(HashSet::new));
ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
// ... plus, for non-production jobs, the deployment under test itself.
if ( ! type.isProduction())
deployments.add(new DeploymentId(toTest, type.zone()));
Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone());
if (deployment == null)
throw new NotExistsException(toTest + " is not deployed in " + type.zone());
return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
type,
false,
deployment.version(),
deployment.revision(),
deployment.at(),
controller.routing().readTestRunnerEndpointsOf(deployments),
controller.applications().reachableContentClustersByZone(deployments)));
}
/**
 * Requests a service dump from the given node by writing a "serviceDump" report to the node repository,
 * which the node agent then acts on. With "wait=true" the call blocks until the dump completes or fails.
 */
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
String region, String hostname, HttpRequest request) {
NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
ZoneId zone = requireZone(environment, region);
// Refuse a new dump while one is still in progress, unless "force" is given.
Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
if (report != null) {
Cursor cursor = report.get();
boolean force = request.getBooleanProperty("force");
if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
}
}
Slime requestPayload;
try {
requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
} catch (Exception e) {
throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
}
// "configId" and a non-empty "artifacts" list are required; "expiresAt" and "dumpOptions" are optional.
Cursor requestPayloadCursor = requestPayload.get();
String configId = requestPayloadCursor.field("configId").asString();
long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
if (configId.isEmpty()) {
throw new IllegalArgumentException("Missing configId");
}
Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
int artifactEntries = artifactsCursor.entries();
if (artifactEntries == 0) {
throw new IllegalArgumentException("Missing or empty 'artifacts'");
}
// Build the dump request document that is stored as the node's "serviceDump" report.
Slime dumpRequest = new Slime();
Cursor dumpRequestCursor = dumpRequest.setObject();
dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
dumpRequestCursor.setString("configId", configId);
Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
for (int i = 0; i < artifactEntries; i++) {
dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
}
if (expiresAt > 0) {
dumpRequestCursor.setLong("expiresAt", expiresAt);
}
Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
if (dumpOptionsCursor.children() > 0) {
SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
}
var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
nodeRepository.updateReports(zone, hostname, reportsUpdate);
boolean wait = request.getBooleanProperty("wait");
if (!wait) return new MessageResponse("Request created");
return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
}
/** Returns the service dump report for the given node, or 404 if none exists. */
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment,
                                    String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);
    Optional<Slime> report = getReport(nodeRepository, zone, tenant, application, instance, hostname);
    return new SlimeJsonResponse(report.orElseThrow(() -> new NotExistsException("No service dump for node " + hostname)));
}
/** Polls the node's service dump report until it either completes or fails, then returns it. */
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                              String application, String instance, String hostname) {
    int pollInterval = 2;
    while (true) {
        Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get();
        Cursor cursor = report.get();
        if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0)
            return new SlimeJsonResponse(report);

        log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(report))));
        log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
        controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
    }
}
/** Returns the "serviceDump" report of the given node, verifying that the node is owned by the given application. */
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                  String application, String instance, String hostname) {
    Node node;
    try {
        node = nodeRepository.getNode(zone, hostname);
    } catch (IllegalArgumentException e) {
        throw new NotExistsException(hostname);
    }
    ApplicationId requested = ApplicationId.from(tenant, application, instance);
    ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
    if ( ! requested.equals(owner))
        throw new IllegalArgumentException("Node is not owned by " + requested.toFullString());

    return Optional.ofNullable(node.reports().get("serviceDump"))
                   .map(SlimeUtils::jsonToSlimeOrThrow);
}
/** Parses a source revision from the given object, requiring "repository", "branch" and "commit" fields. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");

    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException (mapped to 404) if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
return controller.tenants().get(tenantName)
.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/**
 * Serializes a tenant, with type-specific detail, followed by its applications (or instances) and metadata.
 * Request parameters control whether only production and/or only active instances are listed, and whether
 * full deployment status is included per instance.
 */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
// Athenz tenants expose their domain, property, and (optionally) contact information.
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
Cursor contactsArray = object.setArray("contacts");
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
case cloud: {
// Cloud tenants expose creator, developer keys, secret stores, integrations, quota and archive access.
CloudTenant cloudTenant = (CloudTenant) tenant;
cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
cloudTenant.developerKeys().forEach((key, user) -> {
Cursor keyObject = pemDeveloperKeysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", user.getName());
});
toSlime(object, cloudTenant.tenantSecretStores());
toSlime(object.setObject("integrations").setObject("aws"),
controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
cloudTenant.tenantSecretStores());
// Quota lookup is best-effort: a failure is logged but must not break tenant serialization.
try {
var usedQuota = applications.stream()
.map(Application::quotaUsage)
.reduce(QuotaUsage.none, QuotaUsage::add);
toSlime(object.setObject("quota"), usedQuota);
} catch (Exception e) {
log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e)));
}
cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role));
toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess"));
break;
}
case deleted: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
// List applications: either as bare ids, or (when recursing) with full deployment status per instance.
Cursor applicationArray = object.setArray("applications");
for (Application application : applications) {
DeploymentStatus status = null;
Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values();
// An application with no (matching) instances is still listed, unless only active instances are requested.
if (instances.isEmpty() && !showOnlyActiveInstances(request))
toSlime(application.id(), applicationArray.addObject(), request);
for (Instance instance : instances) {
if (showOnlyActiveInstances(request) && instance.deployments().isEmpty())
continue;
if (recurseOverApplications(request)) {
// Deployment status is computed lazily, once per application.
if (status == null) status = controller.jobController().deploymentStatus(application);
toSlime(applicationArray.addObject(), instance, status, request);
} else {
toSlime(instance.id(), applicationArray.addObject(), request);
}
}
}
tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Serializes which AWS role and GCP member, if any, may access the tenant's archive buckets. */
private void toSlime(ArchiveAccess archiveAccess, Cursor object) {
    archiveAccess.awsRole().ifPresent(awsRole -> object.setString("awsRole", awsRole));
    archiveAccess.gcpMember().ifPresent(gcpMember -> object.setString("gcpMember", gcpMember));
}
/** Serializes the consumed quota rate as "budgetUsed". */
private void toSlime(Cursor object, QuotaUsage usage) {
    double budgetUsed = usage.rate();
    object.setDouble("budgetUsed", budgetUsed);
}
/** Serializes node count, group count, per-node resources, and the computed cost of a cluster allocation. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    object.setDouble("cost", ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()));
}
/** Serializes actual, ideal, current and peak utilization for each resource dimension of a cluster. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
// cpu
utilizationObject.setDouble("cpu", utilization.cpu());
utilizationObject.setDouble("idealCpu", utilization.idealCpu());
utilizationObject.setDouble("currentCpu", utilization.currentCpu());
utilizationObject.setDouble("peakCpu", utilization.peakCpu());
// memory
utilizationObject.setDouble("memory", utilization.memory());
utilizationObject.setDouble("idealMemory", utilization.idealMemory());
utilizationObject.setDouble("currentMemory", utilization.currentMemory());
utilizationObject.setDouble("peakMemory", utilization.peakMemory());
// disk
utilizationObject.setDouble("disk", utilization.disk());
utilizationObject.setDouble("idealDisk", utilization.idealDisk());
utilizationObject.setDouble("currentDisk", utilization.currentDisk());
utilizationObject.setDouble("peakDisk", utilization.peakDisk());
}
/** Serializes each scaling event as { from, to, at, completion? }, preserving the given order. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    for (Cluster.ScalingEvent event : scalingEvents) {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
        event.completion().ifPresent(completedAt -> eventObject.setLong("completion", completedAt.toEpochMilli()));
    }
}
/** Serializes the resources of a single node; disk speed and storage type use their API enum names. */
private void toSlime(NodeResources resources, Cursor object) {
object.setDouble("vcpu", resources.vcpu());
object.setDouble("memoryGb", resources.memoryGb());
object.setDouble("diskGb", resources.diskGb());
object.setDouble("bandwidthGbps", resources.bandwidthGbps());
object.setString("diskSpeed", valueOf(resources.diskSpeed()));
object.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a brief tenant-list entry: name, type-specific meta data, and the API url of the tenant. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tenantType(tenant));
switch (tenant.type()) {
case athenz:
// Only Athenz tenants carry extra meta data in the list view.
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
break;
case cloud: break;
case deleted: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Serializes tenant meta data: creation (and, for deleted tenants, deletion) time, the last
 * deployment to a dev zone, the last production submission, and last login time per user level.
 */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
// Last dev activity: prefer the start of the latest deployment currently in a dev zone;
// if none exists, fall back to the start of the latest run of any dev job.
Optional<Instant> lastDev = applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> instance.deployments().values().stream()
.filter(deployment -> deployment.zone().environment() == Environment.dev)
.map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment)))
.max(Comparator.naturalOrder())
.or(() -> applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream()
.filter(job -> job.environment() == Environment.dev)
.flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
.map(Run::start)
.max(Comparator.naturalOrder()));
// Last prod submission: build time of the latest revision, when known.
Optional<Instant> lastSubmission = applications.stream()
.flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream())
.max(Comparator.naturalOrder());
object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
if (tenant.type() == Tenant.Type.deleted)
object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli());
lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
.ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
.ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
.ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI, keeping scheme, user info, host and port, but with the given path and query. */
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    } catch (URISyntaxException e) {
        // Components come from an already-valid URI, so this cannot occur.
        throw new RuntimeException("Will not happen", e);
    }
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path, and no query. */
private URI withPath(String newPath, URI uri) {
return withPathAndQuery(newPath, null, uri);
}
/** Returns the application/v4 API path identifying the given deployment. */
private String toPath(DeploymentId id) {
    ApplicationId app = id.applicationId();
    return path("/application", "v4",
                "tenant", app.tenant(),
                "application", app.application(),
                "instance", app.instance(),
                "environment", id.zoneId().environment(),
                "region", id.zoneId().region());
}
/**
 * Parses the given value as a long, returning the given default when the value is null.
 *
 * @throws IllegalArgumentException (mapped to 400) when the value is present but not an integer
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null)
        return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}
/**
 * Reads the given stream (at most 1 MB) and parses its content as JSON.
 *
 * @throws RuntimeException wrapping the IOException if the stream cannot be read
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Previously threw a bare RuntimeException, losing both cause and message.
        throw new RuntimeException("Failed reading request body", e);
    }
}
/** Returns the user principal of the given request, failing if the request carries none. */
private static Principal requireUserPrincipal(HttpRequest request) {
    return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal())
                   .orElseThrow(() -> new IllegalArgumentException("Expected a user principal"));
}
/** Returns the named field of the given object, failing if it is missing. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the string value of the named field, if present and valid. */
private Optional<String> optional(String key, Inspector object) {
return SlimeUtils.optionalString(object.field(key));
}
/** Joins the string forms of the given elements with '/'; elements are not URL-encoded. */
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
/** Serializes the id of, and the API url to, the given application. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String apiPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/** Serializes the id of, and the API url to, the given instance. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String apiPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value() +
                     "/instance/" + id.instance().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/** Adds each of the given strings to the given array, in order. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Serializes the given secret stores under a "secretStores" array. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor secretStore = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(secretStore.addObject(), store);
}
/** Serializes the tenant's container role and its secret stores under an "accounts" array. */
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    Cursor stores = object.setArray("accounts");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(stores.addObject(), store);
}
/** Serializes a single secret store: name, AWS account id, and role. */
private void toSlime(Cursor object, TenantSecretStore secretStore) {
object.setString("name", secretStore.getName());
object.setString("awsId", secretStore.getAwsId());
object.setString("role", secretStore.getRole());
}
/**
 * Reads the entire stream as a single string, or returns null if the stream is empty.
 * The Scanner is not closed here, since that would close the caller's stream.
 * NOTE(review): the Scanner uses the platform default charset — confirm UTF-8 is guaranteed upstream.
 */
private String readToString(InputStream stream) {
Scanner scanner = new Scanner(stream).useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
/** Returns whether the response should recurse into tenants, i.e., recursive=tenant or any deeper level. */
private static boolean recurseOverTenants(HttpRequest request) {
return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the response should recurse into applications, i.e., recursive=application or any deeper level. */
private static boolean recurseOverApplications(HttpRequest request) {
return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
/**
 * Returns whether the response should recurse into deployments, i.e., recursive=all|true|deployment.
 * Note: ImmutableSet.contains is null safe, so an absent "recursive" parameter yields false
 * (Set.of(...).contains(null) would throw NPE — do not "modernize" this).
 */
private static boolean recurseOverDeployments(HttpRequest request) {
return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
/** Returns whether the "production" request parameter is "true" (null safe). */
private static boolean showOnlyProductionInstances(HttpRequest request) {
return "true".equals(request.getProperty("production"));
}
/** Returns whether the "activeInstances" request parameter is "true" (null safe). */
private static boolean showOnlyActiveInstances(HttpRequest request) {
return "true".equals(request.getProperty("activeInstances"));
}
/** Returns whether the "includeDeleted" request parameter is "true" (null safe). */
private static boolean includeDeleted(HttpRequest request) {
return "true".equals(request.getProperty("includeDeleted"));
}
/** Returns the API name of the given tenant's type. */
private static String tenantType(Tenant tenant) {
    return switch (tenant.type()) {
        case athenz -> "ATHENS";
        case cloud -> "CLOUD";
        case deleted -> "DELETED";
    };
}
/** Builds an ApplicationId from the tenant, application and instance path segments. */
private static ApplicationId appIdFromPath(Path path) {
return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Resolves the "jobtype" path segment against the zones known to this system. */
private JobType jobTypeFromPath(Path path) {
return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry());
}
/** Builds a RunId from the application id, job type and "number" path segments. */
private RunId runIdFromPath(Path path) {
long number = Long.parseLong(path.get("number"));
return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}
/**
 * Handles submission of a new application revision: parses the multipart request, validates
 * the submit options, verifies identity configuration of the application package, creates
 * the application if needed, and registers the submission with the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
long projectId = submitOptions.field("projectId").asLong();
// An absent "projectId" reads as 0; fall back to project 1.
projectId = projectId == 0 ? 1 : projectId;
Optional<String> repository = optional("repository", submitOptions);
Optional<String> branch = optional("branch", submitOptions);
Optional<String> commit = optional("commit", submitOptions);
// A source revision is only recorded when all of repository, branch and commit are given.
Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
: Optional.empty();
Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
Optional<String> authorEmail = optional("authorEmail", submitOptions);
Optional<String> description = optional("description", submitOptions);
int risk = (int) submitOptions.field("risk").asLong();
// Reject scheme-less or host-less source URLs early.
sourceUrl.map(URI::create).ifPresent(url -> {
if (url.getHost() == null || url.getScheme() == null)
throw new IllegalArgumentException("Source URL must include scheme and host");
});
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP);
Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
Optional.empty(),
Optional.empty(),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
ensureApplicationExists(id, request);
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
}
/**
 * Submits a synthetic "deployment removal" revision, which removes all production deployments
 * of the given application. The submit response is intentionally discarded; a plain
 * confirmation message is returned instead.
 */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
TenantAndApplicationId.from(tenant, application),
new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
Optional.empty(), Optional.empty(), Optional.empty(), 0),
0);
return new MessageResponse("All deployments removed");
}
/** Parses and validates the given environment and region as a zone of this system. */
private ZoneId requireZone(String environment, String region) {
return requireZone(ZoneId.from(environment, region));
}
/** Returns the given zone, failing unless it exists in this system or is the synthetic prod "controller" zone. */
private ZoneId requireZone(ZoneId zone) {
    // The special "controller" prod zone is valid here even though it is not in the registry.
    if (zone.environment() == Environment.prod && zone.region().value().equals("controller"))
        return zone;
    if ( ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart data of the given request, verifying the body against the
 * X-Content-Hash header (base64-encoded SHA-256 of the body) when that header is present.
 *
 * @throws IllegalArgumentException if the declared and computed hashes differ
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
String contentHash = request.getHeader("X-Content-Hash");
if (contentHash == null)
return new MultipartParser().parse(request);
// Digest the body while it is being parsed, then compare with the declared hash.
DigestInputStream digester = Signatures.sha256Digester(request.getData());
var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
return dataParts;
}
/**
 * Returns the rotation to use for the given instance: the one matching the given endpoint id
 * when present, otherwise the instance's single assigned rotation.
 *
 * @throws NotExistsException if the instance has no rotations, or none match the given endpoint id
 * @throws IllegalArgumentException if the instance has several rotations and no endpoint id was given
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
if (instance.rotations().isEmpty()) {
throw new NotExistsException("global rotation does not exist for " + instance);
}
if (endpointId.isPresent()) {
return instance.rotations().stream()
.filter(r -> r.endpointId().id().equals(endpointId.get()))
.map(AssignedRotation::rotationId)
.findFirst()
.orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
" does not exist for " + instance));
} else if (instance.rotations().size() > 1) {
throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
}
return instance.rotations().get(0).rotationId();
}
/** Returns the API name of the given rotation state. */
private static String rotationStateString(RotationState state) {
    return switch (state) {
        case in -> "IN";
        case out -> "OUT";
        case unknown -> "UNKNOWN";
    };
}
/** Returns the API name of the given endpoint scope. */
private static String endpointScopeString(Endpoint.Scope scope) {
    return switch (scope) {
        case weighted -> "weighted";
        case application -> "application";
        case global -> "global";
        case zone -> "zone";
    };
}
/** Returns the API name of the given routing method. */
private static String routingMethodString(RoutingMethod method) {
    return switch (method) {
        case exclusive -> "exclusive";
        case sharedLayer4 -> "sharedLayer4";
    };
}
/**
 * Returns the request context attribute with the given name, cast to the given type.
 *
 * @throws IllegalArgumentException if the attribute is absent or not an instance of the given type
 */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
.filter(cls::isInstance)
.map(cls::cast)
.orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}
/** Returns whether the given request was made by a hosted operator. */
private static boolean isOperator(HttpRequest request) {
    SecurityContext securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    return securityContext.roles().stream()
                          .anyMatch(role -> role.definition() == RoleDefinition.hostedOperator);
}
/**
 * Ensures the given application exists: in public systems, and for console (Okta) requests,
 * it is created on the fly; otherwise the caller must create it in the Console first.
 *
 * @throws IllegalArgumentException if the application does not exist and may not be auto-created
 */
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
if (controller.applications().getApplication(id).isEmpty()) {
if (controller.system().isPublic() || hasOktaContext(request)) {
log.fine("Application does not exist in public, creating: " + id);
var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest());
controller.applications().createApplication(id, credentials);
} else {
log.fine("Application does not exist in hosted, failing: " + id);
throw new IllegalArgumentException("Application does not exist. Create application in Console first.");
}
}
}
/**
 * Returns whether the request context carries Okta credentials, by attempting to parse them.
 * NOTE(review): exception-based probing — IllegalArgumentException is taken to mean "absent or malformed".
 */
private boolean hasOktaContext(HttpRequest request) {
try {
OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context());
return true;
} catch (IllegalArgumentException e) {
return false;
}
}
/**
 * Returns the given deployments sorted by their zone's position in the deployment spec's
 * production zone order; deployments whose zone is not in the spec (indexOf == -1) sort first.
 * The returned list is unmodifiable.
 */
private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) {
    List<ZoneId> productionZones = spec.zones().stream()
                                       .filter(z -> z.region().isPresent())
                                       .map(z -> ZoneId.from(z.environment(), z.region().get()))
                                       .toList();
    return deployments.stream()
                      .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone())))
                      .toList(); // Stream.toList() is unmodifiable, replacing collectingAndThen(toList, unmodifiableList)
}
} | class ApplicationApiHandler extends AuditLoggingRequestHandler {
// Shared Jackson mapper for building JSON responses.
private static final ObjectMapper jsonMapper = new ObjectMapper();
private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;
// Injected by the container; audit logging is wired up through the superclass.
@Inject
public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx, controller.auditLogger());
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
// NOTE(review): presumably sized for slow operations such as deployments and package
// uploads handled by this API — confirm before lowering.
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
/**
 * Dispatches the request by HTTP method and maps thrown exceptions to HTTP error responses.
 */
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
try {
Path path = new Path(request.getUri());
return switch (request.getMethod()) {
case GET: yield handleGET(path, request);
case PUT: yield handlePUT(path, request);
case POST: yield handlePOST(path, request);
case PATCH: yield handlePATCH(path, request);
case DELETE: yield handleDELETE(path, request);
case OPTIONS: yield handleOPTIONS();
default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
};
}
// Domain exceptions map to specific HTTP statuses:
catch (RestApiException.Forbidden e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (RestApiException.Unauthorized e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
// Errors reported by the config server carry their own error-code taxonomy.
catch (ConfigServerException e) {
return switch (e.code()) {
case NOT_FOUND: yield ErrorResponse.notFoundError(Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT: yield new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR: yield ErrorResponses.logThrowing(request, log, e);
default: yield new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
};
}
// Anything else is unexpected: log it and answer 500.
catch (RuntimeException e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
/**
 * Routes GET requests: the first route that matches the request path handles it;
 * unmatched paths yield 404. (Removed one exact-duplicate route for
 * .../environment/{environment}/region/{region}/instance/{instance}, which was unreachable.)
 */
private HttpResponse handleGET(Path path, HttpRequest request) {
    if (path.matches("/application/v4/")) return root(request);
    if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
    if (path.matches("/application/v4/tenant")) return tenants(request);
    if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
    if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
    if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
    if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    // Legacy route order (environment/region before instance):
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    return ErrorResponse.notFoundError("Nothing at " + path);
}
// Dispatches PUT requests under /application/v4. Paths are tried in declaration order and
// the first match wins, so more specific paths must precede overlapping general ones;
// anything unmatched falls through to a 404.
private HttpResponse handlePUT(Path path, HttpRequest request) {
// Tenant-level updates, operator ssh-access flow and tenant info.
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
// NOTE(review): "/archive-access" and "/archive-access/aws" both map to allowAwsArchiveAccess --
// presumably the un-suffixed path is a legacy alias kept for compatibility; confirm before removing.
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
// Global rotation override: both path orders (instance before and after environment/region) are accepted.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Dispatches POST requests under /application/v4. Paths are tried in declaration order and
// the first match wins; anything unmatched falls through to a 404.
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
// Application-level deploy paths act on the "default" instance.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
// NOTE(review): the instance-scoped submit ignores the {instance} segment and submits at
// application level, same as the path above -- presumably intentional; confirm.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
// Alternate path order (environment/region before instance) for the same operations.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Dispatches PATCH requests under /application/v4. Note that both the application-level and the
// instance-level path call patchApplication without the instance: the {instance} segment is
// accepted but the patch is applied at application level (visible from the call below).
private HttpResponse handlePATCH(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Dispatches DELETE requests under /application/v4. Paths are tried in declaration order and
// the first match wins; anything unmatched falls through to a 404.
private HttpResponse handleDELETE(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
// NOTE(review): "/archive-access" and "/archive-access/aws" both map to removeAwsArchiveAccess --
// presumably the un-suffixed path is a legacy alias, mirroring the PUT routes; confirm.
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
// Application-level deploy cancellation acts on the "default" instance.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
// Alternate path order (environment/region before instance) for the same operations.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS with an empty body and an Allow header listing every method this API accepts. */
private HttpResponse handleOPTIONS() {
    EmptyResponse optionsResponse = new EmptyResponse();
    optionsResponse.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return optionsResponse;
}
/**
 * Produces the recursive root listing: every tenant (optionally including deleted ones,
 * depending on the request) together with the applications it owns.
 */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    List<Application> allApplications = controller.applications().asList();
    for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) {
        // Select only the applications belonging to this tenant.
        List<Application> ownedByTenant = allApplications.stream()
                                                         .filter(application -> application.id().tenant().equals(tenant.name()))
                                                         .collect(toList());
        toSlime(tenantArray.addObject(), tenant, ownedByTenant, request);
    }
    return new SlimeJsonResponse(slime);
}
/** Serves the API root: a full recursive listing when recursion is requested, a link page otherwise. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Lists all tenants (optionally including deleted ones) in compact form. */
private HttpResponse tenants(HttpRequest request) {
    Slime listing = new Slime();
    Cursor tenantArray = listing.setArray();
    for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
        tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject());
    return new SlimeJsonResponse(listing);
}
/** Serves the details of a single tenant, or 404 when it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName), includeDeleted(request));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Renders a single tenant, with its applications, as a JSON response. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime response = new Slime();
    toSlime(response.setObject(), tenant, controller.applications().asList(tenant.name()), request);
    return new SlimeJsonResponse(response);
}
/**
 * Lists the managed-access flag, any pending operator access request, and the access audit log
 * for a cloud tenant. Non-cloud tenants get a 400.
 */
private HttpResponse accessRequests(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    var accessControlService = controller.serviceRegistry().accessControlService();
    var roleInformation = accessControlService.getAccessRoleInformation(tenant);
    boolean managedAccess = accessControlService.getManagedAccess(tenant);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("managedAccess", managedAccess);
    // Pending request, when one exists.
    roleInformation.getPendingRequest().ifPresent(pendingRequest -> {
        Cursor pendingCursor = root.setObject("pendingRequest");
        pendingCursor.setString("requestTime", pendingRequest.getCreationTime());
        pendingCursor.setString("reason", pendingRequest.getReason());
    });
    // Full audit log of past decisions.
    Cursor auditLogArray = root.setArray("auditLog");
    roleInformation.getAuditLog().forEach(entry -> {
        Cursor entryCursor = auditLogArray.addObject();
        entryCursor.setString("created", entry.getCreationTime());
        entryCursor.setString("approver", entry.getApprover());
        entryCursor.setString("reason", entry.getReason());
        entryCursor.setString("status", entry.getAction());
    });
    return new SlimeJsonResponse(slime);
}
/**
 * Files an ssh access request for a cloud tenant. Restricted to operators; non-cloud
 * tenants get a 400.
 */
private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
    if ( ! isOperator(request)) {
        return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
    }
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only request access for cloud tenants");
    controller.serviceRegistry().accessControlService().requestSshAccess(tenant);
    return new MessageResponse("OK");
}
/**
 * Approves or rejects a pending ssh access request for a cloud tenant. The request body may
 * carry an "expiry" epoch-millis field; when absent the grant defaults to one day from now.
 */
private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    Inspector inspector = toSlime(request.getData()).get();
    Instant expiry;
    if (inspector.field("expiry").valid())
        expiry = Instant.ofEpochMilli(inspector.field("expiry").asLong());
    else
        expiry = Instant.now().plus(1, ChronoUnit.DAYS);
    boolean approve = inspector.field("approve").asBool();
    controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
    return new MessageResponse("OK");
}
/** Enables managed access control for the given cloud tenant. */
private HttpResponse addManagedAccess(String tenantName) {
return setManagedAccess(tenantName, true);
}
/** Disables managed access control for the given cloud tenant. */
private HttpResponse removeManagedAccess(String tenantName) {
return setManagedAccess(tenantName, false);
}
/**
 * Sets the managed-access flag for a cloud tenant and echoes the new value back.
 * Non-cloud tenants get a 400.
 */
private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only set access privileges for cloud tenants"); // fixed typo: "privel"
    controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
    var slime = new Slime();
    slime.setObject().setBool("managedAccess", managedAccess);
    return new SlimeJsonResponse(slime);
}
/**
 * Serves the info section of a cloud tenant; 404 when the tenant is missing or not a cloud tenant.
 * Delegates to the common cloud-tenant lookup helper so the filtering and the 404 message
 * stay in one place (behavior is unchanged).
 */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    return withCloudTenant(tenantName, tenant -> tenantInfo(tenant.info(), request));
}
/**
 * Looks up the named tenant and, when it exists and is a cloud tenant, applies the given
 * handler to it; otherwise returns a 404.
 */
private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return handler.apply((CloudTenant) tenant.get());
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/**
 * Renders a tenant's info section as JSON; an empty info yields an empty object.
 * The request parameter is currently unread but kept for interface compatibility with callers.
 */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if ( ! info.isEmpty()) {
        root.setString("name", info.name());
        root.setString("email", info.email());
        root.setString("website", info.website());
        root.setString("contactName", info.contact().name());
        root.setString("contactEmail", info.contact().email());
        toSlime(info.address(), root);
        toSlime(info.billingContact(), root);
        toSlime(info.contacts(), root);
    }
    return new SlimeJsonResponse(slime);
}
/** Renders the profile view (contact + company details) of a cloud tenant's info. */
private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    TenantInfo info = cloudTenant.info();
    if ( ! info.isEmpty()) {
        Cursor contactCursor = root.setObject("contact");
        contactCursor.setString("name", info.contact().name());
        contactCursor.setString("email", info.contact().email());
        Cursor tenantCursor = root.setObject("tenant");
        tenantCursor.setString("company", info.name());
        tenantCursor.setString("website", info.website());
        toSlime(info.address(), root);
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Looks up the named tenant and applies the given handler to it with the parsed request body.
 * Filters on tenant type before casting, like the single-argument overload above does:
 * without the filter a non-cloud tenant caused a ClassCastException (HTTP 500) instead of
 * the intended 404.
 */
private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
    return controller.tenants().get(tenantName)
            .filter(tenant -> tenant.type() == Tenant.Type.cloud)
            .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get()))
            .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
/**
 * Merges the profile fields of the request body into the tenant's existing info, validates
 * the result, and stores it under the tenant lock. Absent fields keep their current values.
 */
private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
    TenantInfo currentInfo = cloudTenant.info();
    Inspector contactField = inspector.field("contact");
    TenantContact updatedContact = TenantContact.empty()
            .withName(getString(contactField.field("name"), currentInfo.contact().name()))
            .withEmail(getString(contactField.field("email"), currentInfo.contact().email()));
    TenantAddress updatedAddress = updateTenantInfoAddress(inspector.field("address"), currentInfo.address());
    Inspector tenantField = inspector.field("tenant");
    TenantInfo updatedInfo = currentInfo
            .withName(getString(tenantField.field("name"), currentInfo.name()))
            .withWebsite(getString(tenantField.field("website"), currentInfo.website()))
            .withContact(updatedContact)
            .withAddress(updatedAddress);
    validateMergedTenantInfo(updatedInfo);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withInfo(updatedInfo)));
    return new MessageResponse("Tenant info updated");
}
/** Renders the billing view (billing contact + address) of a cloud tenant's info. */
private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    TenantInfo info = cloudTenant.info();
    if ( ! info.isEmpty()) {
        TenantBilling billing = info.billingContact();
        Cursor contactCursor = root.setObject("contact");
        contactCursor.setString("name", billing.contact().name());
        contactCursor.setString("email", billing.contact().email());
        contactCursor.setString("phone", billing.contact().phone());
        toSlime(billing.address(), root);
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Merges the billing contact and address from the request body into the tenant's existing
 * info and stores the result under the tenant lock. Absent fields keep their current values.
 * (The previously declared {@code address} local was unused while the same expression was
 * recomputed inline; it is now used consistently.)
 */
private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    var contact = info.billingContact().contact();
    var address = info.billingContact().address();
    var mergedContact = updateTenantInfoContact(inspector.field("contact"), contact);
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), address);
    var mergedBilling = info.billingContact()
            .withContact(mergedContact)
            .withAddress(mergedAddress);
    var mergedInfo = info.withBilling(mergedBilling);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/** Renders only the contacts list of a cloud tenant's info. */
private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
    Slime response = new Slime();
    toSlime(cloudTenant.info().contacts(), response.setObject());
    return new SlimeJsonResponse(response);
}
/** Replaces the tenant's contacts list with the merged result of the request body, under the tenant lock. */
private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
    TenantContacts updatedContacts = updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.info().contacts());
    TenantInfo updatedInfo = cloudTenant.info().withContacts(updatedContacts);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withInfo(updatedInfo)));
    return new MessageResponse("Tenant info updated");
}
/**
 * Validates a merged tenant info update before it is stored.
 *
 * @throws IllegalArgumentException when the contact name or email is blank, the email
 *         lacks an '@', or a non-blank website is not a well-formed URL
 */
private void validateMergedTenantInfo(TenantInfo mergedInfo) {
    String contactName = mergedInfo.contact().name();
    String contactEmail = mergedInfo.contact().email();
    if (contactName.isBlank())
        throw new IllegalArgumentException("'contactName' cannot be empty");
    if (contactEmail.isBlank())
        throw new IllegalArgumentException("'contactEmail' cannot be empty");
    if ( ! contactEmail.contains("@"))
        throw new IllegalArgumentException("'contactEmail' needs to be an email address");
    if (mergedInfo.website().isBlank()) return; // website is optional
    try {
        new URL(mergedInfo.website());
    } catch (MalformedURLException e) {
        throw new IllegalArgumentException("'website' needs to be a valid address");
    }
}
/** Writes a non-empty tenant address as an "address" object under the given cursor; empty addresses are omitted. */
private void toSlime(TenantAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;
    Cursor cursor = parentCursor.setObject("address");
    cursor.setString("addressLines", address.address());
    cursor.setString("postalCodeOrZip", address.code());
    cursor.setString("city", address.city());
    cursor.setString("stateRegionProvince", address.region());
    cursor.setString("country", address.country());
}
/** Writes a non-empty billing contact as a "billingContact" object under the given cursor; empty ones are omitted. */
private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;
    Cursor cursor = parentCursor.setObject("billingContact");
    cursor.setString("name", billingContact.contact().name());
    cursor.setString("email", billingContact.contact().email());
    cursor.setString("phone", billingContact.contact().phone());
    toSlime(billingContact.address(), cursor);
}
/**
 * Writes the tenant's contacts as a "contacts" array under the given cursor. Only email
 * contacts are supported; any other contact type fails serialization.
 */
private void toSlime(TenantContacts contacts, Cursor parentCursor) {
    Cursor contactsArray = parentCursor.setArray("contacts");
    contacts.all().forEach(contact -> {
        Cursor contactCursor = contactsArray.addObject();
        Cursor audiencesArray = contactCursor.setArray("audiences");
        contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
        switch (contact.type()) {
            case EMAIL -> contactCursor.setString("email", ((TenantContacts.EmailContact) contact).email());
            default -> throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
        }
    });
}
/** Parses an audience name from the API representation; unknown names are rejected. */
private static TenantContacts.Audience fromAudience(String value) {
    return switch (value) {
        case "tenant" -> TenantContacts.Audience.TENANT;
        case "notifications" -> TenantContacts.Audience.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
    };
}
/** Serializes an audience to its API representation; inverse of {@code fromAudience}. */
private static String toAudience(TenantContacts.Audience audience) {
    return switch (audience) {
        case TENANT -> "tenant";
        case NOTIFICATIONS -> "notifications";
    };
}
/**
 * Updates the info section of a cloud tenant from the request body; 404 when the tenant is
 * missing or not a cloud tenant. Delegates to the common cloud-tenant lookup helper so the
 * filtering and the 404 message stay in one place (behavior is unchanged).
 */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    return withCloudTenant(tenantName, tenant -> updateTenantInfo(tenant, request));
}
/** Returns the trimmed value of the given field when present, and the given default otherwise. */
private String getString(Inspector field, String defaultValue) { // fixed param typo: defaultVale
    return field.valid() ? field.asString().trim() : defaultValue;
}
/**
 * Merges the flat info fields of the request body into the tenant's existing info, validates
 * the result, and stores it under the tenant lock. Absent fields keep their current values.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo currentInfo = tenant.info();
    Inspector inspector = toSlime(request.getData()).get();
    TenantContact updatedContact = TenantContact.empty()
            .withName(getString(inspector.field("contactName"), currentInfo.contact().name()))
            .withEmail(getString(inspector.field("contactEmail"), currentInfo.contact().email()));
    TenantInfo updatedInfo = TenantInfo.empty()
            .withName(getString(inspector.field("name"), currentInfo.name()))
            .withEmail(getString(inspector.field("email"), currentInfo.email()))
            .withWebsite(getString(inspector.field("website"), currentInfo.website()))
            .withContact(updatedContact)
            .withAddress(updateTenantInfoAddress(inspector.field("address"), currentInfo.address()))
            .withBilling(updateTenantInfoBillingContact(inspector.field("billingContact"), currentInfo.billingContact()))
            .withContacts(updateTenantInfoContacts(inspector.field("contacts"), currentInfo.contacts()));
    validateMergedTenantInfo(updatedInfo);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withInfo(updatedInfo)));
    return new MessageResponse("Tenant info updated");
}
/**
 * Merges the address fields of the request body into the existing address. The merged address
 * must be either fully blank or fully populated; a partial address is rejected.
 *
 * @throws IllegalArgumentException when some but not all address fields are set
 */
private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
    if ( ! insp.valid()) return oldAddress;
    TenantAddress merged = TenantAddress.empty()
            .withCountry(getString(insp.field("country"), oldAddress.country()))
            .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
            .withCity(getString(insp.field("city"), oldAddress.city()))
            .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
            .withAddress(getString(insp.field("addressLines"), oldAddress.address()));
    List<String> fields = List.of(merged.address(), merged.code(), merged.country(), merged.city(), merged.region());
    boolean allBlank = fields.stream().allMatch(String::isBlank);
    boolean noneBlank = fields.stream().noneMatch(String::isBlank);
    if (allBlank || noneBlank)
        return merged;
    throw new IllegalArgumentException("All address fields must be set");
}
/**
 * Merges the contact fields of the request body into the existing contact. A non-blank merged
 * email must contain an '@'.
 * (The merged email was previously computed twice with identical arguments; the validated
 * local is now reused — same value, one computation.)
 *
 * @throws IllegalArgumentException when the merged email is non-blank but not an email address
 */
private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) {
    if (!insp.valid()) return oldContact;
    String email = getString(insp.field("email"), oldContact.email());
    if (!email.isBlank() && !email.contains("@")) {
        throw new IllegalArgumentException("'email' needs to be an email address");
    }
    return TenantContact.empty()
            .withName(getString(insp.field("name"), oldContact.name()))
            .withEmail(email)
            .withPhone(getString(insp.field("phone"), oldContact.phone()));
}
/** Merges the billing contact (contact fields plus nested address) of the request body into the existing one. */
private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) {
    if ( ! insp.valid()) return oldContact;
    TenantContact mergedContact = updateTenantInfoContact(insp, oldContact.contact());
    TenantAddress mergedAddress = updateTenantInfoAddress(insp.field("address"), oldContact.address());
    return TenantBilling.empty().withContact(mergedContact).withAddress(mergedAddress);
}
/**
 * Parses a full replacement contacts list from the request body; the existing list is
 * returned untouched when the field is absent. Each entry must carry a valid email address.
 *
 * @throws IllegalArgumentException when an entry's email lacks an '@', or an audience is unknown
 */
private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) {
    if ( ! insp.valid()) return oldContacts;
    List<TenantContacts.EmailContact> parsed = SlimeUtils.entriesStream(insp)
            .map(contactInspector -> {
                String email = contactInspector.field("email").asString().trim();
                List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(contactInspector.field("audiences"))
                        .map(audience -> fromAudience(audience.asString()))
                        .toList();
                if ( ! email.contains("@"))
                    throw new IllegalArgumentException("'email' needs to be an email address");
                return new TenantContacts.EmailContact(audiences, email);
            })
            .toList();
    return new TenantContacts(parsed);
}
/**
 * Lists notifications, optionally restricted to a single tenant, filtered by any
 * application/instance/zone/job/type/level request properties. Messages can be elided
 * with excludeMessages=true.
 */
private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
    boolean productionOnly = showOnlyProductionInstances(request);
    boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
    Slime slime = new Slime();
    Cursor notificationsArray = slime.setObject().setArray("notifications");
    // A single tenant when given, otherwise every tenant that has notifications.
    Stream<TenantName> tenantNames = tenant.isPresent()
            ? Stream.of(TenantName.from(tenant.get()))
            : controller.notificationsDb().listTenantsWithNotifications().stream();
    tenantNames.flatMap(name -> controller.notificationsDb().listNotifications(NotificationSource.from(name), productionOnly).stream())
               .filter(notification ->
                       propertyEquals(request, "application", ApplicationName::from, notification.source().application())
                       && propertyEquals(request, "instance", InstanceName::from, notification.source().instance())
                       && propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId())
                       && propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType())
                       && propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type()))
                       && propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
               .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
    return new SlimeJsonResponse(slime);
}
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
return Optional.ofNullable(request.getProperty(property))
.map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get()))
.orElse(true);
}
    /**
     * Serializes one notification into the given cursor. Optional source fields (application,
     * instance, zone, cluster, job, run) are written only when present; messages are omitted
     * when {@code excludeMessages} is set.
     */
    private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
        cursor.setLong("at", notification.at().toEpochMilli());
        cursor.setString("level", notificationLevelAsString(notification.level()));
        cursor.setString("type", notificationTypeAsString(notification.type()));
        if (!excludeMessages) {
            Cursor messagesArray = cursor.setArray("messages");
            notification.messages().forEach(messagesArray::addString);
        }
        // Tenant is implicit when listing a single tenant's notifications; callers opt in to include it.
        if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
        notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
        notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
        // Zone is split into separate environment and region fields on the wire.
        notification.source().zoneId().ifPresent(zoneId -> {
            cursor.setString("environment", zoneId.environment().value());
            cursor.setString("region", zoneId.region().value());
        });
        notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
        notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
        notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
    }
private static String notificationTypeAsString(Notification.Type type) {
return switch (type) {
case submission, applicationPackage: yield "applicationPackage";
case testPackage: yield "testPackage";
case deployment: yield "deployment";
case feedBlock: yield "feedBlock";
case reindex: yield "reindex";
};
}
private static String notificationLevelAsString(Notification.Level level) {
return switch (level) {
case info: yield "info";
case warning: yield "warning";
case error: yield "error";
};
}
    /**
     * Lists applications for a tenant, or a single application when {@code applicationName}
     * is present. Each entry includes its API url and the urls of its (optionally
     * production-only) instances.
     *
     * @throws NotExistsException if the named application does not exist
     */
    private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        // Fails with a not-found error if the tenant itself does not exist.
        getTenantOrThrow(tenantName);
        List<Application> applications = applicationName.isEmpty() ?
                controller.applications().asList(tenant) :
                controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
                        .map(List::of)
                        .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
        Slime slime = new Slime();
        Cursor applicationArray = slime.setArray();
        for (Application application : applications) {
            Cursor applicationObject = applicationArray.addObject();
            applicationObject.setString("tenant", application.id().tenant().value());
            applicationObject.setString("application", application.id().application().value());
            // Urls are rebuilt against the request uri so scheme/host match what the client used.
            applicationObject.setString("url", withPath("/application/v4" +
                                                        "/tenant/" + application.id().tenant().value() +
                                                        "/application/" + application.id().application().value(),
                                                        request.getUri()).toString());
            Cursor instanceArray = applicationObject.setArray("instances");
            for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                              : application.instances().keySet()) {
                Cursor instanceObject = instanceArray.addObject();
                instanceObject.setString("instance", instance.value());
                instanceObject.setString("url", withPath("/application/v4" +
                                                         "/tenant/" + application.id().tenant().value() +
                                                         "/application/" + application.id().application().value() +
                                                         "/instance/" + instance.value(),
                                                         request.getUri()).toString());
            }
        }
        return new SlimeJsonResponse(slime);
    }
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
ZoneId zone = type.zone();
RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision();
byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
}
private HttpResponse devApplicationPackageDiff(RunId runId) {
DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone());
return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number())
.map(ByteArrayResponse::new)
.orElseThrow(() -> new NotExistsException("No application package diff found for " + runId));
}
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
final long build;
String requestedBuild = request.getProperty("build");
if (requestedBuild != null) {
if (requestedBuild.equals("latestDeployed")) {
build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision()
.map(RevisionId::number)
.orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication));
} else {
try {
build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
}
}
} else {
build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
.map(version -> version.id().number())
.orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
}
RevisionId revision = RevisionId.forProduction(build);
boolean tests = request.getBooleanProperty("tests");
byte[] applicationPackage = tests ?
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
return new ZipResponse(filename, applicationPackage);
}
private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number))
.map(ByteArrayResponse::new)
.orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
}
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
Slime slime = new Slime();
OptionalInt allowMajor = OptionalInt.empty();
if (allowMajorParam != null) {
try {
allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
}
}
Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
slime.setObject().setString("compileVersion", compileVersion.toFullString());
return new SlimeJsonResponse(slime);
}
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName),
controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
return new SlimeJsonResponse(slime);
}
    /**
     * Registers the PEM-encoded public key in the request body as a developer key for the
     * requesting user, and returns the resulting list of developer keys.
     *
     * @throws IllegalArgumentException if the tenant is not a cloud tenant
     */
    private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        Principal user = request.getJDiscRequest().getUserPrincipal();
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        Slime root = new Slime();
        // Response is built inside the lock so it reflects the stored key set.
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withDeveloperKey(developerKey, user);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }
    /**
     * Asks the config server of the given deployment to validate access to the named tenant
     * secret store, and wraps the config server's answer in a response that also names the
     * validated deployment.
     */
    private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
        var awsRegion = request.getProperty("aws-region");
        var parameterName = request.getProperty("parameter-name");
        var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
        // The application in 'application-id' must belong to the tenant in the path.
        if (!applicationId.tenant().equals(TenantName.from(tenantName)))
            return ErrorResponse.badRequest("Invalid application id");
        var zoneId = requireZone(ZoneId.from(request.getProperty("zone")));
        var deploymentId = new DeploymentId(applicationId, zoneId);
        var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
        var tenantSecretStore = tenant.tenantSecretStores()
                .stream()
                .filter(secretStore -> secretStore.getName().equals(secretStoreName))
                .findFirst();
        if (tenantSecretStore.isEmpty())
            return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
        var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
        try {
            var responseRoot = new Slime();
            var responseCursor = responseRoot.setObject();
            responseCursor.setString("target", deploymentId.toString());
            var responseResultCursor = responseCursor.setObject("result");
            // The config server's answer is a JSON string; re-parse and embed it under "result".
            var responseSlime = SlimeUtils.jsonToSlime(response);
            SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
            return new SlimeJsonResponse(responseRoot);
        } catch (JsonParseException e) {
            // Malformed JSON from the config server: log and return a generic error.
            return ErrorResponses.logThrowing(request, log, e);
        }
    }
    /**
     * Removes the PEM-encoded public key in the request body from the tenant's developer keys,
     * and returns the resulting list of developer keys.
     *
     * @throws IllegalArgumentException if the tenant is not a cloud tenant
     */
    private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        // NOTE(review): 'user' is never used below — looks like leftover code; confirm and remove.
        Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withoutDeveloperKey(developerKey);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }
private void toSlime(Cursor keysArray, Map<PublicKey, ? extends Principal> keys) {
keys.forEach((key, principal) -> {
Cursor keyObject = keysArray.addObject();
keyObject.setString("key", KeyUtils.toPem(key));
keyObject.setString("user", principal.getName());
});
}
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
Slime root = new Slime();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
application = application.withoutDeployKey(deployKey);
application.get().deployKeys().stream()
.map(KeyUtils::toPem)
.forEach(root.setObject().setArray("keys")::addString);
controller.applications().store(application);
});
return new SlimeJsonResponse(root);
}
    /**
     * Configures a new tenant secret store from the request body ('awsId', 'externalId',
     * 'role'), creating the backing tenant policy, and returns the resulting list of
     * secret stores.
     *
     * @throws IllegalArgumentException if the tenant is not a cloud tenant
     */
    private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        var data = toSlime(request.getData()).get();
        var awsId = mandatory("awsId", data).asString();
        var externalId = mandatory("externalId", data).asString();
        var role = mandatory("role", data).asString();
        var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var tenantSecretStore = new TenantSecretStore(name, awsId, role);
        if (!tenantSecretStore.isValid()) {
            return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
        }
        if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
            return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
        }
        // External side effects first, then the tenant record is updated under lock.
        controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
        controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
        controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
            controller.tenants().store(lockedTenant);
        });
        // Re-read so the response reflects the stored state.
        tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var slime = new Slime();
        toSlime(slime.setObject(), tenant.tenantSecretStores());
        return new SlimeJsonResponse(slime);
    }
    /**
     * Deletes the named tenant secret store and its backing tenant policy, and returns the
     * resulting list of secret stores.
     */
    // NOTE(review): 'request' is unused; presumably kept to match the handler routing signature.
    private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
        var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var optionalSecretStore = tenant.tenantSecretStores().stream()
                .filter(secretStore -> secretStore.getName().equals(name))
                .findFirst();
        if (optionalSecretStore.isEmpty())
            return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
        var tenantSecretStore = optionalSecretStore.get();
        // External side effects first, then the tenant record is updated under lock.
        controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
        controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
        controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
            controller.tenants().store(lockedTenant);
        });
        // Re-read so the response reflects the stored state.
        tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var slime = new Slime();
        toSlime(slime.setObject(), tenant.tenantSecretStores());
        return new SlimeJsonResponse(slime);
    }
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var role = mandatory("role", data).asString();
if (role.isBlank()) {
return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
}
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
var access = lockedTenant.get().archiveAccess();
lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role));
controller.tenants().store(lockedTenant);
});
return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
private HttpResponse removeAwsArchiveAccess(String tenantName) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
var access = lockedTenant.get().archiveAccess();
lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole());
controller.tenants().store(lockedTenant);
});
return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
}
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
var data = toSlime(request.getData()).get();
var member = mandatory("member", data).asString();
if (member.isBlank()) {
return ErrorResponse.badRequest("GCP archive access role can't be whitespace only");
}
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
var access = lockedTenant.get().archiveAccess();
lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
controller.tenants().store(lockedTenant);
});
return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}
private HttpResponse removeGcpArchiveAccess(String tenantName) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
var access = lockedTenant.get().archiveAccess();
lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember());
controller.tenants().store(lockedTenant);
});
return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}
    /**
     * Applies a partial update to the application from the request body. Supported fields:
     * 'majorVersion' (0 clears the pinned major) and 'pemDeployKey' (adds a deploy key).
     * Returns a message describing the applied changes.
     */
    private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
        Inspector requestObject = toSlime(request.getData()).get();
        StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            Inspector majorVersionField = requestObject.field("majorVersion");
            if (majorVersionField.valid()) {
                // A majorVersion of 0 means "unpin"; stored as null.
                Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
                application = application.withMajorVersion(majorVersion);
                messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
            }
            Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
            if (pemDeployKeyField.valid()) {
                String pemDeployKey = pemDeployKeyField.asString();
                PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
                application = application.withDeployKey(deployKey);
                messageBuilder.add("Added deploy key " + pemDeployKey);
            }
            controller.applications().store(application);
        });
        return new MessageResponse(messageBuilder.toString());
    }
private Application getApplication(String tenantName, String applicationName) {
TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
return controller.applications().getApplication(applicationId)
.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
return controller.applications().getInstance(applicationId)
.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
    /** Lists the nodes allocated to the given instance in the given zone, with their state and resources. */
    private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));
        Slime slime = new Slime();
        Cursor nodesArray = slime.setObject().setArray("nodes");
        for (Node node : nodes) {
            Cursor nodeObject = nodesArray.addObject();
            nodeObject.setString("hostname", node.hostname().value());
            nodeObject.setString("state", valueOf(node.state()));
            nodeObject.setString("orchestration", valueOf(node.serviceState()));
            nodeObject.setString("version", node.currentVersion().toString());
            node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
            toSlime(node.resources(), nodeObject);
            nodeObject.setString("clusterId", node.clusterId());
            nodeObject.setString("clusterType", valueOf(node.clusterType()));
            nodeObject.setBool("down", node.down());
            // A node explicitly marked for retirement is also reported as retired.
            nodeObject.setBool("retired", node.retired() || node.wantToRetire());
            // Pending restart/reboot is signalled by the wanted generation being ahead of the current one.
            nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
            nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
            nodeObject.setString("group", node.group());
            nodeObject.setLong("index", node.index());
        }
        return new SlimeJsonResponse(slime);
    }
    /** Lists the clusters of the given deployment, with autoscaling limits, status and scaling history. */
    private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
        Slime slime = new Slime();
        Cursor clustersObject = slime.setObject().setObject("clusters");
        for (Cluster cluster : application.clusters().values()) {
            Cursor clusterObject = clustersObject.setObject(cluster.id().value());
            clusterObject.setString("type", cluster.type().name());
            toSlime(cluster.min(), clusterObject.setObject("min"));
            toSlime(cluster.max(), clusterObject.setObject("max"));
            toSlime(cluster.current(), clusterObject.setObject("current"));
            // Only report a target when it actually differs from the current resources.
            if (cluster.target().isPresent()
                && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
                toSlime(cluster.target().get(), clusterObject.setObject("target"));
            cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
            utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
            scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
            clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
            clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
            clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
            clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
            clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
        }
        return new SlimeJsonResponse(slime);
    }
private static String valueOf(Node.State state) {
return switch (state) {
case failed: yield "failed";
case parked: yield "parked";
case dirty: yield "dirty";
case ready: yield "ready";
case active: yield "active";
case inactive: yield "inactive";
case reserved: yield "reserved";
case provisioned: yield "provisioned";
case breakfixed: yield "breakfixed";
case deprovisioned: yield "deprovisioned";
default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
};
}
static String valueOf(Node.ServiceState state) {
switch (state) {
case expectedUp: return "expectedUp";
case allowedDown: return "allowedDown";
case permanentlyDown: return "permanentlyDown";
case unorchestrated: return "unorchestrated";
case unknown: break;
}
return "unknown";
}
private static String valueOf(Node.ClusterType type) {
return switch (type) {
case admin: yield "admin";
case content: yield "content";
case container: yield "container";
case combined: yield "combined";
case unknown: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
};
}
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
return switch (diskSpeed) {
case fast : yield "fast";
case slow : yield "slow";
case any : yield "any";
};
}
private static String valueOf(NodeResources.StorageType storageType) {
return switch (storageType) {
case remote : yield "remote";
case local : yield "local";
case any : yield "any";
};
}
    /**
     * Streams logs for the given deployment from the config server straight to the client,
     * passing the query parameters through as log filters.
     */
    private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
        ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        DeploymentId deployment = new DeploymentId(application, zone);
        InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
        return new HttpResponse(200) {
            @Override
            public void render(OutputStream outputStream) throws IOException {
                // try-with-resources ensures the upstream log stream is closed after the transfer.
                try (logStream) {
                    logStream.transferTo(outputStream);
                }
            }
            @Override
            public long maxPendingBytes() {
                // Allow up to 64 MiB of buffered response data while streaming.
                return 1 << 26;
            }
        };
    }
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
}
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
Instant now = controller.clock().instant();
SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName());
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment);
return buildResponseFromProtonMetrics(protonMetrics);
}
    /**
     * Returns scaling events for the given deployment, grouped per cluster, limited to the
     * window given by the optional 'from' and 'until' epoch-second request parameters
     * (defaulting to the epoch and now, respectively).
     */
    private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        var from = Optional.ofNullable(request.getProperty("from"))
                .map(Long::valueOf)
                .map(Instant::ofEpochSecond)
                .orElse(Instant.EPOCH);
        var until = Optional.ofNullable(request.getProperty("until"))
                .map(Long::valueOf)
                .map(Instant::ofEpochSecond)
                .orElse(Instant.now(controller.clock()));
        var application = ApplicationId.from(tenantName, applicationName, instanceName);
        var zone = requireZone(environment, region);
        var deployment = new DeploymentId(application, zone);
        var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
        var slime = new Slime();
        var root = slime.setObject();
        // One array per cluster, keyed by cluster id.
        for (var entry : events.entrySet()) {
            var serviceRoot = root.setArray(entry.getKey().clusterId().value());
            scalingEventsToSlime(entry.getValue(), serviceRoot);
        }
        return new SlimeJsonResponse(slime);
    }
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
try {
var jsonObject = jsonMapper.createObjectNode();
var jsonArray = jsonMapper.createArrayNode();
for (ProtonMetrics metrics : protonMetrics) {
jsonArray.add(metrics.toJson());
}
jsonObject.set("metrics", jsonArray);
return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
} catch (JsonProcessingException e) {
log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
return new JsonResponse(500, "");
}
}
    /**
     * Triggers (or, with 'reTrigger', re-triggers) the given job. The body flags 'skipTests',
     * 'skipRevision' and 'skipUpgrade' invert into requireTests/upgradeRevision/upgradePlatform.
     * Returns a message naming the triggered jobs and any suppressed upgrades.
     */
    private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
        Inspector requestObject = toSlime(request.getData()).get();
        boolean requireTests = ! requestObject.field("skipTests").asBool();
        boolean reTrigger = requestObject.field("reTrigger").asBool();
        boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
        boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
        // reTrigger returns a single job; forceTrigger may return several, joined by ", ".
        String triggered = reTrigger
                           ? controller.applications().deploymentTrigger()
                                       .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
                           : controller.applications().deploymentTrigger()
                                       .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                                       .stream().map(job -> job.type().jobName()).collect(joining(", "));
        // Builds e.g. ", without revision upgrade", ", without platform upgrade",
        // or ", without revision and platform upgrade"; empty when nothing is suppressed.
        String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                    (upgradeRevision ? "" : "revision") +
                                    ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                    (upgradePlatform ? "" : "platform") +
                                    ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
        return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                       : "Triggered " + triggered + " for " + id + suppressedUpgrades);
    }
private HttpResponse pause(ApplicationId id, JobType type) {
Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
controller.applications().deploymentTrigger().pauseJob(id, type, until);
return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
private HttpResponse resume(ApplicationId id, JobType type) {
controller.applications().deploymentTrigger().resumeJob(id, type);
return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/**
 * Serializes the given application to the given object: ids, a link to its job listing,
 * latest revision, project id, current and outstanding change (from the first instance),
 * per-instance details, deploy keys, metrics, activity, and ownership info.
 * Field insertion order determines JSON output order, so do not reorder the setters.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
object.setString("tenant", application.id().tenant().value());
object.setString("application", application.id().application().value());
// Link to the job listing for this application.
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/job/",
request.getUri()).toString());
DeploymentStatus status = controller.jobController().deploymentStatus(application);
application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
application.projectId().ifPresent(id -> object.setLong("projectId", id));
// Current and outstanding change are reported from the first instance only.
application.instances().values().stream().findFirst().ifPresent(instance -> {
if ( ! instance.change().isEmpty())
toSlime(object.setObject("deploying"), instance.change(), application);
if ( ! status.outstandingChange(instance.name()).isEmpty())
toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
});
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
// Optionally restrict the instance listing to production instances only, per request parameter.
Cursor instancesArray = object.setArray("instances");
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
: application.instances().values())
toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes the given instance, in the context of the given deployment status and spec, to the given object:
 * current and outstanding change, change blockers, rotation id, and one entry per deployment.
 * Field insertion order determines JSON output order, so do not reorder the setters.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // Change and blocker info is only meaningful for instances declared in the deployment spec.
        // (Removed an unused JobStatus collection that was computed here but never read.)
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), status.application());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    addRotationId(object, instance);
    // Deployments are listed in spec order when the instance is declared, insertion order otherwise.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request)) // Include all deployment information if recursive.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Otherwise, only include environment, region, and a link to the full deployment resource.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Adds a "rotationId" field for the first assigned rotation, if the instance has any. */
private void addRotationId(Cursor object, Instance instance) {
    if ( ! instance.rotations().isEmpty())
        object.setString("rotationId", instance.rotations().get(0).rotationId().asString());
}
/**
 * Serializes the given instance, with application-level context, to the given object:
 * ids, job listing link, latest revision info, project id, current and outstanding change,
 * change blockers, rotation id, one entry per deployment (plus empty entries for zones
 * that declared or active jobs target but which have no deployment yet), deploy keys,
 * metrics, activity, and ownership info.
 * Field insertion order determines JSON output order, so do not reorder the setters.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.revisions().last().ifPresent(version -> {
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // Change and blocker info is only meaningful for instances declared in the deployment spec.
        // (Removed an unused JobStatus collection that was computed here but never read.)
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    addRotationId(object, instance);
    // Deployments are listed in spec order when the instance is declared, insertion order otherwise.
    List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
                                              .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                              .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request)) // Include all deployment information if recursive.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Otherwise, only include environment, region, instance, and a link to the deployment resource.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Add empty entries for zones targeted by declared production deployment jobs,
    // or by active manual runs, which have no deployment yet.
    Stream.concat(status.jobSteps().keySet().stream()
                        .filter(job -> job.application().instance().equals(instance.name()))
                        .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                  controller.jobController().active(instance.id()).stream()
                            .map(run -> run.id().job())
                            .filter(job -> job.type().environment().isManuallyDeployed()))
          .map(job -> job.type().zone())
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    // Both the singular and the plural deploy-key fields are emitted — the singular one
    // presumably for backwards compatibility; confirm before removing.
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns the full serialization of a single deployment, or 404 if instance or deployment is missing. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    ZoneId zone = requireZone(environment, region);
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes the platform version and/or application revision of the given change to the given object. */
private void toSlime(Cursor object, Change change, Application application) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.revision().ifPresent(id -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"),
                                                                            application.revisions().get(id)));
}
/** Serializes the given endpoint to the given object. Field order determines JSON output order. */
private void toSlime(Endpoint endpoint, Cursor object) {
object.setString("cluster", endpoint.cluster().value());
object.setBool("tls", endpoint.tls());
object.setString("url", endpoint.url().toString());
// Scope and routing method are rendered through shared string mappings, keeping the API stable.
object.setString("scope", endpointScopeString(endpoint.scope()));
object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
object.setBool("legacy", endpoint.legacy());
}
/** Serializes the given rotation state under a nested "bcpStatus" object. */
private void toSlime(RotationState state, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", rotationStateString(state));
}
/** Serializes the endpoint status of each assigned rotation, for the given deployment, to the given object. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor array = object.setArray("endpointStatus");
    for (AssignedRotation assigned : rotations) {
        Cursor entry = array.addObject();
        var targets = status.of(assigned.rotationId());
        entry.setString("endpointId", assigned.endpointId().id());
        entry.setString("rotationId", assigned.rotationId().asString());
        entry.setString("clusterId", assigned.clusterId().value());
        // State is resolved per deployment, while lastUpdated covers the rotation as a whole.
        entry.setString("status", rotationStateString(status.of(assigned.rotationId(), deployment)));
        entry.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring system URI for the given deployment, as known by the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    var zoneRegistry = controller.zoneRegistry();
    return zoneRegistry.getMonitoringSystemUri(deploymentId);
}
/** Sets the given deployment in or out of service in global rotation, recording who made the change. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    if (instance.deployments().get(zone) == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    // Operators and tenants are tracked separately as the status-changing agent.
    RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(new DeploymentId(instance.id(), zone)).setRoutingStatus(status, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                           instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Returns the global rotation override status for the given deployment, empty if it requires no rotation. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
              .requiresRotation()
              .primary()
              .ifPresent(endpoint -> {
                  RoutingStatus status = controller.routing().of(deploymentId).routingStatus();
                  array.addString(endpoint.upstreamName(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.value().name());
                  statusObject.setString("reason", "");
                  statusObject.setString("agent", status.agent().name());
                  statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation status of the given deployment, for the rotation matching the given endpoint id. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the change currently rolling out for the given instance, or an empty object if none. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(platform -> root.setString("platform", platform.toString()));
        change.revision().ifPresent(revision -> root.setString("application", revision.toString()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended by the orchestrator. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deployment));
    return new SlimeJsonResponse(slime);
}
/** Proxies a service's /status page on the given host through the config server. */
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    Query parameters = Query.empty().add(request.getJDiscRequest().parameters());
    return controller.serviceRegistry().configServer().getServiceNodePage(deployment,
                                                                          serviceName,
                                                                          DomainName.of(host),
                                                                          HttpURL.Path.parse("/status").append(restPath),
                                                                          parameters);
}
/** Proxies the orchestrator's service-node listing for the given deployment through the config server. */
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    return controller.serviceRegistry().configServer().getServiceNodes(deployment);
}
/** Proxies a service's /state/v1 API through the config server, forwarding the original URL without its query. */
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    String forwardedUrl = HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString();
    Query query = Query.empty().add(request.getJDiscRequest().parameters())
                       .set("forwarded-url", forwardedUrl);
    return controller.serviceRegistry().configServer().getServiceNodePage(
            deployment, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
}
/** Fetches application package content at the given path for the given deployment, via the config server. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deployment, restPath, request.getUri());
}
/** Updates an existing tenant from the request body; 404s if the tenant does not exist. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // Fails fast when the tenant is unknown.
    TenantName name = TenantName.from(tenantName);
    Inspector body = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(name, body),
                                accessControlRequests.credentials(name, body, request.getJDiscRequest()));
    return tenant(controller.tenants().require(name), request);
}
/** Creates a new tenant from the request body; in public systems, also seeds contact info from the creating user. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName name = TenantName.from(tenantName);
    Inspector body = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(name, body),
                                accessControlRequests.credentials(name, body, request.getJDiscRequest()));
    if (controller.system().isPublic()) {
        User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
        TenantInfo info = controller.tenants().require(name, CloudTenant.class)
                                    .info()
                                    .withContact(TenantContact.from(user.name(), user.email()));
        controller.tenants().lockOrThrow(name, LockedTenant.Cloud.class, locked ->
                controller.tenants().store(locked.withInfo(info)));
    }
    return tenant(controller.tenants().require(name), request);
}
/**
 * Creates a new application under the given tenant, using credentials from the request body,
 * and returns its serialized form.
 */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().createApplication(id, credentials); // Called for its side effect; return value unused.
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates a new instance, creating the enclosing application first if it does not already exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(id).isEmpty())
        createApplication(tenantName, applicationName, request);
    ApplicationId instanceId = id.instance(instanceName);
    controller.applications().createInstance(instanceId, Tags.empty());
    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        VersionStatus versionStatus = controller.readVersionStatus();
        Version version = Version.fromString(versionString);
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus); // Empty version means "current system version".
        // Only operators may target versions which are not active in this system.
        if ( ! versionStatus.isActive(version) && ! isOperator(request)) {
            String active = versionStatus.versions()
                                         .stream()
                                         .map(VespaVersion::versionNumber)
                                         .map(Version::toString)
                                         .collect(joining(", "));
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + active);
        }
        Change change = pin ? Change.of(version).withPin() : Change.of(version);
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Inspector buildField = toSlime(request.getData()).get().field("build");
    long build = buildField.valid() ? buildField.asLong() : -1; // -1 means "use the latest revision".
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        RevisionId revision;
        if (build == -1)
            revision = application.get().revisions().last().get().id();
        else
            revision = getRevision(application.get(), build);
        Change change = Change.of(revision);
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        message.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/**
 * Returns the revision with the given build number, requiring that its package
 * is still present in the application store.
 *
 * @throws IllegalArgumentException if no such deployable build exists
 */
private RevisionId getRevision(Application application, long build) {
    for (ApplicationVersion version : application.revisions().withPackage()) {
        RevisionId id = version.id();
        if (id.number() != build) continue;
        if (controller.applications().applicationStore().hasBuild(application.id().tenant(),
                                                                  application.id().application(),
                                                                  build))
            return id;
        break; // First match decides, as in the original stream's findFirst.
    }
    throw new IllegalArgumentException("Build number '" + build + "' was not found");
}
/** Marks the given build as non-deployable, and cancels any instance change currently targeting it. */
private HttpResponse cancelBuild(String tenantName, String applicationName, String build) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
    controller.applications().lockApplicationOrThrow(id, application -> {
        // Persist the revision as skipped ...
        controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
        // ... then cancel any in-flight change to exactly this revision.
        for (Instance instance : application.get().instances().values()) {
            if (instance.change().revision().equals(Optional.of(revision)))
                controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION);
        }
    });
    return new MessageResponse("Marked build '" + build + "' as non-deployable");
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change current = application.get().require(id.instance()).change();
        if (current.isEmpty()) {
            message.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        controller.applications().deploymentTrigger().cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
        message.append("Changed deployment from '").append(current)
               .append("' to '").append(controller.applications().requireInstance(id).change())
               .append("' for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // "clusterId" and "documentType" are comma-separated lists; blank entries are ignored.
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(value -> Stream.of(value.split(",")))
                                        .filter(name -> ! name.isBlank())
                                        .toList();
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(value -> Stream.of(value.split(",")))
                                         .filter(name -> ! name.isBlank())
                                         .toList();
    Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
    boolean indexedOnly = request.getBooleanProperty("indexedOnly");
    controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed);
    StringBuilder message = new StringBuilder("Requested reindexing of " + id + " in " + zone);
    if ( ! clusterNames.isEmpty()) message.append(", on clusters ").append(String.join(", ", clusterNames));
    if ( ! documentTypes.isEmpty()) message.append(", for types ").append(String.join(", ", documentTypes));
    if (indexedOnly) message.append(", for indexed types");
    if (speed != null) message.append(", with speed ").append(speed);
    return new MessageResponse(message.toString());
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    // Clusters, and the entries within each, are listed sorted by name for stable output.
    Cursor clustersArray = root.setArray("clusters");
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(clusterEntry -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", clusterEntry.getKey());
                  Cursor pendingArray = clusterObject.setArray("pending");
                  clusterEntry.getValue().pending().entrySet().stream().sorted(comparingByKey())
                              .forEach(pendingEntry -> {
                                  Cursor pendingObject = pendingArray.addObject();
                                  pendingObject.setString("type", pendingEntry.getKey());
                                  pendingObject.setLong("requiredGeneration", pendingEntry.getValue());
                              });
                  Cursor readyArray = clusterObject.setArray("ready");
                  clusterEntry.getValue().ready().entrySet().stream().sorted(comparingByKey())
                              .forEach(readyEntry -> {
                                  Cursor readyObject = readyArray.addObject();
                                  readyObject.setString("type", readyEntry.getKey());
                                  setStatus(readyObject, readyEntry.getValue());
                              });
              });
    return new SlimeJsonResponse(slime);
}
/** Serializes the given reindexing status to the given object; each field is set only when its value is known. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(at -> statusObject.setLong("readyAtMillis", at.toEpochMilli()));
    status.startedAt().ifPresent(at -> statusObject.setLong("startedAtMillis", at.toEpochMilli()));
    status.endedAt().ifPresent(at -> statusObject.setLong("endedAtMillis", at.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(name -> statusObject.setString("state", name));
    status.message().ifPresent(text -> statusObject.setString("message", text));
    status.progress().ifPresent(fraction -> statusObject.setDouble("progress", fraction));
    status.speed().ifPresent(rate -> statusObject.setDouble("speed", rate));
}
/** Returns the lower-case API name of the given reindexing state. */
private static String toString(ApplicationReindexing.State state) {
    return switch (state) {
        case PENDING    -> "pending";
        case RUNNING    -> "running";
        case FAILED     -> "failed";
        case SUCCESSFUL -> "successful";
    };
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().enableReindexing(application, zone);
    return new MessageResponse("Enabled reindexing of " + application + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().disableReindexing(application, zone);
    return new MessageResponse("Disabled reindexing of " + application + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    // Optional request parameters narrow the restart to a host, cluster type, and/or cluster id.
    RestartFilter filter = new RestartFilter()
            .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of))
            .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
            .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
    controller.applications().restart(deployment, filter);
    return new MessageResponse("Requested restart of " + deployment);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    controller.applications().setSuspension(deployment, suspend);
    String verb = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(verb + " orchestration of " + deployment);
}
/**
 * Deploys an application package from a multipart request directly to the zone of the given job type.
 * Only allowed for manually deployed environments, unless the caller is an operator.
 *
 * @throws IllegalArgumentException if the target environment disallows direct deployment, or the
 *                                  'applicationZip' form part is missing
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the shared constant for the form part name, consistently with the get(...) below.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone()),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // Parse the optional 'deployOptions' JSON part once, instead of once per field read.
    Optional<Inspector> deployOptions = Optional.ofNullable(dataParts.get("deployOptions"))
                                                .map(json -> SlimeUtils.jsonToSlime(json).get());
    Optional<Version> version = deployOptions.flatMap(options -> optional("vespaVersion", options))
                                             .map(Version::fromString);
    boolean dryRun = deployOptions.flatMap(options -> optional("dryRun", options))
                                  .map(Boolean::valueOf)
                                  .orElse(false);

    ensureApplicationExists(TenantAndApplicationId.from(id), request);
    controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/**
 * Deploys a system application (infrastructure-provided) to the given zone, always on the current
 * system version.
 *
 * NOTE(review): validation failures are reported inconsistently — some as ErrorResponse.badRequest
 * returns, others as thrown IllegalArgumentException; confirm both surface as 400 to clients.
 */
private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    // Only applications recognized as system applications, and which carry an application package, may use this path.
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage()) {
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
    }
    // System applications are pinned to the system version; an explicit version is rejected.
    String vespaVersion = deployOptions.field("vespaVersion").asString();
    if ( ! vespaVersion.isEmpty()) {
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
    }
    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading()) {
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    }
    Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
    if (systemVersion.isEmpty()) {
        throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
    }
    DeploymentResult result = controller.applications()
                                        .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.get().versionNumber());
    // Relay the config server's prepare log to the caller.
    Cursor logArray = root.setArray("prepareMessages");
    for (LogEntry logMessage : result.log()) {
        Cursor logObject = logArray.addObject();
        logObject.setLong("time", logMessage.epochMillis());
        logObject.setString("level", logMessage.level().getName());
        logObject.setString("message", logMessage.message());
    }
    return new SlimeJsonResponse(slime);
}
/** Deletes the given tenant; 'forget' (operators only) also erases its history. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    boolean forget = request.getBooleanProperty("forget");
    if (forget && ! isOperator(request))
        return ErrorResponse.forbidden("Only operators can forget a tenant");
    TenantName tenant = TenantName.from(tenantName);
    Credentials credentials = accessControlRequests.credentials(tenant,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(tenant, Optional.of(credentials), forget);
    return new MessageResponse("Deleted tenant " + tenantName);
}
/** Deletes the given application, authorizing with credentials from the request. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId application = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(application.tenant(),
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.applications().deleteApplication(application, credentials);
    return new MessageResponse("Deleted application " + application);
}
/** Deletes an instance, and the application itself when this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    boolean noInstancesLeft = controller.applications().requireApplication(id).instances().isEmpty();
    if (noInstancesLeft) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}
/** Deactivates a deployment and aborts any still-running job run targeting it. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId id = new DeploymentId(application, requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    // Abort an in-flight deployment job for this zone, if any, attributing the abort to the caller.
    controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
              .filter(run -> ! run.hasEnded())
              .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
    // Fall back to the default instance when the given instance is not declared in deployment.xml.
    ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
            ? id : TenantAndApplicationId.from(id).defaultInstance();
    // All production deployments of the chosen instance ...
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(prodInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(prodInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    // ... plus, for non-production jobs, the deployment under test itself.
    ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(toTest, type.zone()));
    Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone());
    if (deployment == null)
        throw new NotExistsException(toTest + " is not deployed in " + type.zone());
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  deployment.version(),
                                                                  deployment.revision(),
                                                                  deployment.at(),
                                                                  controller.routing().readTestRunnerEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/**
 * Requests a service dump from a node. The request payload names the config id, the artifacts to
 * produce and optional dump options; the resulting dump request is stored as a 'serviceDump' report
 * on the node in the node repository, where it is picked up asynchronously.
 */
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
                                        String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);
    Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
    if (report != null) {
        Cursor cursor = report.get();
        // Refuse to overwrite an in-flight dump request (neither failed nor completed) unless force=true.
        boolean force = request.getBooleanProperty("force");
        if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
            throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
        }
    }
    Slime requestPayload;
    try {
        requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
    } catch (Exception e) {
        throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
    }
    Cursor requestPayloadCursor = requestPayload.get();
    String configId = requestPayloadCursor.field("configId").asString();
    long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
    if (configId.isEmpty()) {
        throw new IllegalArgumentException("Missing configId");
    }
    Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
    int artifactEntries = artifactsCursor.entries();
    if (artifactEntries == 0) {
        throw new IllegalArgumentException("Missing or empty 'artifacts'");
    }
    // Build the dump request document that will be stored as the node's 'serviceDump' report.
    Slime dumpRequest = new Slime();
    Cursor dumpRequestCursor = dumpRequest.setObject();
    dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
    dumpRequestCursor.setString("configId", configId);
    Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
    for (int i = 0; i < artifactEntries; i++) {
        dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
    }
    if (expiresAt > 0) {
        dumpRequestCursor.setLong("expiresAt", expiresAt);
    }
    // Copy optional dump options verbatim into the stored request.
    Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
    if (dumpOptionsCursor.children() > 0) {
        SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
    }
    // NOTE(review): new String(byte[]) uses the platform default charset — presumably UTF-8 is intended; confirm.
    var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
    nodeRepository.updateReports(zone, hostname, reportsUpdate);
    // With wait=true, block until the dump completes or fails; otherwise return immediately.
    boolean wait = request.getBooleanProperty("wait");
    if (!wait) return new MessageResponse("Request created");
    return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
}
/** Returns the current service dump report for a node, or 404 when none exists. */
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment,
                                    String region, String hostname, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname)
            .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname));
    return new SlimeJsonResponse(report);
}
/**
 * Polls the node repository until the service dump report for the given node is complete or failed,
 * then returns it.
 *
 * NOTE(review): there is no upper bound on the number of polls — presumably bounded by the
 * surrounding request timeout; confirm.
 */
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                              String application, String instance, String hostname) {
    int pollInterval = 2; // seconds between polls
    Slime report;
    while (true) {
        // The report existed when the dump was requested; fail with a descriptive error if it has
        // since disappeared, instead of an unchecked Optional.get() throwing a bare NoSuchElementException.
        report = getReport(nodeRepository, zone, tenant, application, instance, hostname)
                .orElseThrow(() -> new IllegalStateException("Service dump report disappeared for node " + hostname));
        Cursor cursor = report.get();
        if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
            break;
        }
        final Slime copyForLambda = report;
        log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda))));
        log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
        controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
    }
    return new SlimeJsonResponse(report);
}
/**
 * Returns the parsed 'serviceDump' report of the given node, or empty when the node has none.
 *
 * @throws NotExistsException       if the node does not exist
 * @throws IllegalArgumentException if the node is not owned by the given application
 */
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                  String application, String instance, String hostname) {
    Node node;
    try {
        node = nodeRepository.getNode(zone, hostname);
    } catch (IllegalArgumentException e) {
        // Unknown host names surface as a 404 rather than a 400.
        throw new NotExistsException(hostname);
    }
    ApplicationId app = ApplicationId.from(tenant, application, instance);
    ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
    if ( ! owner.equals(app))
        throw new IllegalArgumentException("Node is not owned by " + app.toFullString());
    String json = node.reports().get("serviceDump");
    return json == null ? Optional.empty() : Optional.of(SlimeUtils.jsonToSlimeOrThrow(json));
}
/** Builds a SourceRevision from an object with mandatory "repository", "branch" and "commit" fields. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, throwing NotExistsException when it is unknown. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    return tenant.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/**
 * Serializes a tenant, including type-specific details and its applications (or instances, depending
 * on request parameters), into the given object.
 */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            toSlime(object.setObject("integrations").setObject("aws"),
                    controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
                    cloudTenant.tenantSecretStores());
            // Quota lookup is best-effort: failures are logged but must not break tenant serialization.
            try {
                var usedQuota = applications.stream()
                                            .map(Application::quotaUsage)
                                            .reduce(QuotaUsage.none, QuotaUsage::add);
                toSlime(object.setObject("quota"), usedQuota);
            } catch (Exception e) {
                log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e)));
            }
            cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role));
            toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess"));
            break;
        }
        case deleted: break;
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    // Applications: full deployment status when recursing, otherwise just identity/URL entries.
    Cursor applicationArray = object.setArray("applications");
    for (Application application : applications) {
        DeploymentStatus status = null;
        Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                              : application.instances().values();
        if (instances.isEmpty() && !showOnlyActiveInstances(request))
            toSlime(application.id(), applicationArray.addObject(), request);
        for (Instance instance : instances) {
            if (showOnlyActiveInstances(request) && instance.deployments().isEmpty())
                continue;
            if (recurseOverApplications(request)) {
                // Deployment status is computed lazily, once per application.
                if (status == null) status = controller.jobController().deploymentStatus(application);
                toSlime(applicationArray.addObject(), instance, status, request);
            } else {
                toSlime(instance.id(), applicationArray.addObject(), request);
            }
        }
    }
    tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Serializes archive access roles/members; absent values are omitted. */
private void toSlime(ArchiveAccess archiveAccess, Cursor object) {
    archiveAccess.awsRole().ifPresent(awsRole -> object.setString("awsRole", awsRole));
    archiveAccess.gcpMember().ifPresent(gcpMember -> object.setString("gcpMember", gcpMember));
}
/** Serializes quota usage as the budget consumption rate. */
private void toSlime(Cursor object, QuotaUsage usage) {
    double budgetUsed = usage.rate();
    object.setDouble("budgetUsed", budgetUsed);
}
/** Serializes cluster resources, including the system-dependent cost of the configuration. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    object.setDouble("cost", ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()));
}
/** Serializes cluster utilization. Field order (cpu, memory, disk groups) is part of the response shape. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor object) {
    // CPU
    object.setDouble("cpu", utilization.cpu());
    object.setDouble("idealCpu", utilization.idealCpu());
    object.setDouble("currentCpu", utilization.currentCpu());
    object.setDouble("peakCpu", utilization.peakCpu());
    // Memory
    object.setDouble("memory", utilization.memory());
    object.setDouble("idealMemory", utilization.idealMemory());
    object.setDouble("currentMemory", utilization.currentMemory());
    object.setDouble("peakMemory", utilization.peakMemory());
    // Disk
    object.setDouble("disk", utilization.disk());
    object.setDouble("idealDisk", utilization.idealDisk());
    object.setDouble("currentDisk", utilization.currentDisk());
    object.setDouble("peakDisk", utilization.peakDisk());
}
/** Serializes scaling events: from/to resources, start time, and completion time when finished. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor array) {
    for (Cluster.ScalingEvent event : scalingEvents) {
        Cursor eventObject = array.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
        event.completion().ifPresent(completion -> eventObject.setLong("completion", completion.toEpochMilli()));
    }
}
/** Serializes per-node resources. */
private void toSlime(NodeResources nodeResources, Cursor object) {
    object.setDouble("vcpu", nodeResources.vcpu());
    object.setDouble("memoryGb", nodeResources.memoryGb());
    object.setDouble("diskGb", nodeResources.diskGb());
    object.setDouble("bandwidthGbps", nodeResources.bandwidthGbps());
    object.setString("diskSpeed", valueOf(nodeResources.diskSpeed()));
    object.setString("storageType", valueOf(nodeResources.storageType()));
}
/** Serializes a compact tenant entry (name, metadata and URL) for the tenant list response. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz -> {
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
        }
        case cloud, deleted -> { } // no extra metadata
        default -> throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Fills the given object with tenant metadata: creation/deletion, last dev activity, last submission and last logins. */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
    // Latest dev activity: prefer the recorded start of deployments currently in dev zones; when no
    // dev deployment exists, fall back to the start of the last run of any dev job.
    Optional<Instant> lastDev = applications.stream()
                                            .flatMap(application -> application.instances().values().stream())
                                            .flatMap(instance -> instance.deployments().values().stream()
                                                                         .filter(deployment -> deployment.zone().environment() == Environment.dev)
                                                                         .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment)))
                                            .max(Comparator.naturalOrder())
                                            .or(() -> applications.stream()
                                                                  .flatMap(application -> application.instances().values().stream())
                                                                  .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream()
                                                                                              .filter(job -> job.environment() == Environment.dev)
                                                                                              .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                                                                  .map(Run::start)
                                                                  .max(Comparator.naturalOrder()));
    // Latest submission: build time of the most recent revision of any application.
    Optional<Instant> lastSubmission = applications.stream()
                                                   .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream())
                                                   .max(Comparator.naturalOrder());
    object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
    if (tenant.type() == Tenant.Type.deleted)
        object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli());
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
          .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
          .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
          .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    } catch (URISyntaxException e) {
        // All components come from an already-valid URI, so this cannot occur.
        throw new RuntimeException("Will not happen", e);
    }
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    return withPathAndQuery(newPath, null, uri);
}
/** Returns the /application/v4 path identifying the given deployment. */
private String toPath(DeploymentId id) {
    ApplicationId application = id.applicationId();
    ZoneId zone = id.zoneId();
    return path("/application", "v4",
                "tenant", application.tenant(),
                "application", application.application(),
                "instance", application.instance(),
                "environment", zone.environment(),
                "region", zone.region());
}
/**
 * Parses the given string as a long, returning the given default when the string is null.
 *
 * @throws IllegalArgumentException if the string is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Preserve the cause so the original parse failure isn't lost.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/**
 * Reads the given stream (at most 1 MB) and parses it as JSON.
 *
 * @throws RuntimeException wrapping the underlying IOException if reading fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Preserve the cause; the original discarded it, hiding the reason for the failure.
        throw new RuntimeException(e);
    }
}
/** Returns the authenticated user principal of the request, failing when there is none. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal != null) return principal;
    throw new IllegalArgumentException("Expected a user principal");
}
/** Returns the named field of the given object, failing when it is absent. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the named field of the given object as a string, or empty when absent. */
private Optional<String> optional(String key, Inspector object) {
    Inspector field = object.field(key);
    return SlimeUtils.optionalString(field);
}
/** Joins the string forms of the given elements with '/'. Uses the JDK instead of Guava's Joiner. */
private static String path(Object... elements) {
    // Object::toString, like Joiner, throws NPE on a null element.
    return Arrays.stream(elements).map(Object::toString).collect(Collectors.joining("/"));
}
/** Serializes the identity and API URL of an application. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String urlPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value();
    object.setString("url", withPath(urlPath, request.getUri()).toString());
}
/** Serializes the identity and API URL of an application instance. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String urlPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value() +
                     "/instance/" + id.instance().value();
    object.setString("url", withPath(urlPath, request.getUri()).toString());
}
/** Adds each of the given strings to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Serializes the given secret stores under a "secretStores" array. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor secretStoreArray = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(secretStoreArray.addObject(), store);
}
/** Serializes the tenant container role and its secret store accounts. */
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    Cursor accounts = object.setArray("accounts");
    for (TenantSecretStore secretStore : tenantSecretStores)
        toSlime(accounts.addObject(), secretStore);
}
/** Serializes a single tenant secret store. */
private void toSlime(Cursor object, TenantSecretStore store) {
    object.setString("name", store.getName());
    object.setString("awsId", store.getAwsId());
    object.setString("role", store.getRole());
}
/**
 * Reads the entire stream as a UTF-8 string, or returns null when the stream is empty.
 * The charset is pinned to UTF-8; the previous Scanner(InputStream) constructor used the
 * platform default charset, which is environment-dependent.
 */
private String readToString(InputStream stream) {
    Scanner scanner = new Scanner(stream, "UTF-8").useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}
/** Returns whether the 'recursive' parameter asks for tenant-level recursion or deeper. */
private static boolean recurseOverTenants(HttpRequest request) {
    if (recurseOverApplications(request)) return true;
    return "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the 'recursive' parameter asks for application-level recursion or deeper. */
private static boolean recurseOverApplications(HttpRequest request) {
    if (recurseOverDeployments(request)) return true;
    return "application".equals(request.getProperty("recursive"));
}
/** Returns whether the 'recursive' parameter asks for deployment-level recursion. */
private static boolean recurseOverDeployments(HttpRequest request) {
    String recursive = request.getProperty("recursive"); // may be null, which matches none of these
    return "all".equals(recursive) || "true".equals(recursive) || "deployment".equals(recursive);
}
/** Returns whether the response should include production instances only. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    String production = request.getProperty("production");
    return "true".equals(production);
}
/** Returns whether the response should include instances with active deployments only. */
private static boolean showOnlyActiveInstances(HttpRequest request) {
    String activeInstances = request.getProperty("activeInstances");
    return "true".equals(activeInstances);
}
/** Returns whether the response should include deleted entities. */
private static boolean includeDeleted(HttpRequest request) {
    String includeDeleted = request.getProperty("includeDeleted");
    return "true".equals(includeDeleted);
}
/** Returns the API string for the given tenant's type. */
private static String tenantType(Tenant tenant) {
    return switch (tenant.type()) {
        case athenz -> "ATHENS";
        case cloud -> "CLOUD";
        case deleted -> "DELETED";
    };
}
/** Extracts the application id from the tenant/application/instance path segments. */
private static ApplicationId appIdFromPath(Path path) {
    String tenant = path.get("tenant");
    String application = path.get("application");
    String instance = path.get("instance");
    return ApplicationId.from(tenant, application, instance);
}
/** Extracts the job type from the 'jobtype' path segment. */
private JobType jobTypeFromPath(Path path) {
    String jobName = path.get("jobtype");
    return JobType.fromJobName(jobName, controller.zoneRegistry());
}
/** Extracts the run id (application, job type and run number) from the path. */
private RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), Long.parseLong(path.get("number")));
}
/** Handles submission of a new application revision: application and test packages plus source metadata. */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    long projectId = submitOptions.field("projectId").asLong();
    // 0 means "not set"; 1 is used as a neutral default project id.
    projectId = projectId == 0 ? 1 : projectId;
    // A source revision is only recorded when all three of repository, branch and commit are given.
    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
            ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
            : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    Optional<String> description = optional("description", submitOptions);
    int risk = (int) submitOptions.field("risk").asLong();
    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP);
    Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     Optional.empty(),
                                                                     Optional.empty(),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
    // The application is created on the fly where permitted (public systems / Okta sessions).
    ensureApplicationExists(id, request);
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
}
/** Removes all production deployments of an application by submitting a deployment-removal package. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
    Submission removal = new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
                                        Optional.empty(), Optional.empty(), Optional.empty(), 0);
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, removal, 0);
    return new MessageResponse("All deployments removed");
}
/** Parses and validates the zone given by environment and region. */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    return requireZone(zone);
}
/** Returns the given zone if it exists in this system; the prod 'controller' pseudo-zone always does. */
private ZoneId requireZone(ZoneId zone) {
    boolean isControllerPseudoZone = zone.environment() == Environment.prod
                                     && zone.region().value().equals("controller");
    if ( ! isControllerPseudoZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart form data of the given request. When an X-Content-Hash header is present, the
 * SHA-256 digest of the request body is computed as the parser consumes the stream, then verified
 * against the (base64-encoded) header value, guarding against content corruption in transit.
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("X-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);
    // The digest stream must wrap the raw data before parsing, so the whole body is digested.
    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}
/**
 * Resolves the rotation to operate on: the one matching the given endpoint id when present,
 * otherwise the single assigned rotation.
 *
 * @throws NotExistsException       when the instance has no rotations, or no rotation matches the endpoint id
 * @throws IllegalArgumentException when no endpoint id is given and the instance has several rotations
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    List<AssignedRotation> rotations = instance.rotations();
    if (rotations.isEmpty()) {
        throw new NotExistsException("global rotation does not exist for " + instance);
    }
    if (endpointId.isPresent()) {
        for (AssignedRotation rotation : rotations)
            if (rotation.endpointId().id().equals(endpointId.get()))
                return rotation.rotationId();
        throw new NotExistsException("endpoint " + endpointId.get() +
                                     " does not exist for " + instance);
    } else if (rotations.size() > 1) {
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    }
    return rotations.get(0).rotationId();
}
/** Returns the API string for the given rotation state. */
private static String rotationStateString(RotationState state) {
    return switch (state) {
        case in -> "IN";
        case out -> "OUT";
        case unknown -> "UNKNOWN";
    };
}
/** Returns the API string for the given endpoint scope. */
private static String endpointScopeString(Endpoint.Scope scope) {
    return switch (scope) {
        case weighted -> "weighted";
        case application -> "application";
        case global -> "global";
        case zone -> "zone";
    };
}
/** Returns the API string for the given routing method. */
private static String routingMethodString(RoutingMethod method) {
    return switch (method) {
        case exclusive -> "exclusive";
        case sharedLayer4 -> "sharedLayer4";
    };
}
/** Returns the request context attribute with the given name, cast to the given type; fails if absent or of another type. */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    if (cls.isInstance(value)) // false also for a missing (null) attribute
        return cls.cast(value);
    throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
    SecurityContext securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    for (Role role : securityContext.roles())
        if (role.definition() == RoleDefinition.hostedOperator)
            return true;
    return false;
}
/**
 * Ensures the given application exists: in public systems (or when the request carries an
 * Okta context) a missing application is created on the fly; otherwise the request fails.
 *
 * @throws IllegalArgumentException if the application is missing and may not be auto-created
 */
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
    if (controller.applications().getApplication(id).isPresent()) return;

    if ( ! controller.system().isPublic() && ! hasOktaContext(request)) {
        log.fine("Application does not exist in hosted, failing: " + id);
        throw new IllegalArgumentException("Application does not exist. Create application in Console first.");
    }
    log.fine("Application does not exist in public, creating: " + id);
    var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */, request.getJDiscRequest());
    controller.applications().createApplication(id, credentials);
}
/** Returns whether OAuth credentials can be extracted from the request's Okta context attributes. */
private boolean hasOktaContext(HttpRequest request) {
    try {
        OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context());
        return true;
    } catch (IllegalArgumentException ignored) {
        // Thrown when the Okta attributes are absent from the request context.
        return false;
    }
}
/**
 * Returns the given deployments ordered as their zones are declared in the instance's
 * deployment spec. Deployments in zones not declared in the spec sort first, since
 * List.indexOf yields -1 for them — presumably acceptable for display purposes (TODO confirm).
 *
 * @param deployments the deployments to sort
 * @param spec the instance spec whose production-zone declaration order defines the sort key
 * @return an unmodifiable list of the deployments in declaration order
 */
private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) {
    // Declared production zones, in order; zones without a region (e.g. test steps) are skipped.
    List<ZoneId> productionZones = spec.zones().stream()
            .filter(z -> z.region().isPresent())
            .map(z -> ZoneId.from(z.environment(), z.region().get()))
            .toList();
    // Stream.toList() already returns an unmodifiable list, so the former
    // collectingAndThen(Collectors.toList(), Collections::unmodifiableList) was redundant
    // (and inconsistent with the .toList() used just above).
    return deployments.stream()
            .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone())))
            .toList();
}
} |
I've considered that, but then I saw that `/application/v4` already used "athensDomain": https://github.com/vespa-engine/vespa/blob/abe8f597b50d5fcfb379c136abc6462044f90e0c/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java#L2527 https://github.com/vespa-engine/vespa/blob/abe8f597b50d5fcfb379c136abc6462044f90e0c/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java#L2664 | private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
// Identity of the deployment: application id plus zone.
response.setString("tenant", deploymentId.applicationId().tenant().value());
response.setString("application", deploymentId.applicationId().application().value());
response.setString("instance", deploymentId.applicationId().instance().value());
response.setString("environment", deploymentId.zoneId().environment().value());
response.setString("region", deploymentId.zoneId().region().value());
var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
// Caller opts in to legacy endpoints via the 'includeLegacyEndpoints' query property.
boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
var endpointArray = response.setArray("endpoints");
// Zone-scoped endpoints of this deployment, followed by declared endpoints targeting it —
// both written into the same "endpoints" array.
EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
.scope(Endpoint.Scope.zone);
if (!legacyEndpoints) {
// NOTE(review): presumably keeps only non-legacy, direct endpoints — confirm EndpointList.not() semantics.
zoneEndpoints = zoneEndpoints.not().legacy().direct();
}
for (var endpoint : zoneEndpoints) {
toSlime(endpoint, endpointArray.addObject());
}
EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
.targets(deploymentId);
if (!legacyEndpoints) {
declaredEndpoints = declaredEndpoints.not().legacy().direct();
}
for (var endpoint : declaredEndpoints) {
toSlime(endpoint, endpointArray.addObject());
}
// Convenience links for clusters and the node repository view of this deployment.
response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
// Deployed platform version and application revision.
response.setString("version", deployment.version().toFullString());
response.setString("revision", application.revisions().get(deployment.revision()).stringId());
response.setLong("build", deployment.revision().number());
// Start time of the last deployment, and its expiry when the zone has a deployment TTL.
Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));
application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
// Enclave info is only present when the deployment runs in a tenant-owned cloud account.
controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> {
Cursor enclave = response.setObject("enclave");
enclave.setString("cloudAccount", cloudAccount.value());
enclave.setString("athensDomain", controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).value());
});
var instance = application.instances().get(deploymentId.applicationId().instance());
if (instance != null) {
// Global rotation status is only serialized for production deployments.
if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
if (!deployment.zone().environment().isManuallyDeployed()) {
// Pipeline-managed deployment: derive "status" from the job step for this zone.
DeploymentStatus status = controller.jobController().deploymentStatus(application);
JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
Optional.ofNullable(status.jobSteps().get(jobId))
.ifPresent(stepStatus -> {
JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
response.setString("status", "complete");
else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
response.setString("status", "pending");
else
response.setString("status", "running");
});
} else {
// Manually deployed (dev/perf): status comes from the last run of the deployment job.
var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
deploymentRun.ifPresent(run -> {
response.setString("status", run.hasEnded() ? "complete" : "running");
});
}
}
// Quota, cost and archive location.
response.setDouble("quota", deployment.quota().rate());
deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)
.ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));
// Recent read/write activity — fields are omitted when no activity has been recorded.
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
// Point-in-time metrics snapshot for the deployment.
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
} | enclave.setString("athensDomain", controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).value()); | private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
// Identity of the deployment: application id plus zone.
response.setString("tenant", deploymentId.applicationId().tenant().value());
response.setString("application", deploymentId.applicationId().application().value());
response.setString("instance", deploymentId.applicationId().instance().value());
response.setString("environment", deploymentId.zoneId().environment().value());
response.setString("region", deploymentId.zoneId().region().value());
var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
// Caller opts in to legacy endpoints via the 'includeLegacyEndpoints' query property.
boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
var endpointArray = response.setArray("endpoints");
// Zone-scoped endpoints of this deployment, followed by declared endpoints targeting it —
// both written into the same "endpoints" array.
EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
.scope(Endpoint.Scope.zone);
if (!legacyEndpoints) {
// NOTE(review): presumably keeps only non-legacy, direct endpoints — confirm EndpointList.not() semantics.
zoneEndpoints = zoneEndpoints.not().legacy().direct();
}
for (var endpoint : zoneEndpoints) {
toSlime(endpoint, endpointArray.addObject());
}
EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
.targets(deploymentId);
if (!legacyEndpoints) {
declaredEndpoints = declaredEndpoints.not().legacy().direct();
}
for (var endpoint : declaredEndpoints) {
toSlime(endpoint, endpointArray.addObject());
}
// Convenience links for clusters and the node repository view of this deployment.
response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
// Deployed platform version and application revision.
response.setString("version", deployment.version().toFullString());
response.setString("revision", application.revisions().get(deployment.revision()).stringId());
response.setLong("build", deployment.revision().number());
// Start time of the last deployment, and its expiry when the zone has a deployment TTL.
Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));
application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
// Enclave info is only present when the deployment runs in a tenant-owned cloud account.
controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> {
Cursor enclave = response.setObject("enclave");
enclave.setString("cloudAccount", cloudAccount.value());
enclave.setString("athensDomain", controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).value());
});
var instance = application.instances().get(deploymentId.applicationId().instance());
if (instance != null) {
// Global rotation status is only serialized for production deployments.
if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
if (!deployment.zone().environment().isManuallyDeployed()) {
// Pipeline-managed deployment: derive "status" from the job step for this zone.
DeploymentStatus status = controller.jobController().deploymentStatus(application);
JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
Optional.ofNullable(status.jobSteps().get(jobId))
.ifPresent(stepStatus -> {
JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
response.setString("status", "complete");
else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
response.setString("status", "pending");
else
response.setString("status", "running");
});
} else {
// Manually deployed (dev/perf): status comes from the last run of the deployment job.
var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
deploymentRun.ifPresent(run -> {
response.setString("status", run.hasEnded() ? "complete" : "running");
});
}
}
// Quota, cost and archive location.
response.setDouble("quota", deployment.quota().rate());
deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)
.ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));
// Recent read/write activity — fields are omitted when no activity has been recorded.
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
// Point-in-time metrics snapshot for the deployment.
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
} | class ApplicationApiHandler extends AuditLoggingRequestHandler {
// Shared Jackson mapper; ObjectMapper is thread-safe once configured.
private static final ObjectMapper jsonMapper = new ObjectMapper();
// Central controller facade: applications, routing, jobs, zone registry, etc.
private final Controller controller;
// Extracts access-control credentials from incoming requests.
private final AccessControlRequests accessControlRequests;
// Serializes test configuration for this system (see handler construction below).
private final TestConfigSerializer testConfigSerializer;
/**
 * Creates the handler. Injected by the container; audit logging is wired through the
 * superclass using the controller's audit logger.
 *
 * @param parentCtx handler context supplied by the container
 * @param controller the controller this handler delegates all operations to
 * @param accessControlRequests credential extraction for access-controlled operations
 */
@Inject
public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx, controller.auditLogger());
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
}
/** Request timeout for this handler. 20 minutes — presumably to accommodate long-running deploy/submit operations; confirm before lowering. */
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
/**
 * Entry point for all requests: dispatches on HTTP method and translates known
 * exception types to the corresponding HTTP error responses. Unexpected runtime
 * exceptions are logged and returned as internal server errors.
 */
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
    try {
        Path path = new Path(request.getUri());
        return switch (request.getMethod()) {
            case GET -> handleGET(path, request);
            case PUT -> handlePUT(path, request);
            case POST -> handlePOST(path, request);
            case PATCH -> handlePATCH(path, request);
            case DELETE -> handleDELETE(path, request);
            case OPTIONS -> handleOPTIONS();
            default -> ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
        };
    }
    catch (RestApiException.Forbidden e) {
        return ErrorResponse.forbidden(Exceptions.toMessageString(e));
    }
    catch (RestApiException.Unauthorized e) {
        return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
    }
    catch (NotExistsException e) {
        return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
    }
    catch (IllegalArgumentException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
    catch (ConfigServerException e) {
        // Map config server error codes onto the closest HTTP status.
        return switch (e.code()) {
            case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e));
            case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
            case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e);
            default -> new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
        };
    }
    catch (RuntimeException e) {
        return ErrorResponses.logThrowing(request, log, e);
    }
}
/**
 * Routes a GET request to the handler for the first matching path. Matching is sequential,
 * so more specific routes must precede less specific ones. Returns 404 when nothing matches.
 * The trailing group of routes (".../environment/.../region/.../instance/...") is the legacy
 * path order kept for backwards compatibility with older clients.
 */
private HttpResponse handleGET(Path path, HttpRequest request) {
    if (path.matches("/application/v4/")) return root(request);
    if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
    if (path.matches("/application/v4/tenant")) return tenants(request);
    if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
    if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
    if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
    if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
    if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    // Legacy path order below. A second, byte-identical check of this deployment route was
    // removed here: it could never match after this one and was unreachable dead code.
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/**
 * Routes a PUT request to the handler for the first matching path; matching is sequential.
 * Returns 404 when nothing matches.
 */
private HttpResponse handlePUT(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
// NOTE(review): both routes below grant AWS archive access — the un-suffixed path looks like a
// legacy alias for the "/aws" one; confirm before removing either.
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Routes POST requests to the matching operation. First match wins, so more specific
// paths must precede their prefixes; unmatched paths yield 404.
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
// Legacy path order (".../environment/.../region/.../instance/...") kept for backwards compatibility.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Routes PATCH requests. Both the application path and its instance path patch the
// same application-level resource (the instance segment is ignored by patchApplication).
private HttpResponse handlePATCH(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Routes DELETE requests to the matching operation. First match wins; unmatched paths yield 404.
private HttpResponse handleDELETE(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
// Bare "archive-access" defaults to AWS for backwards compatibility with the pre-GCP API.
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
// Legacy path order (".../environment/.../region/.../instance/...") kept for backwards compatibility.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers an OPTIONS (preflight) request by advertising every verb this handler supports. */
private HttpResponse handleOPTIONS() {
    var methodsAllowed = new EmptyResponse();
    methodsAllowed.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return methodsAllowed;
}
// Lists all tenants, each with its applications, as a JSON array.
// All applications are fetched once up front and filtered per tenant to avoid
// one lookup per tenant.
private HttpResponse recursiveRoot(HttpRequest request) {
Slime slime = new Slime();
Cursor tenantArray = slime.setArray();
List<Application> applications = controller.applications().asList();
for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
toSlime(tenantArray.addObject(),
tenant,
applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).collect(toList()),
request);
return new SlimeJsonResponse(slime);
}
/** Serves the API root: a full recursive listing when requested, otherwise just the resource links. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "tenant");
}
/** Lists all (optionally including deleted) tenants as a JSON array of summaries. */
private HttpResponse tenants(HttpRequest request) {
    var slime = new Slime();
    var tenantArray = slime.setArray();
    controller.tenants().asList(includeDeleted(request))
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Serves a single tenant by name, or 404 when it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName), includeDeleted(request));
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Renders the given tenant, with all its applications, as a JSON object. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    var slime = new Slime();
    var root = slime.setObject();
    toSlime(root, tenant, controller.applications().asList(tenant.name()), request);
    return new SlimeJsonResponse(slime);
}
// Serves the ssh-access request state for a cloud tenant: whether access is managed,
// any pending membership request, and the audit log of past decisions.
// Only cloud tenants support access requests; others get a 400.
private HttpResponse accessRequests(String tenantName, HttpRequest request) {
var tenant = TenantName.from(tenantName);
if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
var accessControlService = controller.serviceRegistry().accessControlService();
var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
var managedAccess = accessControlService.getManagedAccess(tenant);
var slime = new Slime();
var cursor = slime.setObject();
cursor.setBool("managedAccess", managedAccess);
// "pendingRequest" is only present when there is an open request.
accessRoleInformation.getPendingRequest()
.ifPresent(membershipRequest -> {
var requestCursor = cursor.setObject("pendingRequest");
requestCursor.setString("requestTime", membershipRequest.getCreationTime());
requestCursor.setString("reason", membershipRequest.getReason());
});
var auditLogCursor = cursor.setArray("auditLog");
accessRoleInformation.getAuditLog()
.forEach(auditLogEntry -> {
var entryCursor = auditLogCursor.addObject();
entryCursor.setString("created", auditLogEntry.getCreationTime());
entryCursor.setString("approver", auditLogEntry.getApprover());
entryCursor.setString("reason", auditLogEntry.getReason());
entryCursor.setString("status", auditLogEntry.getAction());
});
return new SlimeJsonResponse(slime);
}
/** Files an ssh-access request for the given cloud tenant. Operators only. */
private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
    if ( ! isOperator(request))
        return ErrorResponse.forbidden("Only operators are allowed to request ssh access");

    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only request access for cloud tenants");

    controller.serviceRegistry().accessControlService().requestSshAccess(tenant);
    return new MessageResponse("OK");
}
// Approves or rejects a pending ssh-access request for a cloud tenant.
// Request body fields: "approve" (boolean decision) and optional "expiry"
// (epoch millis; defaults to 24 hours from now when absent).
private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
var tenant = TenantName.from(tenantName);
if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
var inspector = toSlime(request.getData()).get();
var expiry = inspector.field("expiry").valid() ?
Instant.ofEpochMilli(inspector.field("expiry").asLong()) :
Instant.now().plus(1, ChronoUnit.DAYS);
var approve = inspector.field("approve").asBool();
// The caller's OAuth credentials are forwarded so the decision is attributed to them.
controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
return new MessageResponse("OK");
}
/** Enables managed access for the given tenant. */
private HttpResponse addManagedAccess(String tenantName) {
return setManagedAccess(tenantName, true);
}
/** Disables managed access for the given tenant. */
private HttpResponse removeManagedAccess(String tenantName) {
return setManagedAccess(tenantName, false);
}
/**
 * Sets whether access to the given tenant is operator-managed, and echoes the new state.
 * Only cloud tenants support this; others get a 400 response.
 */
private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only set access privileges for cloud tenants"); // fixed typo: was "privel"
    controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
    var slime = new Slime();
    slime.setObject().setBool("managedAccess", managedAccess);
    return new SlimeJsonResponse(slime);
}
/** Serves the stored info of a cloud tenant, or 404 when absent or not a cloud tenant. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return tenantInfo(((CloudTenant) tenant.get()).info(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/** Applies the given handler to the named tenant when it exists and is a cloud tenant; otherwise 404. */
private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return handler.apply((CloudTenant) tenant.get());
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
// Renders the legacy flat tenant-info view: top-level name/email/website/contact fields
// plus nested address, billingContact and contacts objects. Empty info yields "{}".
// NOTE(review): the request parameter is unused here — presumably kept for signature
// symmetry with the other handlers; confirm before removing.
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
Slime slime = new Slime();
Cursor infoCursor = slime.setObject();
if (!info.isEmpty()) {
infoCursor.setString("name", info.name());
infoCursor.setString("email", info.email());
infoCursor.setString("website", info.website());
infoCursor.setString("contactName", info.contact().name());
infoCursor.setString("contactEmail", info.contact().email());
toSlime(info.address(), infoCursor);
toSlime(info.billingContact(), infoCursor);
toSlime(info.contacts(), infoCursor);
}
return new SlimeJsonResponse(slime);
}
// Renders the "profile" slice of a cloud tenant's info: contact (name/email),
// tenant (company/website) and address. Empty info yields "{}".
private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
var slime = new Slime();
var root = slime.setObject();
var info = cloudTenant.info();
if (!info.isEmpty()) {
var contact = root.setObject("contact");
contact.setString("name", info.contact().name());
contact.setString("email", info.contact().email());
var tenant = root.setObject("tenant");
// info.name() is the company name in this view.
tenant.setString("company", info.name());
tenant.setString("website", info.website());
toSlime(info.address(), root);
}
return new SlimeJsonResponse(slime);
}
/**
 * Looks up the named tenant and applies the given handler to it together with the parsed request body.
 * Only cloud tenants are accepted, matching the single-argument {@code withCloudTenant} overload;
 * previously a non-cloud tenant would hit the {@code CloudTenant} cast and fail with a
 * ClassCastException instead of a clean 404.
 */
private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
    return controller.tenants().get(tenantName)
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get()))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}
// Merges the "profile" fields of the request body (contact name/email, tenant name/website,
// address) into the tenant's stored info, validates the result, and persists it under the
// tenant lock. Fields absent from the request keep their existing values.
private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
var info = cloudTenant.info();
var mergedContact = TenantContact.empty()
.withName(getString(inspector.field("contact").field("name"), info.contact().name()))
.withEmail(getString(inspector.field("contact").field("email"), info.contact().email()));
var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address());
var mergedInfo = info
.withName(getString(inspector.field("tenant").field("name"), info.name()))
.withWebsite(getString(inspector.field("tenant").field("website"), info.website()))
.withContact(mergedContact)
.withAddress(mergedAddress);
// Throws IllegalArgumentException (-> 400) on invalid contact/website fields.
validateMergedTenantInfo(mergedInfo);
controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
// Renders the "billing" slice of a cloud tenant's info: billing contact (name/email/phone)
// and billing address. Empty info yields "{}".
private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) {
var slime = new Slime();
var root = slime.setObject();
var info = cloudTenant.info();
if (!info.isEmpty()) {
var billingContact = info.billingContact();
var contact = root.setObject("contact");
contact.setString("name", billingContact.contact().name());
contact.setString("email", billingContact.contact().email());
contact.setString("phone", billingContact.contact().phone());
toSlime(billingContact.address(), root);
}
return new SlimeJsonResponse(slime);
}
/**
 * Merges the billing-contact fields of the request body into the tenant's stored info
 * and persists the result under the tenant lock. Fields absent from the request keep
 * their existing values.
 */
private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    var contact = info.billingContact().contact();
    var address = info.billingContact().address();
    var mergedContact = updateTenantInfoContact(inspector.field("contact"), contact);
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), address); // reuse the local instead of re-fetching
    var mergedBilling = info.billingContact()
                            .withContact(mergedContact)
                            .withAddress(mergedAddress);
    var mergedInfo = info.withBilling(mergedBilling);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/** Renders only the "contacts" list of the given cloud tenant's info. */
private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(cloudTenant.info().contacts(), root);
    return new SlimeJsonResponse(slime);
}
// Replaces the tenant's contact list with the "contacts" array from the request body
// (keeping the old list when the field is absent) and persists it under the tenant lock.
private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
var mergedInfo = cloudTenant.info()
.withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.info().contacts()));
controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
/**
 * Validates merged tenant info before it is stored: contact name and email must be set,
 * the email must look like an address, and a non-blank website must parse as a URL.
 *
 * @throws IllegalArgumentException when any field is invalid
 */
private void validateMergedTenantInfo(TenantInfo mergedInfo) {
    var contact = mergedInfo.contact();
    if (contact.name().isBlank())
        throw new IllegalArgumentException("'contactName' cannot be empty");
    if (contact.email().isBlank())
        throw new IllegalArgumentException("'contactEmail' cannot be empty");
    if ( ! contact.email().contains("@"))
        throw new IllegalArgumentException("'contactEmail' needs to be an email address");

    if (mergedInfo.website().isBlank()) return;
    try {
        new URL(mergedInfo.website());
    } catch (MalformedURLException e) {
        throw new IllegalArgumentException("'website' needs to be a valid address");
    }
}
/** Writes the given address as an "address" object under the given cursor; empty addresses are skipped. */
private void toSlime(TenantAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;

    Cursor cursor = parentCursor.setObject("address");
    cursor.setString("addressLines", address.address());
    cursor.setString("postalCodeOrZip", address.code());
    cursor.setString("city", address.city());
    cursor.setString("stateRegionProvince", address.region());
    cursor.setString("country", address.country());
}
/** Writes the given billing contact as a "billingContact" object under the given cursor; empty ones are skipped. */
private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;

    Cursor billingCursor = parentCursor.setObject("billingContact");
    billingCursor.setString("name", billingContact.contact().name());
    billingCursor.setString("email", billingContact.contact().email());
    billingCursor.setString("phone", billingContact.contact().phone());
    toSlime(billingContact.address(), billingCursor);
}
/**
 * Writes the given contacts as a "contacts" array under the given cursor.
 * Only email contacts are serializable; any other type is a programming error.
 */
private void toSlime(TenantContacts contacts, Cursor parentCursor) {
    Cursor contactsCursor = parentCursor.setArray("contacts");
    for (var contact : contacts.all()) {
        Cursor contactCursor = contactsCursor.addObject();
        Cursor audiencesArray = contactCursor.setArray("audiences");
        for (var audience : contact.audiences())
            audiencesArray.addString(toAudience(audience));
        switch (contact.type()) {
            case EMAIL -> contactCursor.setString("email", ((TenantContacts.EmailContact) contact).email());
            default -> throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
        }
    }
}
/** Parses the wire name of a contact audience into its enum constant. */
private static TenantContacts.Audience fromAudience(String value) {
    return switch (value) {
        case "tenant"        -> TenantContacts.Audience.TENANT;
        case "notifications" -> TenantContacts.Audience.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
    };
}
/** Serializes a contact audience to its wire name; the inverse of {@code fromAudience}. */
private static String toAudience(TenantContacts.Audience audience) {
    return switch (audience) {
        case TENANT        -> "tenant";
        case NOTIFICATIONS -> "notifications";
    };
}
/** Updates the stored info of a cloud tenant from the request body, or 404 when absent or not cloud. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isPresent() && tenant.get().type() == Tenant.Type.cloud)
        return updateTenantInfo((CloudTenant) tenant.get(), request);
    return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
}
/** Returns the trimmed string value of the given field, or the fallback when the field is absent. */
private String getString(Inspector field, String fallback) {
    if ( ! field.valid()) return fallback;
    return field.asString().trim();
}
// Merges the full (legacy, flat) tenant-info body into the tenant's stored info and
// persists it under the tenant lock. Every field absent from the request keeps its
// existing value; the merged result is validated before storing.
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
TenantInfo oldInfo = tenant.info();
Inspector insp = toSlime(request.getData()).get();
TenantContact mergedContact = TenantContact.empty()
.withName(getString(insp.field("contactName"), oldInfo.contact().name()))
.withEmail(getString(insp.field("contactEmail"), oldInfo.contact().email()));
TenantInfo mergedInfo = TenantInfo.empty()
.withName(getString(insp.field("name"), oldInfo.name()))
.withEmail(getString(insp.field("email"), oldInfo.email()))
.withWebsite(getString(insp.field("website"), oldInfo.website()))
.withContact(mergedContact)
.withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
.withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()))
.withContacts(updateTenantInfoContacts(insp.field("contacts"), oldInfo.contacts()));
// Throws IllegalArgumentException (-> 400) on invalid contact/website fields.
validateMergedTenantInfo(mergedInfo);
controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
lockedTenant = lockedTenant.withInfo(mergedInfo);
controller.tenants().store(lockedTenant);
});
return new MessageResponse("Tenant info updated");
}
// Merges the address fields of the request body into the old address, keeping old values
// for absent fields. The merged address must be all-blank (no address) or fully filled in;
// a partially filled address is rejected.
private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
if (!insp.valid()) return oldAddress;
TenantAddress address = TenantAddress.empty()
.withCountry(getString(insp.field("country"), oldAddress.country()))
.withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
.withCity(getString(insp.field("city"), oldAddress.city()))
.withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
.withAddress(getString(insp.field("addressLines"), oldAddress.address()));
List<String> fields = List.of(address.address(),
address.code(),
address.country(),
address.city(),
address.region());
// All-or-nothing: either every field is blank (address cleared) or none is.
if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank))
return address;
throw new IllegalArgumentException("All address fields must be set");
}
/**
 * Merges the contact fields of the request body into the old contact, keeping old values
 * for absent fields. A non-blank merged email must look like an email address.
 *
 * @throws IllegalArgumentException when the merged email is non-blank but not an address
 */
private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) {
    if ( ! insp.valid()) return oldContact;

    String email = getString(insp.field("email"), oldContact.email());
    if ( ! email.isBlank() && ! email.contains("@"))
        throw new IllegalArgumentException("'email' needs to be an email address");

    return TenantContact.empty()
                        .withName(getString(insp.field("name"), oldContact.name()))
                        .withEmail(email) // reuse the value validated above instead of re-reading the field
                        .withPhone(getString(insp.field("phone"), oldContact.phone()));
}
/** Merges the billing-contact fields of the request body into the old billing info; absent body keeps the old. */
private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) {
    if ( ! insp.valid()) return oldContact;

    TenantContact mergedContact = updateTenantInfoContact(insp, oldContact.contact());
    TenantAddress mergedAddress = updateTenantInfoAddress(insp.field("address"), oldContact.address());
    return TenantBilling.empty().withContact(mergedContact).withAddress(mergedAddress);
}
/**
 * Builds a new contact list from the request body, replacing the old list entirely;
 * an absent body keeps the old contacts. Each entry must carry a valid email address.
 */
private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) {
    if ( ! insp.valid()) return oldContacts;

    List<TenantContacts.EmailContact> updated =
            SlimeUtils.entriesStream(insp)
                      .map(entry -> {
                          String address = entry.field("email").asString().trim();
                          List<TenantContacts.Audience> audiences =
                                  SlimeUtils.entriesStream(entry.field("audiences"))
                                            .map(audience -> fromAudience(audience.asString()))
                                            .toList();
                          if ( ! address.contains("@"))
                              throw new IllegalArgumentException("'email' needs to be an email address");
                          return new TenantContacts.EmailContact(audiences, address);
                      })
                      .toList();
    return new TenantContacts(updated);
}
// Lists notifications, optionally scoped to one tenant, filtered by any of the request
// properties "application", "instance", "zone", "job", "type" and "level". A filter
// property only matches when the notification's source actually carries that field.
private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
boolean productionOnly = showOnlyProductionInstances(request);
boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
Slime slime = new Slime();
Cursor notificationsArray = slime.setObject().setArray("notifications");
// No tenant given: fan out over every tenant that has notifications.
tenant.map(t -> Stream.of(TenantName.from(t)))
.orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
.flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
.filter(notification ->
propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
.forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
return new SlimeJsonResponse(slime);
}
/**
 * Returns true when the request does not set the given property, or when the property's
 * mapped value equals the given (present) value — i.e. absent filters match everything.
 */
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
    String propertyValue = request.getProperty(property);
    if (propertyValue == null) return true;
    return value.isPresent() && mapper.apply(propertyValue).equals(value.get());
}
// Serializes one notification into the given cursor. Optional source fields
// (application, instance, zone, cluster, job, run) are only written when present.
private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
cursor.setLong("at", notification.at().toEpochMilli());
cursor.setString("level", notificationLevelAsString(notification.level()));
cursor.setString("type", notificationTypeAsString(notification.type()));
if (!excludeMessages) {
Cursor messagesArray = cursor.setArray("messages");
notification.messages().forEach(messagesArray::addString);
}
if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
notification.source().zoneId().ifPresent(zoneId -> {
cursor.setString("environment", zoneId.environment().value());
cursor.setString("region", zoneId.region().value());
});
notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
/** Maps a notification type to its wire name; submission and applicationPackage share one name. */
private static String notificationTypeAsString(Notification.Type type) {
    return switch (type) {
        case submission, applicationPackage -> "applicationPackage";
        case testPackage                    -> "testPackage";
        case deployment                     -> "deployment";
        case feedBlock                      -> "feedBlock";
        case reindex                        -> "reindex";
    };
}
/** Maps a notification level to its external API string. */
private static String notificationLevelAsString(Notification.Level level) {
    // Arrow-form switch for consistency with the rest of this handler.
    return switch (level) {
        case info -> "info";
        case warning -> "warning";
        case error -> "error";
    };
}
/**
 * Lists all applications of the given tenant, or just the named one when applicationName is present.
 * Each entry carries its API url and the urls of its (optionally production-only) instances.
 *
 * @throws NotExistsException if the named application does not exist
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    getTenantOrThrow(tenantName); // fail fast if the tenant itself does not exist
    List<Application> applications = applicationName.isEmpty() ?
            controller.applications().asList(tenant) :
            controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
            .map(List::of)
            .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (Application application : applications) {
        Cursor applicationObject = applicationArray.addObject();
        applicationObject.setString("tenant", application.id().tenant().value());
        applicationObject.setString("application", application.id().application().value());
        applicationObject.setString("url", withPath("/application/v4" +
                                                    "/tenant/" + application.id().tenant().value() +
                                                    "/application/" + application.id().application().value(),
                                                    request.getUri()).toString());
        Cursor instanceArray = applicationObject.setArray("instances");
        // "showOnlyProductionInstances" is a request flag limiting the listing to production instances.
        for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                          : application.instances().keySet()) {
            Cursor instanceObject = instanceArray.addObject();
            instanceObject.setString("instance", instance.value());
            instanceObject.setString("url", withPath("/application/v4" +
                                                     "/tenant/" + application.id().tenant().value() +
                                                     "/application/" + application.id().application().value() +
                                                     "/instance/" + instance.value(),
                                                     request.getUri()).toString());
        }
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the application package deployed by the last run of the given dev/perf job, as a zip.
 *
 * @throws NotExistsException if no run of the given job exists for the instance
 */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    ZoneId zone = type.zone();
    // orElseThrow instead of a bare Optional.get(): a missing run should surface as a
    // not-found error to the client, not as a NoSuchElementException (500).
    RevisionId revision = controller.jobController().last(id, type)
                                    .orElseThrow(() -> new NotExistsException("No run of " + type.jobName() + " found for " + id))
                                    .versions().targetRevision();
    byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
    return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
}
/**
 * Returns the stored diff of the application package used by the given dev run,
 * or a not-found error if no diff exists for it.
 */
private HttpResponse devApplicationPackageDiff(RunId runId) {
    DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone());
    var diff = controller.applications().applicationStore().getDevDiff(deploymentId, runId.number());
    if (diff.isEmpty())
        throw new NotExistsException("No application package diff found for " + runId);
    return new ByteArrayResponse(diff.get());
}
/**
 * Returns an application package (or its tester package when tests=true) as a zip.
 * The "build" request parameter may be a positive build number or the literal "latestDeployed";
 * when absent, the latest submitted package is used.
 *
 * @throws NotExistsException       if no matching package has been submitted or deployed
 * @throws IllegalArgumentException if "build" is neither "latestDeployed" nor a number >= 1
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    final long build;
    String requestedBuild = request.getProperty("build");
    if (requestedBuild != null) {
        if (requestedBuild.equals("latestDeployed")) {
            build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision()
                              .map(RevisionId::number)
                              .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication));
        } else {
            try {
                // Parse the value already read above instead of fetching the request property a second time.
                build = Validation.requireAtLeast(Long.parseLong(requestedBuild), "build number", 1L);
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
            }
        }
    } else {
        build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
                          .map(version -> version.id().number())
                          .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
    }
    RevisionId revision = RevisionId.forProduction(build);
    boolean tests = request.getBooleanProperty("tests");
    byte[] applicationPackage = tests ?
            controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
            controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
    String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
    return new ZipResponse(filename, applicationPackage);
}
/**
 * Returns the stored diff for the given build of the application, or a not-found error.
 *
 * @throws IllegalArgumentException if the build number is not a valid long
 */
private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
    long buildNumber;
    try {
        buildNumber = Long.parseLong(number);
    } catch (NumberFormatException e) {
        // Give a clearer bad-request message than the raw NumberFormatException text.
        throw new IllegalArgumentException("invalid build number '" + number + "'", e);
    }
    return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber)
            .map(ByteArrayResponse::new)
            .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
}
/** Returns the full serialization of a single application. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the Vespa version this application should compile against,
 * optionally constrained by an allowed major version.
 *
 * @throws IllegalArgumentException if allowMajorParam is given but not an integer
 */
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
    OptionalInt allowMajor = OptionalInt.empty();
    if (allowMajorParam != null) {
        try {
            allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
        }
    }
    Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
    Slime slime = new Slime();
    slime.setObject().setString("compileVersion", compileVersion.toFullString());
    return new SlimeJsonResponse(slime);
}
/** Returns the full serialization of a single instance, including its deployment status. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    // Resolve the instance first, then the application's deployment status, matching argument order.
    Instance instance = getInstance(tenantName, applicationName, instanceName);
    DeploymentStatus status = controller.jobController().deploymentStatus(getApplication(tenantName, applicationName));
    toSlime(root, instance, status, request);
    return new SlimeJsonResponse(slime);
}
/**
 * Registers the PEM public key in the request body as a developer key for the calling user,
 * and returns the resulting set of developer keys. Cloud tenants only.
 */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    TenantName name = TenantName.from(tenantName);
    if (controller.tenants().require(name).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Principal user = request.getJDiscRequest().getUserPrincipal();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.tenants().lockOrThrow(name, LockedTenant.Cloud.class, locked -> {
        locked = locked.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), locked.get().developerKeys());
        controller.tenants().store(locked);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Validates that the named secret store of the tenant can read the given AWS parameter,
 * by asking the config server to perform a test read from the given deployment.
 * Returns the config server's result wrapped in a "target"/"result" envelope.
 */
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
    var awsRegion = request.getProperty("aws-region");
    var parameterName = request.getProperty("parameter-name");
    var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
    // The deployment used for validation must belong to the tenant owning the secret store.
    if (!applicationId.tenant().equals(TenantName.from(tenantName)))
        return ErrorResponse.badRequest("Invalid application id");
    var zoneId = requireZone(ZoneId.from(request.getProperty("zone")));
    var deploymentId = new DeploymentId(applicationId, zoneId);
    var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
    var tenantSecretStore = tenant.tenantSecretStores()
            .stream()
            .filter(secretStore -> secretStore.getName().equals(secretStoreName))
            .findFirst();
    if (tenantSecretStore.isEmpty())
        return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
    var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
    try {
        // Wrap the config server's raw JSON answer in an envelope identifying the validated deployment.
        var responseRoot = new Slime();
        var responseCursor = responseRoot.setObject();
        responseCursor.setString("target", deploymentId.toString());
        var responseResultCursor = responseCursor.setObject("result");
        var responseSlime = SlimeUtils.jsonToSlime(response);
        SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
        return new SlimeJsonResponse(responseRoot);
    } catch (JsonParseException e) {
        // Config server returned something that is not valid JSON; log and report a server error.
        return ErrorResponses.logThrowing(request, log, e);
    }
}
/**
 * Removes the PEM public key in the request body from the tenant's developer keys,
 * and returns the remaining set of developer keys. Cloud tenants only.
 */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    TenantName name = TenantName.from(tenantName);
    if (controller.tenants().require(name).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // The previous lookup of the key's owning Principal was unused and has been removed.
    Slime root = new Slime();
    controller.tenants().lockOrThrow(name, LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/** Serializes each (public key, owner) pair as an object with "key" (PEM) and "user" fields. */
private void toSlime(Cursor keysArray, Map<PublicKey, ? extends Principal> keys) {
    for (Map.Entry<PublicKey, ? extends Principal> entry : keys.entrySet()) {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(entry.getKey()));
        keyObject.setString("user", entry.getValue().getName());
    }
}
/**
 * Adds the PEM public key in the request body as a deploy key of the application,
 * and returns the resulting set of deploy keys.
 */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), locked -> {
        locked = locked.withDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        for (PublicKey key : locked.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(locked);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Removes the PEM public key in the request body from the application's deploy keys,
 * and returns the remaining set of deploy keys.
 */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), locked -> {
        locked = locked.withoutDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        for (PublicKey key : locked.get().deployKeys())
            keysArray.addString(KeyUtils.toPem(key));
        controller.applications().store(locked);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Configures a new AWS secret store for a cloud tenant and returns the updated list of stores.
 * Order matters: the IAM tenant policy is created and the store registered with the secret
 * service before the tenant record is updated under lock.
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role", data).asString();
    var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);
    if (!tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }
    // External side effects first; the tenant record is only updated once these succeed.
    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    // Re-read so the response reflects the stored state.
    tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/**
 * Deletes the named secret store from the tenant and returns the remaining stores.
 * External side effects (secret service, IAM policy) are performed before the tenant
 * record is updated under lock. The request parameter is unused but kept for route symmetry.
 */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
    var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var optionalSecretStore = tenant.tenantSecretStores().stream()
            .filter(secretStore -> secretStore.getName().equals(name))
            .findFirst();
    if (optionalSecretStore.isEmpty())
        return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
    var tenantSecretStore = optionalSecretStore.get();
    controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
    controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    // Re-read so the response reflects the stored state.
    tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/** Grants the AWS role in the request body read access to the tenant's archive. Cloud tenants only. */
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
    TenantName name = TenantName.from(tenantName);
    if (controller.tenants().require(name).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String role = mandatory("role", toSlime(request.getData()).get()).asString();
    if (role.isBlank())
        return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
    controller.tenants().lockOrThrow(name, LockedTenant.Cloud.class, locked -> {
        locked = locked.withArchiveAccess(locked.get().archiveAccess().withAWSRole(role));
        controller.tenants().store(locked);
    });
    return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
/** Revokes any AWS role's access to the tenant's archive. Cloud tenants only. */
private HttpResponse removeAwsArchiveAccess(String tenantName) {
    TenantName name = TenantName.from(tenantName);
    if (controller.tenants().require(name).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(name, LockedTenant.Cloud.class, locked -> {
        locked = locked.withArchiveAccess(locked.get().archiveAccess().removeAWSRole());
        controller.tenants().store(locked);
    });
    return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
}
/** Grants the GCP member in the request body read access to the tenant's archive. Cloud tenants only. */
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
    TenantName name = TenantName.from(tenantName);
    if (controller.tenants().require(name).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String member = mandatory("member", toSlime(request.getData()).get()).asString();
    if (member.isBlank())
        return ErrorResponse.badRequest("GCP archive access role can't be whitespace only");
    controller.tenants().lockOrThrow(name, LockedTenant.Cloud.class, locked -> {
        locked = locked.withArchiveAccess(locked.get().archiveAccess().withGCPMember(member));
        controller.tenants().store(locked);
    });
    return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}
/** Revokes any GCP member's access to the tenant's archive. Cloud tenants only. */
private HttpResponse removeGcpArchiveAccess(String tenantName) {
    TenantName name = TenantName.from(tenantName);
    if (controller.tenants().require(name).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(name, LockedTenant.Cloud.class, locked -> {
        locked = locked.withArchiveAccess(locked.get().archiveAccess().removeGCPMember());
        controller.tenants().store(locked);
    });
    return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}
/**
 * Applies a partial update to the application: an optional "majorVersion" (0 clears it)
 * and an optional "pemDeployKey" to add. Returns a message describing what changed.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), locked -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // A value of 0 means "clear the pinned major version".
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            locked = locked.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }
        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            locked = locked.withDeployKey(KeyUtils.fromPemEncodedPublicKey(pemDeployKey));
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(locked);
    });
    return new MessageResponse(messageBuilder.toString());
}
/** Returns the application with the given id, or throws NotExistsException if it does not exist. */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    Optional<Application> application = controller.applications().getApplication(applicationId);
    if (application.isEmpty())
        throw new NotExistsException(applicationId + " not found");
    return application.get();
}
/** Returns the instance with the given id, or throws NotExistsException if it does not exist. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Optional<Instance> instance = controller.applications().getInstance(applicationId);
    if (instance.isEmpty())
        throw new NotExistsException(applicationId + " not found");
    return instance.get();
}
/**
 * Lists the nodes allocated to the given instance in the given zone,
 * with state, version, resources, and cluster membership details.
 */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));
    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
        toSlime(node.resources(), nodeObject);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
        nodeObject.setBool("down", node.down());
        // A node is reported retired if it either is retired or is marked for retirement.
        nodeObject.setBool("retired", node.retired() || node.wantToRetire());
        // Restart/reboot are pending when the wanted generation is ahead of the current one.
        nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
        nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
        nodeObject.setString("group", node.group());
        nodeObject.setLong("index", node.index());
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Lists the clusters of the given deployment with their autoscaling limits,
 * current/target/suggested resources, utilization and recent scaling events.
 */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        clusterObject.setString("type", cluster.type().name());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // Only emit a target when it differs from the current resources (ignoring non-numeric fields).
        if (cluster.target().isPresent()
            && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
            toSlime(cluster.target().get(), clusterObject.setObject("target"));
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
        utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
        clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
        clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
        clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
        clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
    }
    return new SlimeJsonResponse(slime);
}
/** Maps a node state to its external API string; unexpected states are rejected explicitly. */
private static String valueOf(Node.State state) {
    // Arrow-form switch for consistency with the rest of this handler; the explicit mapping
    // (rather than state.name()) keeps the external API stable if enum names ever change.
    return switch (state) {
        case failed -> "failed";
        case parked -> "parked";
        case dirty -> "dirty";
        case ready -> "ready";
        case active -> "active";
        case inactive -> "inactive";
        case reserved -> "reserved";
        case provisioned -> "provisioned";
        case breakfixed -> "breakfixed";
        case deprovisioned -> "deprovisioned";
        default -> throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    };
}
/** Maps a node service (orchestration) state to its external API string; anything unrecognized is "unknown". */
static String valueOf(Node.ServiceState state) {
    // Switch expression instead of statement switch + fall-through return.
    return switch (state) {
        case expectedUp -> "expectedUp";
        case allowedDown -> "allowedDown";
        case permanentlyDown -> "permanentlyDown";
        case unorchestrated -> "unorchestrated";
        default -> "unknown"; // covers 'unknown' and any future states, as before
    };
}
/** Maps a node cluster type to its external API string; 'unknown' is rejected explicitly. */
private static String valueOf(Node.ClusterType type) {
    // Arrow-form switch for consistency with the rest of this handler.
    return switch (type) {
        case admin -> "admin";
        case content -> "content";
        case container -> "container";
        case combined -> "combined";
        case unknown -> throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    };
}
/** Maps a disk speed to its external API string. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    // Arrow-form switch for consistency with the rest of this handler.
    return switch (diskSpeed) {
        case fast -> "fast";
        case slow -> "slow";
        case any -> "any";
    };
}
/** Maps a storage type to its external API string. */
private static String valueOf(NodeResources.StorageType storageType) {
    // Arrow-form switch for consistency with the rest of this handler.
    return switch (storageType) {
        case remote -> "remote";
        case local -> "local";
        case any -> "any";
    };
}
/**
 * Streams logs for the given deployment from the config server straight to the client.
 * Query parameters are passed through unmodified to the config server's log API.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // try-with-resources ensures the upstream log stream is closed even if the transfer fails.
            try (logStream) {
                logStream.transferTo(outputStream);
            }
        }
        @Override
        public long maxPendingBytes() {
            return 1 << 26; // allow up to 64 MiB of buffered, unacknowledged response data
        }
    };
}
/** Returns the current support access state for the given deployment. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deployment = new DeploymentId(application, requireZone(environment, region));
    SupportAccess state = controller.supportAccess().forDeployment(deployment);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(state, controller.clock().instant()));
}
/** Grants support access to the given deployment for 7 days, on behalf of the calling user. */
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    Instant now = controller.clock().instant();
    Instant expiry = now.plus(7, ChronoUnit.DAYS); // fixed 7-day grant window
    SupportAccess granted = controller.supportAccess().allow(deployment, expiry, principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(granted, now));
}
/**
 * Revokes support access for the given deployment on behalf of the calling user,
 * and re-triggers (or queues) a deployment so the revocation takes effect.
 */
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    // Reuse the principal resolved above instead of fetching it from the request a second time.
    controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}
/** Returns proton (content node) metrics for the given deployment as JSON. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment);
    return buildResponseFromProtonMetrics(protonMetrics);
}
/**
 * Returns scaling events for the given deployment, one array per cluster,
 * within the optional [from, until] epoch-second window (defaulting to [EPOCH, now]).
 */
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    Instant from = Optional.ofNullable(request.getProperty("from"))
                           .map(Long::valueOf)
                           .map(Instant::ofEpochSecond)
                           .orElse(Instant.EPOCH);
    Instant until = Optional.ofNullable(request.getProperty("until"))
                            .map(Long::valueOf)
                            .map(Instant::ofEpochSecond)
                            .orElse(Instant.now(controller.clock()));
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    events.forEach((cluster, clusterEvents) -> scalingEventsToSlime(clusterEvents, root.setArray(cluster.clusterId().value())));
    return new SlimeJsonResponse(slime);
}
/**
 * Wraps the given proton metrics in a {"metrics": [...]} JSON response.
 * Serialization failures are logged and reported as an empty 500 response.
 */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var jsonArray = jsonMapper.createArrayNode();
        protonMetrics.forEach(metrics -> jsonArray.add(metrics.toJson()));
        var jsonObject = jsonMapper.createObjectNode();
        jsonObject.set("metrics", jsonArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
    } catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Triggers the given job for the given instance. The request body may set
 * "reTrigger" (re-run the last run), "skipTests", "skipRevision" and "skipUpgrade".
 * Returns a message naming the triggered job(s) and any suppressed upgrades.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
    boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
    // reTrigger yields exactly one job; forceTrigger may yield several (or none), joined by ", ".
    String triggered = reTrigger
                       ? controller.applications().deploymentTrigger()
                                   .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
                       : controller.applications().deploymentTrigger()
                                   .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                                   .stream().map(job -> job.type().jobName()).collect(joining(", "));
    // Builds e.g. ", without revision upgrade", ", without platform upgrade",
    // or ", without revision and platform upgrade" — empty when nothing is skipped.
    String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                (upgradeRevision ? "" : "revision") +
                                ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                (upgradePlatform ? "" : "platform") +
                                ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
    return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                   : "Triggered " + triggered + " for " + id + suppressedUpgrades);
}
/** Pauses the given job for the given instance for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant pauseUntil = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, pauseUntil);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes a previously paused job for the given instance. */
private HttpResponse resume(ApplicationId id, JobType type) {
    DeploymentTrigger trigger = controller.applications().deploymentTrigger();
    trigger.resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/**
 * Serializes an application with its (optionally production-only) instances,
 * deploy keys, metrics, activity and issue references into the given cursor.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // Deploying/outstanding change is reported from the first instance only.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
    });
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes one instance, with its deployments, into the given object, for an application list entry.
 * Removed: an unused local (instanceJobs(...).values()) that was computed and never read.
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // Ongoing and outstanding changes for this instance.
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), status.application());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());
        // Change blockers (block windows) declared in the deployment spec.
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    addRotationId(object, instance);
    // Order deployments by the spec when this instance is declared there; otherwise keep map order.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request)) // Include full deployment information.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else { // Only include a URL to the deployment resource.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Writes the id of the first assigned rotation, if any, as "rotationId". */
private void addRotationId(Cursor object, Instance instance) {
    if ( ! instance.rotations().isEmpty())
        object.setString("rotationId", instance.rotations().get(0).rotationId().asString());
}
/**
 * Serializes an instance, with application-level data and all deployments, into the given object.
 * Removed: an unused local (instanceJobs(...).values()) that was computed and never read.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    // Source info for the latest submitted revision, when available.
    application.revisions().last().ifPresent(version -> {
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // Ongoing and outstanding changes for this instance.
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
        // Change blockers (block windows) declared in the deployment spec.
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    addRotationId(object, instance);
    // Order deployments by the spec when this instance is declared there; otherwise keep map order.
    List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
                                              .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                              .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        // Rotation status for production deployments.
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request)) // Include full deployment information.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else { // Only include a URL to the deployment resource.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Add skeleton entries for zones the job config expects a deployment in, but where none exists yet:
    // declared production deployment jobs, and active manual runs.
    Stream.concat(status.jobSteps().keySet().stream()
                        .filter(job -> job.application().instance().equals(instance.name()))
                        .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                  controller.jobController().active(instance.id()).stream()
                            .map(run -> run.id().job())
                            .filter(job -> job.type().environment().isManuallyDeployed()))
          .map(job -> job.type().zone())
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    // Singular "pemDeployKey" kept for backwards compatibility alongside the full list.
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns the serialized form of a single deployment, or 404 if the instance or deployment is missing. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
    Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());

    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes the platform and revision parts of a change, omitting absent parts. */
private void toSlime(Cursor object, Change change, Application application) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"),
                                                                                 application.revisions().get(revision)));
}
/** Serializes an endpoint: cluster, TLS flag, URL, scope, routing method and legacy flag. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
    object.setBool("legacy", endpoint.legacy());
}
/** Serializes a rotation state as a "bcpStatus" object holding the rotation status string. */
private void toSlime(RotationState state, Cursor object) {
    Cursor bcpStatus = object.setObject("bcpStatus");
    bcpStatus.setString("rotationStatus", rotationStateString(state));
}
/** Serializes per-rotation endpoint status for the given deployment into an "endpointStatus" array. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    var statusArray = object.setArray("endpointStatus");
    for (var rotation : rotations) {
        var entry = statusArray.addObject();
        var targets = status.of(rotation.rotationId());
        entry.setString("endpointId", rotation.endpointId().id());
        entry.setString("rotationId", rotation.rotationId().asString());
        entry.setString("clusterId", rotation.clusterId().value());
        entry.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
        entry.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring system URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/** Sets the global routing status of the given deployment in or out of service. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    if (instance.deployments().get(zone) == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    // Record who made the change: operators and tenants are tracked separately.
    RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value value = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(new DeploymentId(instance.id(), zone)).setRoutingStatus(value, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                           instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/** Returns the global rotation override status of the given deployment, if it has a rotation. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
              .requiresRotation()
              .primary()
              .ifPresent(endpoint -> {
                  RoutingStatus status = controller.routing().of(deploymentId).routingStatus();
                  array.addString(endpoint.upstreamName(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.value().name());
                  statusObject.setString("reason", "");
                  statusObject.setString("agent", status.agent().name());
                  statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the status of one rotation of the given deployment, selected by the optional endpoint id. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);

    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the change currently rolling out to the given instance, if any; empty object otherwise. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.revision().ifPresent(revision -> root.setString("application", revision.toString()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether orchestration currently has the given deployment suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Proxies a service /status page from the config server of the given deployment, forwarding query parameters. */
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Query query = Query.empty().add(request.getJDiscRequest().parameters());
    return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId,
                                                                          serviceName,
                                                                          DomainName.of(host),
                                                                          HttpURL.Path.parse("/status").append(restPath),
                                                                          query);
}
/** Proxies the service node list from the config server of the given deployment. */
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.serviceRegistry().configServer().getServiceNodes(new DeploymentId(id, requireZone(environment, region)));
}
/** Proxies a /state/v1 page from the given service node, passing the original request URL for link rewriting. */
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Query query = Query.empty().add(request.getJDiscRequest().parameters())
                       .set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString());
    return controller.serviceRegistry().configServer().getServiceNodePage(
            deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
}
/** Returns content of the deployed application package at the given path. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(id, requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
}
/**
 * Updates an existing tenant from the request payload and returns the updated tenant.
 * Fix: reuse the parsed TenantName instead of re-parsing it for the response.
 */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 before attempting an update of a non-existent tenant.
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/**
 * Creates a new tenant; in public systems also records the creating user as the tenant contact.
 * Fix: reuse the parsed TenantName instead of re-parsing it for the response.
 */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    if (controller.system().isPublic()) {
        // Public systems: set the creating user as the initial tenant contact.
        User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
        TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
                                    .info()
                                    .withContact(TenantContact.from(user.name(), user.email()));
        controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(info);
            controller.tenants().store(lockedTenant);
        });
    }
    return tenant(controller.tenants().require(tenant), request);
}
/**
 * Creates a new application under the given tenant and returns its serialized form.
 * Fix: dropped an unused local binding of the created Application; the call is kept for its effect.
 */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    controller.applications().createApplication(id, credentials); // Response is built from the id, not the result.
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates a new instance of an application, implicitly creating the application first if it does not exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId application = TenantAndApplicationId.from(tenantName, applicationName);
    ApplicationId instance = application.instance(instanceName);
    if (controller.applications().getApplication(application).isEmpty())
        createApplication(tenantName, applicationName, request); // Implicitly create the application.
    controller.applications().createInstance(instance, Tags.empty());

    Slime slime = new Slime();
    toSlime(instance, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Triggers deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        VersionStatus versionStatus = controller.readVersionStatus();
        Version version = Version.fromString(versionString);
        if (version.equals(Version.emptyVersion)) // Empty version means "current system version".
            version = controller.systemVersion(versionStatus);
        // Only operators may force versions which are not active in this system.
        if ( ! versionStatus.isActive(version) && ! isOperator(request)) {
            String activeVersions = versionStatus.versions()
                                                 .stream()
                                                 .map(VespaVersion::versionNumber)
                                                 .map(Version::toString)
                                                 .collect(joining(", "));
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + activeVersions);
        }
        Change change = pin ? Change.of(version).withPin() : Change.of(version);
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Triggers deployment of the last known application package, or of a specific build if given, for the application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    // Optional "build" field in the request body selects a specific build; -1 means "latest".
    long build = Optional.of(toSlime(request.getData()).get().field("build"))
                         .filter(Inspector::valid)
                         .map(Inspector::asLong)
                         .orElse(-1L);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        RevisionId revision = build == -1 ? application.get().revisions().last().get().id()
                                          : getRevision(application.get(), build);
        Change change = Change.of(revision);
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Returns the known revision with the given build number, provided its package is stored; throws otherwise. */
private RevisionId getRevision(Application application, long build) {
    Optional<RevisionId> revision = application.revisions().withPackage().stream()
                                               .map(ApplicationVersion::id)
                                               .filter(id -> id.number() == build)
                                               .findFirst();
    if (revision.isPresent() && controller.applications().applicationStore().hasBuild(application.id().tenant(),
                                                                                      application.id().application(),
                                                                                      build))
        return revision.get();
    throw new IllegalArgumentException("Build number '" + build + "' was not found");
}
/** Marks the given build as non-deployable, and cancels any instance change currently targeting it. */
private HttpResponse cancelBuild(String tenantName, String applicationName, String build){
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
    controller.applications().lockApplicationOrThrow(id, application -> {
        // Persist the skipped marker first, so the trigger cannot re-select this revision.
        controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
        for (Instance instance : application.get().instances().values())
            if (instance.change().revision().equals(Optional.of(revision)))
                controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION);
    });
    return new MessageResponse("Marked build '" + build + "' as non-deployable");
}
/** Cancels the ongoing change for the given application, e.g., everything with {"cancel":"all"}. */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change current = application.get().require(id.instance()).change();
        if (current.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        controller.applications().deploymentTrigger().cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
        response.append("Changed deployment from '").append(current)
                .append("' to '").append(controller.applications().requireInstance(id).change())
                .append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedules reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // Comma-separated, optional filters; blanks are ignored.
    List<String> clusterNames = Stream.of(Optional.ofNullable(request.getProperty("clusterId")).orElse("").split(","))
                                      .filter(cluster -> ! cluster.isBlank())
                                      .toList();
    List<String> documentTypes = Stream.of(Optional.ofNullable(request.getProperty("documentType")).orElse("").split(","))
                                       .filter(type -> ! type.isBlank())
                                       .toList();
    Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
    boolean indexedOnly = request.getBooleanProperty("indexedOnly");
    controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed);
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) +
                               (indexedOnly ? ", for indexed types" : "") +
                               (speed != null ? ", with speed " + speed : ""));
}
/** Gets reindexing status of an application in a zone, listing pending and ready types per cluster. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);

    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    Cursor clustersArray = root.setArray("clusters");
    // Clusters and types are listed in natural key order for stable output.
    for (var cluster : reindexing.clusters().entrySet().stream().sorted(comparingByKey()).toList()) {
        Cursor clusterObject = clustersArray.addObject();
        clusterObject.setString("name", cluster.getKey());
        Cursor pendingArray = clusterObject.setArray("pending");
        for (var pending : cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()).toList()) {
            Cursor pendingObject = pendingArray.addObject();
            pendingObject.setString("type", pending.getKey());
            pendingObject.setLong("requiredGeneration", pending.getValue());
        }
        Cursor readyArray = clusterObject.setArray("ready");
        for (var ready : cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()).toList()) {
            Cursor readyObject = readyArray.addObject();
            readyObject.setString("type", ready.getKey());
            setStatus(readyObject, ready.getValue());
        }
    }
    return new SlimeJsonResponse(slime);
}
/** Writes the fields of a reindexing status to the given object; absent fields are simply omitted. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
    status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
    status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
    status.message().ifPresent(message -> statusObject.setString("message", message));
    status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
    status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
}
/** Returns the wire name of the given reindexing state. Switched to the idiomatic, terser arrow form. */
private static String toString(ApplicationReindexing.State state) {
    return switch (state) {
        case PENDING    -> "pending";
        case RUNNING    -> "running";
        case FAILED     -> "failed";
        case SUCCESSFUL -> "successful";
    };
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}
/** Schedules restart of a deployment, optionally restricted to a single host, cluster type, or cluster id. */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Optional<HostName> hostName = Optional.ofNullable(request.getProperty("hostname")).map(HostName::of);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deploymentId, new RestartFilter().withHostName(hostName)
                                                                       .withClusterType(clusterType)
                                                                       .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Sets the suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    String verb = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(verb + " orchestration of " + deploymentId);
}
/**
 * Deploys an application package directly to the given job's zone, bypassing deployment orchestration.
 * Only manually deployed environments are allowed, unless the caller is an operator.
 * Fixes: the form-part presence check now uses the same key constant as the lookup
 * (EnvironmentResource.APPLICATION_ZIP — presumably "applicationZip"; verify against EnvironmentResource),
 * and the optional "deployOptions" JSON is parsed once instead of twice.
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone()),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    // Parse the optional "deployOptions" JSON form part once, and read both options from it.
    Optional<Inspector> deployOptions = Optional.ofNullable(dataParts.get("deployOptions"))
                                                .map(json -> SlimeUtils.jsonToSlime(json).get());
    Optional<Version> version = deployOptions.flatMap(options -> optional("vespaVersion", options))
                                             .map(Version::fromString);

    ensureApplicationExists(TenantAndApplicationId.from(id), request);

    boolean dryRun = deployOptions.flatMap(options -> optional("dryRun", options))
                                  .map(Boolean::valueOf)
                                  .orElse(false);

    controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/** Deploys a system application (one which has an application package) to the given zone, on the current system version. */
// NOTE(review): this method mixes ErrorResponse.badRequest returns and IllegalArgumentException throws;
// both surface as 400s via the handler's catch clause, but one consistent style would be clearer.
private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

    // Only system applications which carry an application package may be deployed through this API.
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage()) {
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
    }

    // The version always follows the system version; a pinned version is rejected.
    String vespaVersion = deployOptions.field("vespaVersion").asString();
    if ( ! vespaVersion.isEmpty()) {
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
    }

    // Refuse to deploy while the system is upgrading, or before its version is known.
    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading()) {
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    }
    Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
    if (systemVersion.isEmpty()) {
        throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
    }
    DeploymentResult result = controller.applications()
                                        .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.get().versionNumber());

    // Relay the prepare log entries from the deployment to the client.
    Cursor logArray = root.setArray("prepareMessages");
    for (LogEntry logMessage : result.log()) {
        Cursor logObject = logArray.addObject();
        logObject.setLong("time", logMessage.epochMillis());
        logObject.setString("level", logMessage.level().getName());
        logObject.setString("message", logMessage.message());
    }
    return new SlimeJsonResponse(slime);
}
/** Deletes the given tenant; "forget" (which only operators may request) deletes it permanently. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    boolean forget = request.getBooleanProperty("forget");
    if (forget && ! isOperator(request))
        return ErrorResponse.forbidden("Only operators can forget a tenant");

    TenantName tenant = TenantName.from(tenantName);
    Credentials credentials = accessControlRequests.credentials(tenant,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(tenant, Optional.of(credentials), forget);
    return new MessageResponse("Deleted tenant " + tenantName);
}
/** Deletes the given application, with credentials taken from the request data. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteApplication(id,
                                                accessControlRequests.credentials(id.tenant(),
                                                                                  toSlime(request.getData()).get(),
                                                                                  request.getJDiscRequest()));
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance, and the enclosing application as well if this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    // Removing the last instance also removes the application itself, which requires credentials.
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}
/** Deactivates (removes) the deployment in the given zone, aborting any still-running deployment job for it. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                       requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    // Abort any ongoing job targeting this zone, attributing the abort to the calling user.
    controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
              .filter(run -> ! run.hasEnded())
              .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
    // Fall back to the default instance when the requested instance is not declared in the deployment spec.
    ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
                                   ? id : TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(prodInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(prodInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));

    // Non-production jobs also include the tested instance's own deployment in the job's zone.
    ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(toTest, type.zone()));

    Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone());
    if (deployment == null)
        throw new NotExistsException(toTest + " is not deployed in " + type.zone());

    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  deployment.version(),
                                                                  deployment.revision(),
                                                                  deployment.at(),
                                                                  controller.routing().readTestRunnerEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/** Creates a service dump request for the given node, stored as a "serviceDump" report in the node repository. */
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
                                        String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);

    // Refuse to overwrite a dump which has neither completed nor failed, unless ?force=true is given.
    Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
    if (report != null) {
        Cursor cursor = report.get();
        boolean force = request.getBooleanProperty("force");
        if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
            throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
        }
    }

    // Parse and validate the request payload: a configId and a non-empty artifacts list are required.
    Slime requestPayload;
    try {
        requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
    } catch (Exception e) {
        throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
    }
    Cursor requestPayloadCursor = requestPayload.get();
    String configId = requestPayloadCursor.field("configId").asString();
    long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
    if (configId.isEmpty()) {
        throw new IllegalArgumentException("Missing configId");
    }
    Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
    int artifactEntries = artifactsCursor.entries();
    if (artifactEntries == 0) {
        throw new IllegalArgumentException("Missing or empty 'artifacts'");
    }

    // Build the dump request document which is written to the node's reports.
    Slime dumpRequest = new Slime();
    Cursor dumpRequestCursor = dumpRequest.setObject();
    dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
    dumpRequestCursor.setString("configId", configId);
    Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
    for (int i = 0; i < artifactEntries; i++) {
        dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
    }
    if (expiresAt > 0) {
        dumpRequestCursor.setLong("expiresAt", expiresAt);
    }
    // Optional dump options are copied through verbatim.
    Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
    if (dumpOptionsCursor.children() > 0) {
        SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
    }
    // NOTE(review): new String(byte[]) uses the platform default charset — presumably UTF-8 here; confirm.
    var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
    nodeRepository.updateReports(zone, hostname, reportsUpdate);

    // With ?wait=true, block until the dump completes or fails before responding.
    boolean wait = request.getBooleanProperty("wait");
    if (!wait) return new MessageResponse("Request created");
    return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
}
/** Returns the stored service dump report for the given node, or 404 if none exists. */
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment,
                                    String region, String hostname, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    return getReport(nodeRepository, zone, tenant, application, instance, hostname)
            .map(SlimeJsonResponse::new)
            .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname));
}
/** Polls the node's service dump report until it has completed or failed, then returns it. */
// NOTE(review): no explicit deadline in this loop — presumably bounded by the handler's overall request timeout; confirm.
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                              String application, String instance, String hostname) {
    int pollInterval = 2; // seconds between report checks
    Slime report;
    while (true) {
        report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get();
        Cursor cursor = report.get();
        // Done when either completedAt or failedAt has been set on the report.
        if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
            break;
        }
        final Slime copyForLambda = report; // effectively final reference, required by the logging lambda
        log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda))));
        log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
        controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
    }
    return new SlimeJsonResponse(report);
}
/** Returns the "serviceDump" report of the given node, verifying that the node exists and is owned by the given application. */
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                  String application, String instance, String hostname) {
    Node node;
    try {
        node = nodeRepository.getNode(zone, hostname);
    } catch (IllegalArgumentException e) {
        // Translate "unknown node" into a 404 rather than a 400.
        throw new NotExistsException(hostname);
    }
    ApplicationId app = ApplicationId.from(tenant, application, instance);
    ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
    if (!app.equals(owner)) {
        throw new IllegalArgumentException("Node is not owned by " + app.toFullString());
    }
    String json = node.reports().get("serviceDump");
    if (json == null) return Optional.empty();
    return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json));
}
/** Parses a source revision from a JSON object which must contain "repository", "branch" and "commit" fields. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid())) {
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    }
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException (404) if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty()) throw new NotExistsException(new TenantId(tenantName));
    return tenant.get();
}
/** Serializes the given tenant, with type-specific details and its applications/instances, to the given object. */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            // Athenz tenants expose domain, property and (optional) contact information.
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            // Cloud tenants expose creator, developer keys, secret stores, quota and archive access.
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            toSlime(object.setObject("integrations").setObject("aws"),
                    controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
                    cloudTenant.tenantSecretStores());
            // Quota reporting is best-effort: a failure is logged but does not fail the response.
            try {
                var usedQuota = applications.stream()
                                            .map(Application::quotaUsage)
                                            .reduce(QuotaUsage.none, QuotaUsage::add);
                toSlime(object.setObject("quota"), usedQuota);
            } catch (Exception e) {
                log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e)));
            }
            cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role));
            toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess"));
            break;
        }
        case deleted: break;
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    // List applications or their instances, depending on the "production"/"activeInstances"/"recursive" request properties.
    Cursor applicationArray = object.setArray("applications");
    for (Application application : applications) {
        DeploymentStatus status = null; // computed lazily, at most once per application
        Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                              : application.instances().values();
        if (instances.isEmpty() && !showOnlyActiveInstances(request))
            toSlime(application.id(), applicationArray.addObject(), request);

        for (Instance instance : instances) {
            if (showOnlyActiveInstances(request) && instance.deployments().isEmpty())
                continue;
            if (recurseOverApplications(request)) {
                if (status == null) status = controller.jobController().deploymentStatus(application);
                toSlime(applicationArray.addObject(), instance, status, request);
            } else {
                toSlime(instance.id(), applicationArray.addObject(), request);
            }
        }
    }
    tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Serializes archive access: the AWS role and/or GCP member, when present. */
private void toSlime(ArchiveAccess archiveAccess, Cursor object) {
    archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role));
    archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member));
}

/** Serializes quota usage as the "budgetUsed" rate. */
private void toSlime(Cursor object, QuotaUsage usage) {
    object.setDouble("budgetUsed", usage.rate());
}
/** Serializes cluster resources (node and group counts, per-node resources) and their computed cost in this system. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));

    double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system());
    object.setDouble("cost", cost);
}
/** Serializes cluster utilization: actual, ideal, current and peak values for cpu, memory and disk. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
    utilizationObject.setDouble("cpu", utilization.cpu());
    utilizationObject.setDouble("idealCpu", utilization.idealCpu());
    utilizationObject.setDouble("currentCpu", utilization.currentCpu());
    utilizationObject.setDouble("peakCpu", utilization.peakCpu());

    utilizationObject.setDouble("memory", utilization.memory());
    utilizationObject.setDouble("idealMemory", utilization.idealMemory());
    utilizationObject.setDouble("currentMemory", utilization.currentMemory());
    utilizationObject.setDouble("peakMemory", utilization.peakMemory());

    utilizationObject.setDouble("disk", utilization.disk());
    utilizationObject.setDouble("idealDisk", utilization.idealDisk());
    utilizationObject.setDouble("currentDisk", utilization.currentDisk());
    utilizationObject.setDouble("peakDisk", utilization.peakDisk());
}
/** Serializes autoscaling events: from/to resources, start time and, when present, completion time. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    scalingEvents.forEach(event -> {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
        event.completion().ifPresent(completion -> eventObject.setLong("completion", completion.toEpochMilli()));
    });
}
/** Serializes per-node resources, including disk speed and storage type. */
private void toSlime(NodeResources resources, Cursor object) {
    object.setDouble("vcpu", resources.vcpu());
    object.setDouble("memoryGb", resources.memoryGb());
    object.setDouble("diskGb", resources.diskGb());
    object.setDouble("bandwidthGbps", resources.bandwidthGbps());
    object.setString("diskSpeed", valueOf(resources.diskSpeed()));
    object.setString("storageType", valueOf(resources.storageType()));
}
/** Serializes a brief tenant entry (name, metadata and url) for the tenant list response. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            // Only Athenz tenants carry additional metadata here.
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case cloud: break;
        case deleted: break;
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Serializes tenant metadata: creation/deletion times, last dev deployment, last submission, and last login times. */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
    // Latest dev deployment: prefer recorded deployment starts, falling back to the start of the latest dev job run.
    Optional<Instant> lastDev = applications.stream()
                                            .flatMap(application -> application.instances().values().stream())
                                            .flatMap(instance -> instance.deployments().values().stream()
                                                                         .filter(deployment -> deployment.zone().environment() == Environment.dev)
                                                                         .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment)))
                                            .max(Comparator.naturalOrder())
                                            .or(() -> applications.stream()
                                                                  .flatMap(application -> application.instances().values().stream())
                                                                  .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream()
                                                                                              .filter(job -> job.environment() == Environment.dev)
                                                                                              .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                                                                  .map(Run::start)
                                                                  .max(Comparator.naturalOrder()));
    // Latest submission is taken from the build time of each application's last revision.
    Optional<Instant> lastSubmission = applications.stream()
                                                   .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream())
                                                   .max(Comparator.naturalOrder());
    object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
    if (tenant.type() == Tenant.Type.deleted)
        object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli());
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
          .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
          .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
          .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to the given query. */
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    }
    catch (URISyntaxException e) {
        throw new RuntimeException("Will not happen", e); // components come from an already-valid URI
    }
}

/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path. */
private URI withPath(String newPath, URI uri) {
    return withPathAndQuery(newPath, null, uri);
}
/** Returns the application/v4 API path which identifies the given deployment. */
private String toPath(DeploymentId id) {
    return path("/application", "v4",
                "tenant", id.applicationId().tenant(),
                "application", id.applicationId().application(),
                "instance", id.applicationId().instance(),
                "environment", id.zoneId().environment(),
                "region", id.zoneId().region());
}
/** Parses the given value as a long, returning the given default when the value is null, and failing with 400 when malformed. */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    } catch (NumberFormatException e) {
        // IllegalArgumentException maps to a 400 response in the handler.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}
/**
 * Reads the given stream (at most 1 MB) and parses the content as JSON.
 *
 * @throws RuntimeException wrapping the underlying IOException if reading the stream fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Fix: the original threw a bare RuntimeException with no message and no cause, losing all diagnostics.
        throw new RuntimeException("Failed to read request content", e);
    }
}
/** Returns the user principal of the given request, failing with 400 if none is set. */
private static Principal requireUserPrincipal(HttpRequest request) {
    return Optional.ofNullable(request.getJDiscRequest().getUserPrincipal())
                   .orElseThrow(() -> new IllegalArgumentException("Expected a user principal"));
}
/** Returns the field with the given key, failing with 400 if it is missing or invalid. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the string value of the field with the given key, or empty if the field is absent. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}
/** Joins the string forms of the given elements with '/' into a path. */
private static String path(Object... elements) {
    return Arrays.stream(elements).map(Object::toString).collect(Collectors.joining("/"));
}
/** Serializes the id and API url of the given application (no instance) to the given object. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("url", withPath("/application/v4" +
                                     "/tenant/" + id.tenant().value() +
                                     "/application/" + id.application().value(),
                                     request.getUri()).toString());
}

/** Serializes the id and API url of the given instance to the given object. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    object.setString("url", withPath("/application/v4" +
                                     "/tenant/" + id.tenant().value() +
                                     "/application/" + id.application().value() +
                                     "/instance/" + id.instance().value(),
                                     request.getUri()).toString());
}
/** Adds each of the given strings to the given Slime array, in order. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Serializes the given secret stores under a "secretStores" array in the given object. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor secretStore = object.setArray("secretStores");
    tenantSecretStores.forEach(store -> {
        toSlime(secretStore.addObject(), store);
    });
}

/** Serializes the tenant's container role and its secret store accounts to the given object. */
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    var stores = object.setArray("accounts");
    tenantSecretStores.forEach(secretStore -> {
        toSlime(stores.addObject(), secretStore);
    });
}

/** Serializes a single secret store: name, AWS id and role. */
private void toSlime(Cursor object, TenantSecretStore secretStore) {
    object.setString("name", secretStore.getName());
    object.setString("awsId", secretStore.getAwsId());
    object.setString("role", secretStore.getRole());
}
/** Reads the entire stream into a single string, or returns null if the stream is empty. */
// NOTE(review): Scanner decodes with the platform default charset — presumably request bodies are UTF-8; confirm.
private String readToString(InputStream stream) {
    Scanner scanner = new Scanner(stream).useDelimiter("\\A"); // "\A" matches the input start, so next() reads everything
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}
/** Returns whether the "recursive" request property asks for recursion over tenants (or deeper). */
private static boolean recurseOverTenants(HttpRequest request) {
    return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}

/** Returns whether the "recursive" request property asks for recursion over applications (or deeper). */
private static boolean recurseOverApplications(HttpRequest request) {
    return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}

/** Returns whether the "recursive" request property asks for recursion over deployments. */
private static boolean recurseOverDeployments(HttpRequest request) {
    // ImmutableSet.contains is null-safe, so an absent property simply yields false.
    return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}

/** Returns whether only production instances should be listed, from the "production" request property. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    return "true".equals(request.getProperty("production"));
}

/** Returns whether only instances with deployments should be listed, from the "activeInstances" request property. */
private static boolean showOnlyActiveInstances(HttpRequest request) {
    return "true".equals(request.getProperty("activeInstances"));
}

/** Returns whether deleted tenants should be included, from the "includeDeleted" request property. */
private static boolean includeDeleted(HttpRequest request) {
    return "true".equals(request.getProperty("includeDeleted"));
}
/** Returns the API string for the given tenant's type. */
private static String tenantType(Tenant tenant) {
    return switch (tenant.type()) {
        case athenz -> "ATHENS";
        case cloud -> "CLOUD";
        case deleted -> "DELETED";
    };
}
/** Parses the application id from the "tenant", "application" and "instance" path segments. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}

/** Parses the job type from the "jobtype" path segment. */
private JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry());
}

/** Parses a run id from the application, job type and "number" path segments. */
private RunId runIdFromPath(Path path) {
    long number = Long.parseLong(path.get("number"));
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}
/** Handles submission of a new application revision: application package, optional test package, and build metadata. */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    long projectId = submitOptions.field("projectId").asLong();
    projectId = projectId == 0 ? 1 : projectId; // 0 (absent) is replaced by the default project id 1
    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    // A source revision is only recorded when repository, branch and commit are all present.
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
                                              ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
                                              : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    Optional<String> description = optional("description", submitOptions);
    int risk = (int) submitOptions.field("risk").asLong();
    // A given source URL must be absolute, with scheme and host.
    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP);
    Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);

    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     Optional.empty(),
                                                                     Optional.empty(),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));

    TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
    ensureApplicationExists(id, request);
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
}
/** Removes all production deployments of the given application by submitting a deployment-removal revision. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                 TenantAndApplicationId.from(tenant, application),
                                                 new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
                                                                Optional.empty(), Optional.empty(), Optional.empty(), 0),
                                                 0);
    return new MessageResponse("All deployments removed");
}
/** Parses the given environment and region into a zone which must exist in this system. */
private ZoneId requireZone(String environment, String region) {
    return requireZone(ZoneId.from(environment, region));
}

/** Returns the given zone, failing with 400 unless it is known to this system's zone registry. */
private ZoneId requireZone(ZoneId zone) {
    // The "controller" prod zone is accepted even though it is not present in the zone registry.
    boolean isControllerZone = zone.environment() == Environment.prod && zone.region().value().equals("controller");
    if ( ! isControllerZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/** Parses the request as multipart form data, verifying the X-Content-Hash header (base64 of the body's SHA-256) when present. */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("X-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);

    // Digest the body while it is being parsed, then compare against the declared hash.
    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}
/** Returns the rotation matching the given endpoint id, or the instance's sole rotation when no id is given. */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    if (instance.rotations().isEmpty()) {
        throw new NotExistsException("global rotation does not exist for " + instance);
    }
    if (endpointId.isPresent()) {
        return instance.rotations().stream()
                       .filter(r -> r.endpointId().id().equals(endpointId.get()))
                       .map(AssignedRotation::rotationId)
                       .findFirst()
                       .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                                 " does not exist for " + instance));
    } else if (instance.rotations().size() > 1) {
        // Without an endpoint id the choice is ambiguous when there are several rotations.
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    }
    return instance.rotations().get(0).rotationId();
}
/** Returns the API string for the given rotation state. */
private static String rotationStateString(RotationState state) {
    return switch (state) {
        case in -> "IN";
        case out -> "OUT";
        case unknown -> "UNKNOWN";
    };
}

/** Returns the API string for the given endpoint scope. */
private static String endpointScopeString(Endpoint.Scope scope) {
    return switch (scope) {
        case weighted -> "weighted";
        case application -> "application";
        case global -> "global";
        case zone -> "zone";
    };
}

/** Returns the API string for the given routing method. */
private static String routingMethodString(RoutingMethod method) {
    return switch (method) {
        case exclusive -> "exclusive";
        case sharedLayer4 -> "sharedLayer4";
    };
}
/** Returns the request context attribute with the given name, cast to the given class, failing with 400 if absent or of another type. */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    if ( ! cls.isInstance(value)) // also covers a missing (null) attribute
        throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
    return cls.cast(value);
}
/** Returns whether given request is by an operator, i.e., one of its security context roles is hostedOperator. */
private static boolean isOperator(HttpRequest request) {
    var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    return securityContext.roles().stream()
                          .map(Role::definition)
                          .anyMatch(definition -> definition == RoleDefinition.hostedOperator);
}
/** Ensures the application exists: creates it in public systems (or with an Okta context), and fails with 400 otherwise. */
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
    if (controller.applications().getApplication(id).isEmpty()) {
        if (controller.system().isPublic() || hasOktaContext(request)) {
            log.fine("Application does not exist in public, creating: " + id);
            var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest());
            controller.applications().createApplication(id, credentials);
        } else {
            log.fine("Application does not exist in hosted, failing: " + id);
            throw new IllegalArgumentException("Application does not exist. Create application in Console first.");
        }
    }
}
/** Returns whether the request carries Okta OAuth credentials in its context. */
private boolean hasOktaContext(HttpRequest request) {
    try {
        // Parsing throws IllegalArgumentException when the Okta context attributes are absent.
        OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context());
        return true;
    } catch (IllegalArgumentException e) {
        return false;
    }
}
/** Returns the given deployments sorted by the order of their zones in the instance's deployment spec. */
private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) {
    List<ZoneId> productionZones = spec.zones().stream()
                                       .filter(z -> z.region().isPresent())
                                       .map(z -> ZoneId.from(z.environment(), z.region().get()))
                                       .toList();
    // Deployments in zones not found in the spec get index -1 and therefore sort first.
    return deployments.stream()
                      .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone())))
                      .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
} | class ApplicationApiHandler extends AuditLoggingRequestHandler {
// Shared Jackson mapper for JSON (de)serialization in this handler.
private static final ObjectMapper jsonMapper = new ObjectMapper();

private final Controller controller;
private final AccessControlRequests accessControlRequests;
private final TestConfigSerializer testConfigSerializer;

@Inject
public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
                             Controller controller,
                             AccessControlRequests accessControlRequests) {
    super(parentCtx, controller.auditLogger());
    this.controller = controller;
    this.accessControlRequests = accessControlRequests;
    this.testConfigSerializer = new TestConfigSerializer(controller.system());
}

/** Requests through this handler may be long-running; allow up to 20 minutes. */
@Override
public Duration getTimeout() {
    return Duration.ofMinutes(20);
}
/** Routes the request by HTTP method, translating known exception types to their corresponding error responses. */
@Override
public HttpResponse auditAndHandle(HttpRequest request) {
    try {
        Path path = new Path(request.getUri());
        return switch (request.getMethod()) {
            case GET -> handleGET(path, request);
            case PUT -> handlePUT(path, request);
            case POST -> handlePOST(path, request);
            case PATCH -> handlePATCH(path, request);
            case DELETE -> handleDELETE(path, request);
            case OPTIONS -> handleOPTIONS();
            default -> ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
        };
    }
    catch (RestApiException.Forbidden e) {
        return ErrorResponse.forbidden(Exceptions.toMessageString(e));
    }
    catch (RestApiException.Unauthorized e) {
        return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
    }
    catch (NotExistsException e) {
        return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
    }
    catch (IllegalArgumentException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
    catch (ConfigServerException e) {
        // Config server errors are mapped by their error code; anything unrecognized becomes a 400.
        return switch (e.code()) {
            case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e));
            case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
            case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e);
            default -> new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
        };
    }
    catch (RuntimeException e) {
        // Anything else is unexpected: log it and answer 500.
        return ErrorResponses.logThrowing(request, log, e);
    }
}
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePUT(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePATCH(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleDELETE(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleOPTIONS() {
EmptyResponse response = new EmptyResponse();
response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
return response;
}
private HttpResponse recursiveRoot(HttpRequest request) {
Slime slime = new Slime();
Cursor tenantArray = slime.setArray();
List<Application> applications = controller.applications().asList();
for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
toSlime(tenantArray.addObject(),
tenant,
applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).collect(toList()),
request);
return new SlimeJsonResponse(slime);
}
private HttpResponse root(HttpRequest request) {
return recurseOverTenants(request)
? recursiveRoot(request)
: new ResourceResponse(request, "tenant");
}
private HttpResponse tenants(HttpRequest request) {
Slime slime = new Slime();
Cursor response = slime.setArray();
for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
return new SlimeJsonResponse(slime);
}
private HttpResponse tenant(String tenantName, HttpRequest request) {
return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request))
.map(tenant -> tenant(tenant, request))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
}
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse accessRequests(String tenantName, HttpRequest request) {
var tenant = TenantName.from(tenantName);
if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
var accessControlService = controller.serviceRegistry().accessControlService();
var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
var managedAccess = accessControlService.getManagedAccess(tenant);
var slime = new Slime();
var cursor = slime.setObject();
cursor.setBool("managedAccess", managedAccess);
accessRoleInformation.getPendingRequest()
.ifPresent(membershipRequest -> {
var requestCursor = cursor.setObject("pendingRequest");
requestCursor.setString("requestTime", membershipRequest.getCreationTime());
requestCursor.setString("reason", membershipRequest.getReason());
});
var auditLogCursor = cursor.setArray("auditLog");
accessRoleInformation.getAuditLog()
.forEach(auditLogEntry -> {
var entryCursor = auditLogCursor.addObject();
entryCursor.setString("created", auditLogEntry.getCreationTime());
entryCursor.setString("approver", auditLogEntry.getApprover());
entryCursor.setString("reason", auditLogEntry.getReason());
entryCursor.setString("status", auditLogEntry.getAction());
});
return new SlimeJsonResponse(slime);
}
private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
if (!isOperator(request)) {
return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
}
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
return ErrorResponse.badRequest("Can only request access for cloud tenants");
controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName));
return new MessageResponse("OK");
}
/** Approves or rejects a pending ssh access request for a cloud tenant. */
private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    Inspector body = toSlime(request.getData()).get();
    Inspector expiryField = body.field("expiry");
    // Default expiry is one day from now unless the body specifies an epoch-milli timestamp.
    Instant expiry = expiryField.valid() ? Instant.ofEpochMilli(expiryField.asLong())
                                         : Instant.now().plus(1, ChronoUnit.DAYS);
    boolean approve = body.field("approve").asBool();
    controller.serviceRegistry().accessControlService()
              .decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
    return new MessageResponse("OK");
}
/** Enables managed access for the given tenant. */
private HttpResponse addManagedAccess(String tenantName) {
    return setManagedAccess(tenantName, true);
}
/** Disables managed access for the given tenant. */
private HttpResponse removeManagedAccess(String tenantName) {
    return setManagedAccess(tenantName, false);
}
/**
 * Enables or disables managed access for a cloud tenant and returns the new state.
 * Only supported for cloud tenants.
 */
private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only set access privileges for cloud tenants"); // fixed typo: was "access privel"
    controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
    var slime = new Slime();
    slime.setObject().setBool("managedAccess", managedAccess);
    return new SlimeJsonResponse(slime);
}
/** Returns the full tenant info for a cloud tenant, or 404 for unknown/non-cloud tenants. */
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty() || tenant.get().type() != Tenant.Type.cloud)
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return tenantInfo(((CloudTenant) tenant.get()).info(), request);
}
/** Applies the handler to the named tenant if it is a cloud tenant, otherwise 404. */
private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty() || tenant.get().type() != Tenant.Type.cloud)
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return handler.apply((CloudTenant) tenant.get());
}
/** Serializes the given tenant info; empty info yields an empty JSON object. */
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if ( ! info.isEmpty()) {
        root.setString("name", info.name());
        root.setString("email", info.email());
        root.setString("website", info.website());
        root.setString("contactName", info.contact().name());
        root.setString("contactEmail", info.contact().email());
        toSlime(info.address(), root);
        toSlime(info.billingContact(), root);
        toSlime(info.contacts(), root);
    }
    return new SlimeJsonResponse(slime);
}
/** Serializes the profile part (contact, company, address) of the tenant's info. */
private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    TenantInfo info = cloudTenant.info();
    if ( ! info.isEmpty()) {
        Cursor contactCursor = root.setObject("contact");
        contactCursor.setString("name", info.contact().name());
        contactCursor.setString("email", info.contact().email());
        Cursor tenantCursor = root.setObject("tenant");
        tenantCursor.setString("company", info.name());
        tenantCursor.setString("website", info.website());
        toSlime(info.address(), root);
    }
    return new SlimeJsonResponse(slime);
}
/** Applies the handler to the named tenant and the parsed request body, or 404 if the tenant is unknown. */
private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return handler.apply((CloudTenant) tenant.get(), toSlime(request.getData()).get());
}
/**
 * Merges the profile fields of the request body into the tenant's stored info,
 * validates the result, and persists it. Fields absent from the body keep their stored values.
 */
private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
    TenantInfo info = cloudTenant.info();
    TenantContact mergedContact = TenantContact.empty()
            .withName(getString(inspector.field("contact").field("name"), info.contact().name()))
            .withEmail(getString(inspector.field("contact").field("email"), info.contact().email()));
    TenantAddress mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address());
    TenantInfo mergedInfo = info.withName(getString(inspector.field("tenant").field("name"), info.name()))
                                .withWebsite(getString(inspector.field("tenant").field("website"), info.website()))
                                .withContact(mergedContact)
                                .withAddress(mergedAddress);
    validateMergedTenantInfo(mergedInfo);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withInfo(mergedInfo)));
    return new MessageResponse("Tenant info updated");
}
/** Serializes the billing contact part of the tenant's info; empty info yields an empty object. */
private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    TenantInfo info = cloudTenant.info();
    if ( ! info.isEmpty()) {
        TenantContact billingContact = info.billingContact().contact();
        Cursor contactCursor = root.setObject("contact");
        contactCursor.setString("name", billingContact.name());
        contactCursor.setString("email", billingContact.email());
        contactCursor.setString("phone", billingContact.phone());
        toSlime(info.billingContact().address(), root);
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Merges billing contact and address from the request body into the tenant's stored
 * info and persists it. Fields absent from the body keep their stored values.
 */
private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    var billing = info.billingContact();
    // Merge against the currently stored billing contact/address (removed an unused 'address' local).
    var mergedContact = updateTenantInfoContact(inspector.field("contact"), billing.contact());
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), billing.address());
    var mergedInfo = info.withBilling(billing.withContact(mergedContact).withAddress(mergedAddress));
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/** Serializes the tenant's contact list. */
private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
    Slime slime = new Slime();
    toSlime(cloudTenant.info().contacts(), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Replaces the tenant's contact list with the one in the request body and persists it. */
private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
    TenantContacts mergedContacts = updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.info().contacts());
    TenantInfo mergedInfo = cloudTenant.info().withContacts(mergedContacts);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant ->
            controller.tenants().store(lockedTenant.withInfo(mergedInfo)));
    return new MessageResponse("Tenant info updated");
}
/**
 * Validates mandatory fields of merged tenant info.
 *
 * @throws IllegalArgumentException if the contact name or email is blank, the email
 *                                  lacks '@', or a non-blank website is not a valid URL
 */
private void validateMergedTenantInfo(TenantInfo mergedInfo) {
    String contactName = mergedInfo.contact().name();
    String contactEmail = mergedInfo.contact().email();
    if (contactName.isBlank())
        throw new IllegalArgumentException("'contactName' cannot be empty");
    if (contactEmail.isBlank())
        throw new IllegalArgumentException("'contactEmail' cannot be empty");
    if ( ! contactEmail.contains("@"))
        throw new IllegalArgumentException("'contactEmail' needs to be an email address");
    String website = mergedInfo.website();
    if (website.isBlank()) return;
    try {
        new URL(website);
    } catch (MalformedURLException e) {
        throw new IllegalArgumentException("'website' needs to be a valid address");
    }
}
/** Writes the given address as an "address" object under the parent, unless it is empty. */
private void toSlime(TenantAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;
    Cursor cursor = parentCursor.setObject("address");
    cursor.setString("addressLines", address.address());
    cursor.setString("postalCodeOrZip", address.code());
    cursor.setString("city", address.city());
    cursor.setString("stateRegionProvince", address.region());
    cursor.setString("country", address.country());
}
/** Writes the billing contact as a "billingContact" object under the parent, unless it is empty. */
private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;
    Cursor cursor = parentCursor.setObject("billingContact");
    cursor.setString("name", billingContact.contact().name());
    cursor.setString("email", billingContact.contact().email());
    cursor.setString("phone", billingContact.contact().phone());
    toSlime(billingContact.address(), cursor);
}
/** Writes all contacts as a "contacts" array under the parent. Only email contacts are supported. */
private void toSlime(TenantContacts contacts, Cursor parentCursor) {
    Cursor contactsArray = parentCursor.setArray("contacts");
    contacts.all().forEach(contact -> {
        Cursor contactCursor = contactsArray.addObject();
        Cursor audiencesArray = contactCursor.setArray("audiences");
        contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
        switch (contact.type()) {
            case EMAIL -> contactCursor.setString("email", ((TenantContacts.EmailContact) contact).email());
            default -> throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
        }
    });
}
/** Maps the wire name of a contact audience to its enum value. */
private static TenantContacts.Audience fromAudience(String value) {
    return switch (value) {
        case "tenant" -> TenantContacts.Audience.TENANT;
        case "notifications" -> TenantContacts.Audience.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
    };
}
/** Maps a contact audience enum value to its wire name. */
private static String toAudience(TenantContacts.Audience audience) {
    return switch (audience) {
        case TENANT -> "tenant";
        case NOTIFICATIONS -> "notifications";
    };
}
/** Updates tenant info for a cloud tenant, or 404 for unknown/non-cloud tenants. */
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if (tenant.isEmpty() || tenant.get().type() != Tenant.Type.cloud)
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this");
    return updateTenantInfo((CloudTenant) tenant.get(), request);
}
/** Returns the trimmed string value of the field if it is valid, and the given default otherwise. */
private String getString(Inspector field, String defaultValue) { // fixed parameter typo: was "defaultVale"
    return field.valid() ? field.asString().trim() : defaultValue;
}
/**
 * Merges the JSON request body into the tenant's stored info, validates the result,
 * and persists it. Fields absent from the body keep their previously stored values.
 */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    // Merge field by field so a partial body does not erase stored values.
    TenantContact mergedContact = TenantContact.empty()
            .withName(getString(insp.field("contactName"), oldInfo.contact().name()))
            .withEmail(getString(insp.field("contactEmail"), oldInfo.contact().email()));
    TenantInfo mergedInfo = TenantInfo.empty()
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            .withWebsite(getString(insp.field("website"), oldInfo.website()))
            .withContact(mergedContact)
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()))
            .withContacts(updateTenantInfoContacts(insp.field("contacts"), oldInfo.contacts()));
    validateMergedTenantInfo(mergedInfo); // throws IllegalArgumentException on invalid merged fields
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
/**
 * Merges address fields from the inspector over the old address.
 * The merged address must have either all fields set or all fields blank.
 *
 * @throws IllegalArgumentException if only some of the fields are set
 */
private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
    if ( ! insp.valid()) return oldAddress;
    TenantAddress merged = TenantAddress.empty()
            .withCountry(getString(insp.field("country"), oldAddress.country()))
            .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
            .withCity(getString(insp.field("city"), oldAddress.city()))
            .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
            .withAddress(getString(insp.field("addressLines"), oldAddress.address()));
    List<String> fields = List.of(merged.address(), merged.code(), merged.country(), merged.city(), merged.region());
    boolean allBlank = fields.stream().allMatch(String::isBlank);
    boolean noneBlank = fields.stream().noneMatch(String::isBlank);
    if (allBlank || noneBlank) return merged;
    throw new IllegalArgumentException("All address fields must be set");
}
/**
 * Merges contact fields from the inspector over the old contact.
 *
 * @throws IllegalArgumentException if a non-blank merged email lacks '@'
 */
private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) {
    if (!insp.valid()) return oldContact;
    String email = getString(insp.field("email"), oldContact.email());
    if (!email.isBlank() && !email.contains("@")) {
        throw new IllegalArgumentException("'email' needs to be an email address");
    }
    return TenantContact.empty()
            .withName(getString(insp.field("name"), oldContact.name()))
            .withEmail(email) // reuse the validated value instead of re-reading the field
            .withPhone(getString(insp.field("phone"), oldContact.phone()));
}
/** Merges billing contact and address from the inspector over the old billing info. */
private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) {
    if (insp.valid())
        return TenantBilling.empty()
                .withContact(updateTenantInfoContact(insp, oldContact.contact()))
                .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
    return oldContact;
}
/**
 * Replaces the contact list with the one in the inspector, validating each contact's email.
 *
 * @throws IllegalArgumentException if an email lacks '@' or an audience name is unknown
 */
private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) {
    if ( ! insp.valid()) return oldContacts;
    var contacts = SlimeUtils.entriesStream(insp)
            .map(entry -> {
                String email = entry.field("email").asString().trim();
                var audiences = SlimeUtils.entriesStream(entry.field("audiences"))
                        .map(audience -> fromAudience(audience.asString()))
                        .toList();
                if ( ! email.contains("@"))
                    throw new IllegalArgumentException("'email' needs to be an email address");
                return new TenantContacts.EmailContact(audiences, email);
            })
            .toList();
    return new TenantContacts(contacts);
}
/**
 * Lists notifications, for one tenant or for all tenants that have any, filtered by the
 * request's "application", "instance", "zone", "job", "type" and "level" properties.
 */
private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
    boolean productionOnly = showOnlyProductionInstances(request);
    boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
    Slime slime = new Slime();
    Cursor notificationsArray = slime.setObject().setArray("notifications");
    // Without an explicit tenant, include every tenant that currently has notifications.
    tenant.map(t -> Stream.of(TenantName.from(t)))
          .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
          .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
          .filter(notification ->
                  // Each filter passes when the property is unset, or when its mapped value matches the notification.
                  propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
                  propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
                  propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
                  propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
                  propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
                  propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
          .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
    return new SlimeJsonResponse(slime);
}
/**
 * Returns true when the request has no value for the property, or when its mapped
 * value equals the given (present) value.
 */
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
    String propertyValue = request.getProperty(property);
    if (propertyValue == null) return true;
    return value.isPresent() && mapper.apply(propertyValue).equals(value.get());
}
/** Serializes one notification, including only the source fields that are present. */
private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
    cursor.setLong("at", notification.at().toEpochMilli());
    cursor.setString("level", notificationLevelAsString(notification.level()));
    cursor.setString("type", notificationTypeAsString(notification.type()));
    if (!excludeMessages) {
        Cursor messagesArray = cursor.setArray("messages");
        notification.messages().forEach(messagesArray::addString);
    }
    // The tenant field is redundant when listing a single tenant's notifications.
    if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
    notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
    notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
    notification.source().zoneId().ifPresent(zoneId -> {
        cursor.setString("environment", zoneId.environment().value());
        cursor.setString("region", zoneId.region().value());
    });
    notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
    notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
    notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}
/** Returns the wire name of the given notification type. */
private static String notificationTypeAsString(Notification.Type type) {
    return switch (type) {
        case submission, applicationPackage -> "applicationPackage";
        case testPackage -> "testPackage";
        case deployment -> "deployment";
        case feedBlock -> "feedBlock";
        case reindex -> "reindex";
    };
}
/** Returns the wire name of the given notification level. */
private static String notificationLevelAsString(Notification.Level level) {
    return switch (level) {
        case info -> "info";
        case warning -> "warning";
        case error -> "error";
    };
}
/**
 * Lists applications under a tenant — all of them, or just the named one — with links
 * to each application and its (optionally production-only) instances.
 *
 * @throws NotExistsException if the named application does not exist
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    getTenantOrThrow(tenantName); // fail before listing if the tenant itself is unknown
    List<Application> applications = applicationName.isEmpty() ?
            controller.applications().asList(tenant) :
            controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
                      .map(List::of)
                      .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (Application application : applications) {
        Cursor applicationObject = applicationArray.addObject();
        applicationObject.setString("tenant", application.id().tenant().value());
        applicationObject.setString("application", application.id().application().value());
        // URLs are built relative to the request URI so they point back at this API.
        applicationObject.setString("url", withPath("/application/v4" +
                                                    "/tenant/" + application.id().tenant().value() +
                                                    "/application/" + application.id().application().value(),
                                                    request.getUri()).toString());
        Cursor instanceArray = applicationObject.setArray("instances");
        for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                          : application.instances().keySet()) {
            Cursor instanceObject = instanceArray.addObject();
            instanceObject.setString("instance", instance.value());
            instanceObject.setString("url", withPath("/application/v4" +
                                                     "/tenant/" + application.id().tenant().value() +
                                                     "/application/" + application.id().application().value() +
                                                     "/instance/" + instance.value(),
                                                     request.getUri()).toString());
        }
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Returns, as a zip, the application package deployed by the last run of the given job.
 *
 * @throws NotExistsException if the job has never run for this application
 */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    ZoneId zone = type.zone();
    // Replaced unchecked Optional.get() with an explicit 404-style error when no run exists.
    RevisionId revision = controller.jobController().last(id, type)
                                    .orElseThrow(() -> new NotExistsException("no run of " + type.jobName() + " found for " + id))
                                    .versions().targetRevision();
    byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
    return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
}
/** Returns the diff of the dev application package deployed by the given run, or 404 if none is stored. */
private HttpResponse devApplicationPackageDiff(RunId runId) {
    DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone());
    Optional<byte[]> diff = controller.applications().applicationStore().getDevDiff(deploymentId, runId.number());
    if (diff.isEmpty()) throw new NotExistsException("No application package diff found for " + runId);
    return new ByteArrayResponse(diff.get());
}
/**
 * Returns an application package (or its tester package) as a zip.
 * The "build" property selects the build number; "latestDeployed" picks the latest
 * production-deployed revision; when absent, the latest submitted revision is used.
 *
 * @throws NotExistsException if no matching package has been submitted/deployed
 * @throws IllegalArgumentException if "build" is not "latestDeployed" or a number >= 1
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    final long build;
    String requestedBuild = request.getProperty("build");
    if (requestedBuild != null) {
        if (requestedBuild.equals("latestDeployed")) {
            build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision()
                              .map(RevisionId::number)
                              .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication));
        } else {
            try {
                // Parse the value we already read, instead of re-fetching the request property.
                build = Validation.requireAtLeast(Long.parseLong(requestedBuild), "build number", 1L);
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
            }
        }
    } else {
        build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
                          .map(version -> version.id().number())
                          .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
    }
    RevisionId revision = RevisionId.forProduction(build);
    boolean tests = request.getBooleanProperty("tests");
    byte[] applicationPackage = tests ?
            controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
            controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
    String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
    return new ZipResponse(filename, applicationPackage);
}
/**
 * Returns the diff of the application package with the given build number, or 404 if none is stored.
 *
 * @throws IllegalArgumentException if the given build number is not a number
 */
private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
    long buildNumber;
    try {
        buildNumber = Long.parseLong(number);
    } catch (NumberFormatException e) {
        // Still an IllegalArgumentException (as before, since NFE extends IAE), but with a clear message.
        throw new IllegalArgumentException("invalid build number '" + number + "'", e);
    }
    return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber)
                     .map(ByteArrayResponse::new)
                     .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
}
/** Returns the application as a JSON response. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the compile version for the given application, optionally restricted to the
 * given major version.
 *
 * @throws IllegalArgumentException if the major version parameter is not a number
 */
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
    OptionalInt allowMajor = OptionalInt.empty();
    if (allowMajorParam != null) {
        try {
            allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
        }
    }
    Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
    Slime slime = new Slime();
    slime.setObject().setString("compileVersion", compileVersion.toFullString());
    return new SlimeJsonResponse(slime);
}
/** Returns the instance, with its deployment status, as a JSON response. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Application application = getApplication(tenantName, applicationName);
    Slime slime = new Slime();
    toSlime(slime.setObject(),
            getInstance(tenantName, applicationName, instanceName),
            controller.jobController().deploymentStatus(application),
            request);
    return new SlimeJsonResponse(slime);
}
/** Adds a developer key for the authenticated user to a cloud tenant, returning all keys. */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    TenantName name = TenantName.from(tenantName);
    if (controller.tenants().require(name).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Principal user = request.getJDiscRequest().getUserPrincipal();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.tenants().lockOrThrow(name, LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Validates that the named secret store is usable from the given deployment, by asking
 * the config server to read the given parameter in the given AWS region.
 * Query properties: "aws-region", "parameter-name", "application-id", "zone".
 */
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
    var awsRegion = request.getProperty("aws-region");
    var parameterName = request.getProperty("parameter-name");
    var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
    if (!applicationId.tenant().equals(TenantName.from(tenantName)))
        return ErrorResponse.badRequest("Invalid application id");
    var zoneId = requireZone(ZoneId.from(request.getProperty("zone")));
    var deploymentId = new DeploymentId(applicationId, zoneId);
    var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
    var tenantSecretStore = tenant.tenantSecretStores()
            .stream()
            .filter(secretStore -> secretStore.getName().equals(secretStoreName))
            .findFirst();
    if (tenantSecretStore.isEmpty())
        return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
    var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
    try {
        // Wrap the config server's JSON reply under "result", tagged with the target deployment.
        var responseRoot = new Slime();
        var responseCursor = responseRoot.setObject();
        responseCursor.setString("target", deploymentId.toString());
        var responseResultCursor = responseCursor.setObject("result");
        var responseSlime = SlimeUtils.jsonToSlime(response);
        SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
        return new SlimeJsonResponse(responseRoot);
    } catch (JsonParseException e) {
        // The config server returned something that is not valid JSON; log and report a server error.
        return ErrorResponses.logThrowing(request, log, e);
    }
}
/** Removes the given developer key from a cloud tenant, returning the remaining keys. */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // Removed an unused 'Principal user' lookup (and the extra tenant fetch it required).
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}
/** Writes each public key and its owning user as an object in the given array. */
private void toSlime(Cursor keysArray, Map<PublicKey, ? extends Principal> keys) {
    keys.forEach((key, principal) -> {
        Cursor entry = keysArray.addObject();
        entry.setString("key", KeyUtils.toPem(key));
        entry.setString("user", principal.getName());
    });
}
/** Adds a deploy key to the application, returning all its deploy keys. */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        application.get().deployKeys().forEach(key -> keysArray.addString(KeyUtils.toPem(key)));
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/** Removes a deploy key from the application, returning the remaining deploy keys. */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(toSlime(request.getData()).get().field("key").asString());
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        Cursor keysArray = root.setObject().setArray("keys");
        application.get().deployKeys().forEach(key -> keysArray.addString(KeyUtils.toPem(key)));
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}
/**
 * Adds a secret store (AWS role and account id) to a cloud tenant and returns all its stores.
 * The IAM tenant policy is created and the store registered with the secret service
 * before the store is persisted on the tenant.
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role", data).asString();
    var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);
    if (!tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }
    // External side effects happen before the store is added to the tenant record.
    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    // Re-read the tenant to serialize the stores actually persisted.
    tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/**
 * Deletes the named secret store from the tenant: removes it from the secret service,
 * deletes its IAM policy, removes it from the stored tenant, and returns the remaining stores.
 */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
    var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var optionalSecretStore = tenant.tenantSecretStores().stream()
            .filter(secretStore -> secretStore.getName().equals(name))
            .findFirst();
    if (optionalSecretStore.isEmpty())
        return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
    var tenantSecretStore = optionalSecretStore.get();
    // External cleanup happens before the store is removed from the tenant record.
    controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
    controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    // Re-read the tenant to serialize the stores actually persisted.
    tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}
/** Grants the given AWS IAM role access to this cloud tenant's archive. */
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String role = mandatory("role", toSlime(request.getData()).get()).asString();
    if (role.isBlank())
        return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
        var updatedAccess = lockedTenant.get().archiveAccess().withAWSRole(role);
        controller.tenants().store(lockedTenant.withArchiveAccess(updatedAccess));
    });
    return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
}
/** Revokes AWS archive access for this cloud tenant. */
private HttpResponse removeAwsArchiveAccess(String tenantName) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
        var updatedAccess = lockedTenant.get().archiveAccess().removeAWSRole();
        controller.tenants().store(lockedTenant.withArchiveAccess(updatedAccess));
    });
    return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
}
/** Grants the given GCP member access to this cloud tenant's archive. */
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String member = mandatory("member", toSlime(request.getData()).get()).asString();
    if (member.isBlank())
        return ErrorResponse.badRequest("GCP archive access role can't be whitespace only");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
        var updatedAccess = lockedTenant.get().archiveAccess().withGCPMember(member);
        controller.tenants().store(lockedTenant.withArchiveAccess(updatedAccess));
    });
    return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}
/** Removes any GCP archive access member configured for a cloud tenant. */
private HttpResponse removeGcpArchiveAccess(String tenantName) {
    TenantName tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, locked -> {
        controller.tenants().store(locked.withArchiveAccess(locked.get().archiveAccess().removeGCPMember()));
    });
    return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}
/**
 * Applies a partial update to an application: optional "majorVersion" (0 clears it)
 * and optional "pemDeployKey" (added to the application's deploy keys).
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector json = toSlime(request.getData()).get();
    StringJoiner messages = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorField = json.field("majorVersion");
        if (majorField.valid()) {
            // A value of 0 clears the pinned major version.
            Integer major = majorField.asLong() == 0 ? null : (int) majorField.asLong();
            application = application.withMajorVersion(major);
            messages.add("Set major version to " + (major == null ? "empty" : major));
        }
        Inspector keyField = json.field("pemDeployKey");
        if (keyField.valid()) {
            String pem = keyField.asString();
            application = application.withDeployKey(KeyUtils.fromPemEncodedPublicKey(pem));
            messages.add("Added deploy key " + pem);
        }
        controller.applications().store(application);
    });
    return new MessageResponse(messages.toString());
}
/** Returns the application with the given tenant and name, or throws NotExistsException. */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Returns the instance with the given tenant, application and instance name, or throws NotExistsException. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications().getInstance(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/** Lists the nodes allocated to the given deployment, as JSON. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Slime slime = new Slime();
    Cursor nodeArray = slime.setObject().setArray("nodes");
    for (Node node : controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(application))) {
        Cursor entry = nodeArray.addObject();
        entry.setString("hostname", node.hostname().value());
        entry.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> entry.setString("reservedTo", tenant.value()));
        entry.setString("orchestration", valueOf(node.serviceState()));
        entry.setString("version", node.currentVersion().toString());
        node.flavor().ifPresent(flavor -> entry.setString("flavor", flavor));
        toSlime(node.resources(), entry);
        entry.setString("clusterId", node.clusterId());
        entry.setString("clusterType", valueOf(node.clusterType()));
        entry.setBool("down", node.down());
        // A node is reported retired whether retirement is complete or merely wanted.
        entry.setBool("retired", node.retired() || node.wantToRetire());
        entry.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
        entry.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
        entry.setString("group", node.group());
        entry.setLong("index", node.index());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns autoscaling information for all clusters of the given deployment, as JSON. */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        clusterObject.setString("type", cluster.type().name());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // Only report a target when one exists and its numeric resources differ from current.
        if (cluster.target().isPresent()
            && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
            toSlime(cluster.target().get(), clusterObject.setObject("target"));
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
        utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
        clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
        clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
        clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
        clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the wire name of the given node state; throws on any unmapped state. */
private static String valueOf(Node.State state) {
    return switch (state) {
        case failed        -> "failed";
        case parked        -> "parked";
        case dirty         -> "dirty";
        case ready         -> "ready";
        case active        -> "active";
        case inactive      -> "inactive";
        case reserved      -> "reserved";
        case provisioned   -> "provisioned";
        case breakfixed    -> "breakfixed";
        case deprovisioned -> "deprovisioned";
        default            -> throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    };
}
/** Returns the wire name of the given orchestration state; anything unmapped (incl. unknown) becomes "unknown". */
static String valueOf(Node.ServiceState state) {
    return switch (state) {
        case expectedUp      -> "expectedUp";
        case allowedDown     -> "allowedDown";
        case permanentlyDown -> "permanentlyDown";
        case unorchestrated  -> "unorchestrated";
        default              -> "unknown";
    };
}
/** Returns the wire name of the given cluster type; throws for unknown. */
private static String valueOf(Node.ClusterType type) {
    return switch (type) {
        case admin     -> "admin";
        case content   -> "content";
        case container -> "container";
        case combined  -> "combined";
        case unknown   -> throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    };
}
/** Returns the wire name of the given disk speed. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    return switch (diskSpeed) {
        case fast -> "fast";
        case slow -> "slow";
        case any  -> "any";
    };
}
/** Returns the wire name of the given storage type. */
private static String valueOf(NodeResources.StorageType storageType) {
    return switch (storageType) {
        case remote -> "remote";
        case local  -> "local";
        case any    -> "any";
    };
}
/**
 * Streams log entries for the given deployment directly from the config server.
 * The query parameters are passed through as log filters (e.g. time range, log level).
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // Pipe the upstream log to the client; try-with-resources closes the upstream when done.
            try (logStream) {
                logStream.transferTo(outputStream);
            }
        }
        @Override
        public long maxPendingBytes() {
            return 1 << 26; // 64 MiB cap on buffered, not-yet-delivered log data
        }
    };
}
/** Returns the current support access state for the given deployment, as JSON. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    SupportAccess state = controller.supportAccess().forDeployment(deployment);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(state, controller.clock().instant()));
}
/** Grants support access to the given deployment for 7 days, attributed to the calling user. */
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    String user = requireUserPrincipal(request).getName();
    Instant now = controller.clock().instant();
    SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), user);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}
/**
 * Revokes support access for the given deployment and re-triggers (or queues) a deployment
 * so the revocation takes effect on the nodes.
 *
 * @return the resulting support access state, as JSON
 */
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    // Reuse the principal validated above instead of re-reading it from the request,
    // where getUserPrincipal() may be null and would NPE here.
    controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}
/** Returns proton (content node) metrics for the given deployment, as JSON. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    return buildResponseFromProtonMetrics(controller.serviceRegistry().configServer().getProtonMetrics(deployment));
}
/**
 * Returns scaling events per cluster for the given deployment, limited to the window given by
 * the optional "from" and "until" epoch-second request properties (defaults: epoch and now).
 */
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    Instant from = Optional.ofNullable(request.getProperty("from"))
                           .map(Long::valueOf)
                           .map(Instant::ofEpochSecond)
                           .orElse(Instant.EPOCH);
    Instant until = Optional.ofNullable(request.getProperty("until"))
                            .map(Long::valueOf)
                            .map(Instant::ofEpochSecond)
                            .orElse(Instant.now(controller.clock()));
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment)
              .forEach((cluster, events) -> scalingEventsToSlime(events, root.setArray(cluster.clusterId().value())));
    return new SlimeJsonResponse(slime);
}
/** Wraps the given proton metrics in a {"metrics": [...]} JSON response; 500 with empty body on serialization failure. */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var root = jsonMapper.createObjectNode();
        var metricsArray = jsonMapper.createArrayNode();
        for (ProtonMetrics metrics : protonMetrics)
            metricsArray.add(metrics.toJson());
        root.set("metrics", metricsArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root));
    } catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}
/**
 * Force-triggers (or, with "reTrigger", re-triggers) the given job for the given instance.
 * Request body flags: "skipTests", "reTrigger", "skipRevision", "skipUpgrade".
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
    boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
    // reTrigger yields a single job name; forceTrigger may start several, joined with ", ".
    String triggered = reTrigger
                       ? controller.applications().deploymentTrigger()
                                   .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
                       : controller.applications().deploymentTrigger()
                                   .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                                   .stream().map(job -> job.type().jobName()).collect(joining(", "));
    // Builds e.g. ", without revision upgrade", ", without platform upgrade" or
    // ", without revision and platform upgrade"; empty when nothing was suppressed.
    String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                (upgradeRevision ? "" : "revision") +
                                ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                (upgradePlatform ? "" : "platform") +
                                ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
    return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                   : "Triggered " + triggered + " for " + id + suppressedUpgrades);
}
/** Pauses the given job for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant pausedUntil = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, pausedUntil);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/** Resumes the given job, lifting any pause set on it. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}
/**
 * Serializes an application overview — ids, deployment-jobs link, latest version, changes in
 * progress, instances, deploy keys, metrics, activity and ownership — to the given cursor.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // Change status is reported from the first instance only — presumably a legacy
    // single-instance view; verify against API consumers before changing.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
    });
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/**
 * Serializes an instance — change status, change blockers, rotation id and deployments —
 * to the given cursor, for the application-level view.
 * (An unused JobStatus lookup previously performed here has been removed.)
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), status.application());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    addRotationId(object, instance);
    // Deployments in spec order when the instance is in the spec, otherwise unordered.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Shallow view: identify the deployment and link to its full resource.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}
/** Writes the instance's first assigned rotation id, if it has any, to the given cursor. */
private void addRotationId(Cursor object, Instance instance) {
    if ( ! instance.rotations().isEmpty())
        object.setString("rotationId", instance.rotations().get(0).rotationId().asString());
}
/**
 * Serializes an instance — ids, latest version info, change status, change blockers,
 * rotations, deployments (including expected-but-absent zones), keys, metrics, activity
 * and ownership — to the given cursor.
 * (An unused JobStatus lookup previously performed here has been removed.)
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.revisions().last().ifPresent(version -> {
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    addRotationId(object, instance);
    // Deployments in spec order when the instance is in the spec, otherwise unordered.
    List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
                                              .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                              .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Shallow view: identify the deployment and link to its full resource.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Also list zones where a production deployment is expected, or a manual deployment is
    // running, but no deployment is present yet.
    Stream.concat(status.jobSteps().keySet().stream()
                        .filter(job -> job.application().instance().equals(instance.name()))
                        .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                  controller.jobController().active(instance.id()).stream()
                            .map(run -> run.id().job())
                            .filter(job -> job.type().environment().isManuallyDeployed()))
          .map(job -> job.type().zone())
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns a single deployment of an instance as JSON; throws NotExistsException when absent. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
    Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Writes the platform version and/or application revision of the given change. */
private void toSlime(Cursor object, Change change, Application application) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"),
                                                                                 application.revisions().get(revision)));
}
/** Serializes an endpoint (cluster, TLS, URL, scope, routing method, legacy flag) to the given cursor. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
    object.setBool("legacy", endpoint.legacy());
}
/** Writes the rotation state under a "bcpStatus" object on the given cursor. */
private void toSlime(RotationState state, Cursor object) {
    Cursor bcpStatus = object.setObject("bcpStatus");
    bcpStatus.setString("rotationStatus", rotationStateString(state));
}
/** Writes the rotation status of each assigned rotation for the given deployment, under "endpointStatus". */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    Cursor endpointArray = object.setArray("endpointStatus");
    for (AssignedRotation rotation : rotations) {
        Cursor entry = endpointArray.addObject();
        var targets = status.of(rotation.rotationId());
        entry.setString("endpointId", rotation.endpointId().id());
        entry.setString("rotationId", rotation.rotationId().asString());
        entry.setString("clusterId", rotation.clusterId().value());
        entry.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
        entry.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}
/** Returns the monitoring system URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/** Sets a deployment in or out of service in global rotation, attributed to operator or tenant. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    if ( ! instance.deployments().containsKey(zone))
        throw new NotExistsException(instance + " has no deployment in " + zone);
    RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(new DeploymentId(instance.id(), zone)).setRoutingStatus(status, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                           instance.id().toShortString(), zone, inService ? "in" : "out of"));
}
/**
 * Returns the global rotation override status for the given deployment: the primary rotation's
 * upstream name followed by its routing status, under "globalrotationoverride".
 */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
              .requiresRotation()
              .primary()
              .ifPresent(endpoint -> {
                  RoutingStatus status = controller.routing().of(deploymentId).routingStatus();
                  array.addString(endpoint.upstreamName(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.value().name());
                  statusObject.setString("reason", "");
                  statusObject.setString("agent", status.agent().name());
                  statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation status of the given deployment, optionally for a specific endpoint. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(instance + " has no deployment in " + zone);
    Slime slime = new Slime();
    toSlime(instance.rotationStatus().of(rotation, deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the change currently rolling out to the given instance, if any, as JSON. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = instance.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.revision().ifPresent(revision -> root.setString("application", revision.toString()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended, as JSON. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Proxies a /status page request to the given service node in the given deployment. */
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Query query = Query.empty().add(request.getJDiscRequest().parameters());
    return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host),
                                                                          HttpURL.Path.parse("/status").append(restPath), query);
}
/** Lists the service nodes of the given deployment, as reported by the config server. */
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.serviceRegistry().configServer().getServiceNodes(new DeploymentId(application, requireZone(environment, region)));
}
/** Proxies the /state/v1 API of the given service on the given host, forwarding the original URL for link rewriting. */
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    HttpURL.Path path = HttpURL.Path.parse("/state/v1").append(rest);
    Query query = Query.empty().add(request.getJDiscRequest().parameters())
                       .set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString());
    return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), path, query);
}
/** Serves content from the deployed application package of the given deployment. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(application, requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
}
/** Updates the given tenant from the request body, then returns the updated tenant. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 if the tenant does not exist
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a tenant from the request body; in public systems the creating user is stored as the tenant contact. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    if (controller.system().isPublic()) {
        // Seed the new tenant's contact info from the user who created it.
        User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
        TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
                                    .info()
                                    .withContact(TenantContact.from(user.name(), user.email()));
        controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, locked ->
                controller.tenants().store(locked.withInfo(info)));
    }
    return tenant(controller.tenants().require(tenant), request);
}
/**
 * Creates the given application, and returns its id as JSON.
 * Credentials are taken from the request body and verified by the access-control layer inside createApplication.
 */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    // Called for its side effect; the response is built from the id, so the returned Application is not needed.
    controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Creates the given instance, first creating the application implicitly if it does not already exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);
    ApplicationId instanceId = applicationId.instance(instanceName);
    controller.applications().createInstance(instanceId, Tags.empty());
    Slime slime = new Slime();
    toSlime(instanceId, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9"; empty means the current system version. */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        // Only operators may force deployment of a version which is not active in this system.
        if ( ! versionStatus.isActive(version) && ! isOperator(request)) {
            String activeVersions = versionStatus.versions().stream()
                                                 .map(VespaVersion::versionNumber)
                                                 .map(Version::toString)
                                                 .collect(joining(", "));
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + activeVersions);
        }
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        message.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Inspector requestedBuild = toSlime(request.getData()).get().field("build");
    long buildNumber = requestedBuild.valid() ? requestedBuild.asLong() : -1; // -1 means "latest known package"
    StringBuilder message = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        RevisionId revision = buildNumber == -1
                              ? application.get().revisions().last().get().id()
                              : getRevision(application.get(), buildNumber);
        Change change = Change.of(revision);
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        message.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(message.toString());
}
/** Resolves the revision with the given build number, verifying its package is still present in the application store. */
private RevisionId getRevision(Application application, long build) {
    Optional<RevisionId> match = application.revisions().withPackage().stream()
                                            .map(ApplicationVersion::id)
                                            .filter(revision -> revision.number() == build)
                                            .findFirst();
    return match.filter(revision -> controller.applications().applicationStore().hasBuild(application.id().tenant(),
                                                                                          application.id().application(),
                                                                                          build))
                .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
}
/** Marks the given build as non-deployable, and cancels any instance change currently targeting it. */
private HttpResponse cancelBuild(String tenantName, String applicationName, String build){
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
    controller.applications().lockApplicationOrThrow(id, application -> {
        controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
        for (Instance instance : application.get().instances().values()) {
            if (Optional.of(revision).equals(instance.change().revision()))
                controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION);
        }
    });
    return new MessageResponse("Marked build '" + build + "' as non-deployable");
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // Upper-case with a fixed locale: under, e.g., a Turkish default locale "i".toUpperCase() yields 'İ',
        // which would make valueOf fail for otherwise valid choices.
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase(java.util.Locale.ROOT));
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // Comma-separated, optional filters; absent or blank entries mean "all".
    List<String> clusterNames = Stream.of(Optional.ofNullable(request.getProperty("clusterId")).orElse("").split(","))
                                      .filter(cluster -> ! cluster.isBlank())
                                      .toList();
    List<String> documentTypes = Stream.of(Optional.ofNullable(request.getProperty("documentType")).orElse("").split(","))
                                       .filter(type -> ! type.isBlank())
                                       .toList();
    Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
    boolean indexedOnly = request.getBooleanProperty("indexedOnly");
    controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed);
    StringBuilder message = new StringBuilder("Requested reindexing of " + id + " in " + zone);
    if ( ! clusterNames.isEmpty()) message.append(", on clusters ").append(String.join(", ", clusterNames));
    if ( ! documentTypes.isEmpty()) message.append(", for types ").append(String.join(", ", documentTypes));
    if (indexedOnly) message.append(", for indexed types");
    if (speed != null) message.append(", with speed ").append(speed);
    return new MessageResponse(message.toString());
}
/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    Cursor clustersArray = root.setArray("clusters");
    // Clusters, and the types within each, are listed in sorted name order.
    for (var cluster : reindexing.clusters().entrySet().stream().sorted(comparingByKey()).toList()) {
        Cursor clusterObject = clustersArray.addObject();
        clusterObject.setString("name", cluster.getKey());
        Cursor pendingArray = clusterObject.setArray("pending");
        for (var pending : cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()).toList()) {
            Cursor pendingObject = pendingArray.addObject();
            pendingObject.setString("type", pending.getKey());
            pendingObject.setLong("requiredGeneration", pending.getValue());
        }
        Cursor readyArray = clusterObject.setArray("ready");
        for (var ready : cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()).toList()) {
            Cursor readyObject = readyArray.addObject();
            readyObject.setString("type", ready.getKey());
            setStatus(readyObject, ready.getValue());
        }
    }
    return new SlimeJsonResponse(slime);
}
/** Writes the present fields of a reindexing status to the given cursor; absent fields are omitted. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(at -> statusObject.setLong("readyAtMillis", at.toEpochMilli()));
    status.startedAt().ifPresent(at -> statusObject.setLong("startedAtMillis", at.toEpochMilli()));
    status.endedAt().ifPresent(at -> statusObject.setLong("endedAtMillis", at.toEpochMilli()));
    status.state().ifPresent(state -> statusObject.setString("state", ApplicationApiHandler.toString(state)));
    status.message().ifPresent(message -> statusObject.setString("message", message));
    status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
    status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
}
/** Maps a reindexing state to its lower-case JSON representation. */
private static String toString(ApplicationReindexing.State state) {
    return switch (state) {
        case PENDING -> "pending";
        case RUNNING -> "running";
        case FAILED -> "failed";
        case SUCCESSFUL -> "successful";
    };
}
/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().enableReindexing(application, zone);
    return new MessageResponse("Enabled reindexing of " + application + " in " + zone);
}
/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().disableReindexing(application, zone);
    return new MessageResponse("Disabled reindexing of " + application + " in " + zone);
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    // All filter properties are optional; an absent property means "do not filter on it".
    Optional<HostName> hostname = Optional.ofNullable(request.getProperty("hostname")).map(HostName::of);
    Optional<ClusterSpec.Type> clusterType = Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from);
    Optional<ClusterSpec.Id> clusterId = Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from);
    controller.applications().restart(deploymentId, new RestartFilter().withHostName(hostname)
                                                                       .withClusterType(clusterType)
                                                                       .withClusterId(clusterId));
    return new MessageResponse("Requested restart of " + deploymentId);
}
/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    String action = suspend ? "Suspended" : "Resumed";
    return new MessageResponse(action + " orchestration of " + deploymentId);
}
/**
 * Starts a deployment job directly from an uploaded application package.
 * Only allowed to manually deployed environments, unless the caller is an operator.
 *
 * Form parts: a required application zip, and an optional "deployOptions" JSON with optional
 * "vespaVersion" and "dryRun" fields.
 */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the shared constant for both the presence check and the lookup, so the two can never diverge.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone()),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    // Parse the optional "deployOptions" JSON once, and read both fields from the same parse.
    Optional<Inspector> deployOptions = Optional.ofNullable(dataParts.get("deployOptions"))
                                                .map(json -> SlimeUtils.jsonToSlime(json).get());
    Optional<Version> version = deployOptions.flatMap(options -> optional("vespaVersion", options))
                                             .map(Version::fromString);
    ensureApplicationExists(TenantAndApplicationId.from(id), request);
    boolean dryRun = deployOptions.flatMap(options -> optional("dryRun", options))
                                  .map(Boolean::valueOf)
                                  .orElse(false);
    controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId +
                                    ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}
/** Deploys a system application (one with an application package) to the given zone, on the current system version. */
private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage())
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
    // The version is dictated by the system; callers may not choose one.
    if ( ! deployOptions.field("vespaVersion").asString().isEmpty())
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading())
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    VespaVersion systemVersion = versionStatus.systemVersion().orElseThrow(() ->
            new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"));
    DeploymentResult result = controller.applications()
                                        .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.versionNumber());
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.versionNumber());
    Cursor logArray = root.setArray("prepareMessages");
    for (LogEntry entry : result.log()) {
        Cursor logObject = logArray.addObject();
        logObject.setLong("time", entry.epochMillis());
        logObject.setString("level", entry.level().getName());
        logObject.setString("message", entry.message());
    }
    return new SlimeJsonResponse(slime);
}
/** Deletes the given tenant; "forget" (operators only) also removes it from the deleted-tenants record. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    boolean forget = request.getBooleanProperty("forget");
    if (forget && ! isOperator(request))
        return ErrorResponse.forbidden("Only operators can forget a tenant");
    TenantName tenant = TenantName.from(tenantName);
    Credentials credentials = accessControlRequests.credentials(tenant,
                                                                toSlime(request.getData()).get(),
                                                                request.getJDiscRequest());
    controller.tenants().delete(tenant, Optional.of(credentials), forget);
    return new MessageResponse("Deleted tenant " + tenantName);
}
/** Deletes the given application, using credentials from the request body. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.applications().deleteApplication(id, accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()));
    return new MessageResponse("Deleted application " + id);
}
/** Deletes the given instance, and the application itself if this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    ApplicationId instanceId = id.instance(instanceName);
    controller.applications().deleteInstance(instanceId);
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(),
                                                                    request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + instanceId.toFullString());
}
/** Deactivates the given deployment, and aborts any still-running deployment job for it. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                       requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
              .filter(run -> ! run.hasEnded())
              .ifPresent(run -> controller.jobController().abort(run.id(), "deployment deactivated by " +
                                                                           request.getJDiscRequest().getUserPrincipal().getName()));
    return new MessageResponse("Deactivated " + id);
}
/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
    boolean declaredInstance = application.deploymentSpec().instance(id.instance()).isPresent();
    ApplicationId prodInstanceId = declaredInstance ? id : TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(prodInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(prodInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(toTest, type.zone())); // non-production jobs also test the zone they run in
    Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone());
    if (deployment == null)
        throw new NotExistsException(toTest + " is not deployed in " + type.zone());
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  deployment.version(),
                                                                  deployment.revision(),
                                                                  deployment.at(),
                                                                  controller.routing().readTestRunnerEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
/**
 * Requests a service dump from a node, by writing a "serviceDump" report to the node repository,
 * which the node's agent is then expected to act on.
 *
 * Request content is JSON with a required "configId", a required non-empty "artifacts" array, and optional
 * "expiresAt" (epoch millis) and "dumpOptions" (object, copied verbatim into the stored request).
 * Query properties: "force" allows overwriting an in-progress dump; "wait" blocks until the dump completes or fails.
 */
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
                                        String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);
    // Refuse to overwrite an existing dump that has neither failed nor completed, unless forced.
    Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
    if (report != null) {
        Cursor cursor = report.get();
        boolean force = request.getBooleanProperty("force");
        if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
            throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
        }
    }
    // Parse and validate the request payload.
    Slime requestPayload;
    try {
        requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
    } catch (Exception e) {
        throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
    }
    Cursor requestPayloadCursor = requestPayload.get();
    String configId = requestPayloadCursor.field("configId").asString();
    long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
    if (configId.isEmpty()) {
        throw new IllegalArgumentException("Missing configId");
    }
    Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
    int artifactEntries = artifactsCursor.entries();
    if (artifactEntries == 0) {
        throw new IllegalArgumentException("Missing or empty 'artifacts'");
    }
    // Build the dump-request document to be stored as the node's "serviceDump" report.
    Slime dumpRequest = new Slime();
    Cursor dumpRequestCursor = dumpRequest.setObject();
    dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
    dumpRequestCursor.setString("configId", configId);
    Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
    for (int i = 0; i < artifactEntries; i++) {
        dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
    }
    if (expiresAt > 0) {
        dumpRequestCursor.setLong("expiresAt", expiresAt);
    }
    Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
    if (dumpOptionsCursor.children() > 0) {
        SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
    }
    // NOTE(review): new String(byte[]) uses the platform default charset; presumably UTF-8 is intended — confirm
    // and consider passing StandardCharsets.UTF_8 explicitly.
    var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
    nodeRepository.updateReports(zone, hostname, reportsUpdate);
    boolean wait = request.getBooleanProperty("wait");
    if (!wait) return new MessageResponse("Request created");
    return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
}
/** Returns the current "serviceDump" report for the given node, or 404 if it has none. */
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment,
                                    String region, String hostname, HttpRequest request) {
    ZoneId zone = requireZone(environment, region);
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    Optional<Slime> report = getReport(nodeRepository, zone, tenant, application, instance, hostname);
    return new SlimeJsonResponse(report.orElseThrow(() -> new NotExistsException("No service dump for node " + hostname)));
}
/**
 * Polls the node's "serviceDump" report until it carries a "completedAt" or "failedAt" timestamp,
 * then returns the report as JSON. Sleeps 2 seconds between polls, via the controller's sleeper.
 *
 * NOTE(review): there is no upper bound on the number of polls — if the node agent never updates the
 * report, this holds the request thread indefinitely; consider adding a deadline. Also relies on the
 * report still existing on every poll (Optional.get()) — presumably it cannot disappear mid-dump, but
 * confirm against the node-repository report semantics.
 */
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                              String application, String instance, String hostname) {
    int pollInterval = 2; // seconds between report polls
    Slime report;
    while (true) {
        report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get();
        Cursor cursor = report.get();
        // A non-zero completedAt or failedAt means the agent is done with the dump, one way or the other.
        if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
            break;
        }
        final Slime copyForLambda = report; // effectively-final copy for the logging lambda below
        log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda))));
        log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
        controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
    }
    return new SlimeJsonResponse(report);
}
/** Reads the "serviceDump" report for the given node, after verifying the node exists and is owned by the given application. */
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                  String application, String instance, String hostname) {
    Node node;
    try {
        node = nodeRepository.getNode(zone, hostname);
    } catch (IllegalArgumentException e) {
        throw new NotExistsException(hostname);
    }
    ApplicationId expectedOwner = ApplicationId.from(tenant, application, instance);
    ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
    if ( ! expectedOwner.equals(owner)) {
        throw new IllegalArgumentException("Node is not owned by " + expectedOwner.toFullString());
    }
    return Optional.ofNullable(node.reports().get("serviceDump"))
                   .map(SlimeUtils::jsonToSlimeOrThrow);
}
/** Parses a source revision from a Slime object; "repository", "branch" and "commit" are all required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid())) {
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    }
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    return tenant.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/**
 * Serializes a tenant, its type-specific details, and a summary of its applications/instances to the given cursor.
 *
 * Athenz tenants get domain/property/contact fields; cloud tenants get creator, developer keys, secret stores,
 * AWS integration info, quota usage and archive access; deleted tenants get only the common fields.
 * Applications are listed per instance, optionally filtered and expanded according to the request's
 * "production" / "activeInstances" / "recursive" properties.
 */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            // Contact info is optional; when present, persons are written as an array of arrays.
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            toSlime(object.setObject("integrations").setObject("aws"),
                    controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
                    cloudTenant.tenantSecretStores());
            // Quota is best-effort: a failure is logged but must not break tenant serialization.
            try {
                var usedQuota = applications.stream()
                        .map(Application::quotaUsage)
                        .reduce(QuotaUsage.none, QuotaUsage::add);
                toSlime(object.setObject("quota"), usedQuota);
            } catch (Exception e) {
                log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e)));
            }
            cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role));
            toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess"));
            break;
        }
        case deleted: break;
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    Cursor applicationArray = object.setArray("applications");
    for (Application application : applications) {
        DeploymentStatus status = null; // computed lazily, only when a recursive listing needs it
        Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                              : application.instances().values();
        // An application without (matching) instances is still listed, unless only active instances were requested.
        if (instances.isEmpty() && !showOnlyActiveInstances(request))
            toSlime(application.id(), applicationArray.addObject(), request);
        for (Instance instance : instances) {
            if (showOnlyActiveInstances(request) && instance.deployments().isEmpty())
                continue;
            if (recurseOverApplications(request)) {
                if (status == null) status = controller.jobController().deploymentStatus(application);
                toSlime(applicationArray.addObject(), instance, status, request);
            } else {
                toSlime(instance.id(), applicationArray.addObject(), request);
            }
        }
    }
    tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
/** Writes the optional AWS role and GCP member of the given archive access to the cursor. */
private void toSlime(ArchiveAccess archiveAccess, Cursor object) {
    archiveAccess.awsRole().ifPresent(awsRole -> object.setString("awsRole", awsRole));
    archiveAccess.gcpMember().ifPresent(gcpMember -> object.setString("gcpMember", gcpMember));
}
/** Writes quota usage to the cursor; "budgetUsed" is the usage rate. */
private void toSlime(Cursor object, QuotaUsage usage) {
    object.setDouble("budgetUsed", usage.rate());
}
/** Writes cluster resources (node/group counts, per-node resources, and computed cost) to the cursor. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    object.setDouble("cost", ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()));
}
/** Writes actual, ideal, current and peak utilization for cpu, memory and disk to the cursor. */
private void utilizationToSlime(Cluster.Utilization utilization, Cursor out) {
    // cpu
    out.setDouble("cpu", utilization.cpu());
    out.setDouble("idealCpu", utilization.idealCpu());
    out.setDouble("currentCpu", utilization.currentCpu());
    out.setDouble("peakCpu", utilization.peakCpu());
    // memory
    out.setDouble("memory", utilization.memory());
    out.setDouble("idealMemory", utilization.idealMemory());
    out.setDouble("currentMemory", utilization.currentMemory());
    out.setDouble("peakMemory", utilization.peakMemory());
    // disk
    out.setDouble("disk", utilization.disk());
    out.setDouble("idealDisk", utilization.idealDisk());
    out.setDouble("currentDisk", utilization.currentDisk());
    out.setDouble("peakDisk", utilization.peakDisk());
}
/** Writes each scaling event (from/to resources, start time, and completion time when done) to the given array. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    for (Cluster.ScalingEvent event : scalingEvents) {
        Cursor eventObject = scalingEventsArray.addObject();
        toSlime(event.from(), eventObject.setObject("from"));
        toSlime(event.to(), eventObject.setObject("to"));
        eventObject.setLong("at", event.at().toEpochMilli());
        event.completion().ifPresent(completion -> eventObject.setLong("completion", completion.toEpochMilli()));
    }
}
/** Writes the per-node resource numbers, plus disk speed and storage type as strings. */
private void toSlime(NodeResources resources, Cursor object) {
object.setDouble("vcpu", resources.vcpu());
object.setDouble("memoryGb", resources.memoryGb());
object.setDouble("diskGb", resources.diskGb());
object.setDouble("bandwidthGbps", resources.bandwidthGbps());
object.setString("diskSpeed", valueOf(resources.diskSpeed()));
object.setString("storageType", valueOf(resources.storageType()));
}
/** Writes a tenant-list entry: name, type-specific metadata, and the tenant's API url. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz -> {
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
        }
        case cloud, deleted -> { } // No extra metadata for these types
        default -> throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Writes tenant metadata: creation (and deletion) time, last dev activity, last prod submission,
 * and last login times per user level, all as epoch millis.
 */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
// Last start of a deployment to a dev zone across all instances; falls back to the start of the
// last dev job run when no dev deployment currently exists.
Optional<Instant> lastDev = applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> instance.deployments().values().stream()
.filter(deployment -> deployment.zone().environment() == Environment.dev)
.map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment)))
.max(Comparator.naturalOrder())
.or(() -> applications.stream()
.flatMap(application -> application.instances().values().stream())
.flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream()
.filter(job -> job.environment() == Environment.dev)
.flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
.map(Run::start)
.max(Comparator.naturalOrder()));
// Build time of the most recent submitted revision, across all applications
Optional<Instant> lastSubmission = applications.stream()
.flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream())
.max(Comparator.naturalOrder());
object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
if (tenant.type() == Tenant.Type.deleted)
object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli());
lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
.ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
.ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
.ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}
/**
 * Returns a copy of the given URI with scheme, user info, host and port preserved, the path set
 * to the given path and the query set to the given query (null clears it). The fragment is dropped.
 */
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
try {
return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
}
catch (URISyntaxException e) {
// Components come from an already-valid URI, so reassembly cannot fail
throw new RuntimeException("Will not happen", e);
}
}
/** Returns a copy of the given URI with the path set to the given path and no query. */
private URI withPath(String newPath, URI uri) {
return withPathAndQuery(newPath, null, uri);
}
/** Returns the application/v4 API path for the given deployment. */
private String toPath(DeploymentId id) {
return path("/application", "v4",
"tenant", id.applicationId().tenant(),
"application", id.applicationId().application(),
"instance", id.applicationId().instance(),
"environment", id.zoneId().environment(),
"region", id.zoneId().region());
}
/**
 * Parses the given value as a long, returning the given default when the value is null.
 *
 * @throws IllegalArgumentException if the value is non-null but not a valid number
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
if (valueOrNull == null) return defaultWhenNull;
try {
return Long.parseLong(valueOrNull);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
}
}
/**
 * Reads the given stream (at most 1 MB) and parses the content as JSON.
 *
 * @throws RuntimeException wrapping the IOException if reading the stream fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        throw new RuntimeException(e); // Previously thrown without message or cause, hiding the failure
    }
}
/** Returns the user principal of the given request, failing if none is set. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal != null) return principal;
    throw new IllegalArgumentException("Expected a user principal");
}
/** Returns the named field of the given object, failing if it is missing or invalid. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the string value of the named field, if present. */
private Optional<String> optional(String key, Inspector object) {
return SlimeUtils.optionalString(object.field(key));
}
/** Joins the string forms of the given elements with '/'; no leading or trailing slash is added. */
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
/** Writes tenant, application and the application's API url for the given id. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    String apiPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/** Writes tenant, application, instance and the instance's API url for the given id. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String apiPath = "/application/v4" +
                     "/tenant/" + id.tenant().value() +
                     "/application/" + id.application().value() +
                     "/instance/" + id.instance().value();
    object.setString("url", withPath(apiPath, request.getUri()).toString());
}
/** Appends each of the given strings to the given Slime array, in order. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Writes the given secret stores as a "secretStores" array on the given object. */
private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor secretStoresArray = object.setArray("secretStores");
    for (TenantSecretStore store : tenantSecretStores)
        toSlime(secretStoresArray.addObject(), store);
}
/** Writes the tenant's container role and its secret stores as an "accounts" array. */
private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    Cursor accountsArray = object.setArray("accounts");
    for (TenantSecretStore secretStore : tenantSecretStores)
        toSlime(accountsArray.addObject(), secretStore);
}
/** Writes the name, AWS id and role of the given secret store. */
private void toSlime(Cursor object, TenantSecretStore secretStore) {
object.setString("name", secretStore.getName());
object.setString("awsId", secretStore.getAwsId());
object.setString("role", secretStore.getRole());
}
/**
 * Reads the entire stream as one string, or returns null when the stream is empty.
 * NOTE(review): the Scanner is never closed — assumed the caller owns the stream's lifetime; confirm.
 */
private String readToString(InputStream stream) {
// Delimiter \A (start of input) makes next() consume the whole stream in one token
Scanner scanner = new Scanner(stream).useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
/** Returns whether the request asks for recursion down to (at least) tenant level. */
private static boolean recurseOverTenants(HttpRequest request) {
return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion down to (at least) application level. */
private static boolean recurseOverApplications(HttpRequest request) {
return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion down to deployment level ("recursive" is all, true, or deployment). */
private static boolean recurseOverDeployments(HttpRequest request) {
    String recursive = request.getProperty("recursive");
    // Plain comparisons instead of allocating a Guava ImmutableSet on every request; a null property matches nothing,
    // exactly as ImmutableSet.contains(null) returned false.
    return "all".equals(recursive) || "true".equals(recursive) || "deployment".equals(recursive);
}
/** Returns whether the request asks for production instances only (?production=true). */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    String production = request.getProperty("production");
    return "true".equals(production);
}
/** Returns whether the request asks for instances with deployments only (?activeInstances=true). */
private static boolean showOnlyActiveInstances(HttpRequest request) {
    String activeInstances = request.getProperty("activeInstances");
    return "true".equals(activeInstances);
}
/** Returns whether the request asks for deleted entities to be included (?includeDeleted=true). */
private static boolean includeDeleted(HttpRequest request) {
    String includeDeleted = request.getProperty("includeDeleted");
    return "true".equals(includeDeleted);
}
/** Returns the API string form of the given tenant's type. */
private static String tenantType(Tenant tenant) {
    return switch (tenant.type()) {
        case athenz -> "ATHENS";
        case cloud -> "CLOUD";
        case deleted -> "DELETED";
    };
}
/** Builds an ApplicationId from the "tenant", "application" and "instance" path segments. */
private static ApplicationId appIdFromPath(Path path) {
return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Resolves the "jobtype" path segment against this controller's zone registry. */
private JobType jobTypeFromPath(Path path) {
return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry());
}
/** Builds a RunId from the application, job type and "number" path segments. */
private RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), Long.parseLong(path.get("number")));
}
/**
 * Handles an application submission: parses the multipart payload (submit options, application zip,
 * optional test zip), validates identity configuration, ensures the application exists, and registers
 * the submission with the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
long projectId = submitOptions.field("projectId").asLong();
// 0 means "not set"; fall back to project id 1
projectId = projectId == 0 ? 1 : projectId;
Optional<String> repository = optional("repository", submitOptions);
Optional<String> branch = optional("branch", submitOptions);
Optional<String> commit = optional("commit", submitOptions);
// A source revision requires all three of repository, branch and commit
Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
: Optional.empty();
Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
Optional<String> authorEmail = optional("authorEmail", submitOptions);
Optional<String> description = optional("description", submitOptions);
int risk = (int) submitOptions.field("risk").asLong();
sourceUrl.map(URI::create).ifPresent(url -> {
if (url.getHost() == null || url.getScheme() == null)
throw new IllegalArgumentException("Source URL must include scheme and host");
});
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP);
Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
Optional.empty(),
Optional.empty(),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
ensureApplicationExists(id, request);
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
}
/** Submits a deployment-removal package for the given application and reports success. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    Submission removal = new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
                                        Optional.empty(), Optional.empty(), Optional.empty(), 0);
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                 TenantAndApplicationId.from(tenant, application),
                                                 removal,
                                                 0);
    return new MessageResponse("All deployments removed");
}
/** Parses the given environment and region, and validates the resulting zone. */
private ZoneId requireZone(String environment, String region) {
return requireZone(ZoneId.from(environment, region));
}
/** Returns the given zone, failing unless it exists in this system or is the synthetic prod "controller" zone. */
private ZoneId requireZone(ZoneId zone) {
    boolean isControllerZone = zone.environment() == Environment.prod && zone.region().value().equals("controller");
    if ( ! isControllerZone && ! controller.zoneRegistry().hasZone(zone))
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    return zone;
}
/**
 * Parses the multipart request into named parts. When an X-Content-Hash header is present, the body
 * is digested while parsing, and the digest must match the base64-decoded header value.
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
String contentHash = request.getHeader("X-Content-Hash");
if (contentHash == null)
return new MultipartParser().parse(request);
// Digest the raw body while the multipart parser consumes it
DigestInputStream digester = Signatures.sha256Digester(request.getData());
var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
return dataParts;
}
/**
 * Resolves which global rotation to operate on: by the given endpoint id when present,
 * otherwise the instance's single assigned rotation.
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    var rotations = instance.rotations();
    if (rotations.isEmpty())
        throw new NotExistsException("global rotation does not exist for " + instance);
    if (endpointId.isEmpty()) {
        if (rotations.size() > 1)
            throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
        return rotations.get(0).rotationId();
    }
    return rotations.stream()
                    .filter(rotation -> rotation.endpointId().id().equals(endpointId.get()))
                    .map(AssignedRotation::rotationId)
                    .findFirst()
                    .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                              " does not exist for " + instance));
}
/** Returns the API string form of the given rotation state. */
private static String rotationStateString(RotationState state) {
    return switch (state) {
        case in -> "IN";
        case out -> "OUT";
        case unknown -> "UNKNOWN";
    };
}
/** Returns the API string form of the given endpoint scope. */
private static String endpointScopeString(Endpoint.Scope scope) {
    return switch (scope) {
        case weighted -> "weighted";
        case application -> "application";
        case global -> "global";
        case zone -> "zone";
    };
}
/** Returns the API string form of the given routing method. */
private static String routingMethodString(RoutingMethod method) {
    return switch (method) {
        case exclusive -> "exclusive";
        case sharedLayer4 -> "sharedLayer4";
    };
}
/** Returns the request-context attribute with the given name cast to the given type, failing when absent or of another type. */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    Object value = request.getJDiscRequest().context().get(attributeName);
    if (cls.isInstance(value)) return cls.cast(value);
    throw new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request");
}
/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
    var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    for (var role : securityContext.roles())
        if (role.definition() == RoleDefinition.hostedOperator)
            return true;
    return false;
}
/**
 * Ensures the application exists before a submission: in public systems, or when the request carries
 * an Okta context, the application is created on the fly; otherwise the request fails.
 */
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
if (controller.applications().getApplication(id).isEmpty()) {
if (controller.system().isPublic() || hasOktaContext(request)) {
log.fine("Application does not exist in public, creating: " + id);
var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest());
controller.applications().createApplication(id, credentials);
} else {
log.fine("Application does not exist in hosted, failing: " + id);
throw new IllegalArgumentException("Application does not exist. Create application in Console first.");
}
}
}
/**
 * Returns whether the request context contains Okta OAuth credentials.
 * Detection works by attempting extraction and treating failure as absence.
 */
private boolean hasOktaContext(HttpRequest request) {
try {
OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context());
return true;
} catch (IllegalArgumentException e) {
// Thrown when the context has no Okta credentials — not an error here
return false;
}
}
/**
 * Returns the given deployments ordered as their production zones are declared in the instance's
 * deployment spec. Deployments in zones not declared in the spec sort first (indexOf yields -1).
 */
private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) {
    List<ZoneId> productionZones = spec.zones().stream()
                                       .filter(z -> z.region().isPresent())
                                       .map(z -> ZoneId.from(z.environment(), z.region().get()))
                                       .toList();
    return deployments.stream()
                      .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone())))
                      .toList(); // Stream.toList() is unmodifiable, replacing collectingAndThen(toList, unmodifiableList)
}
} |
Should this be containsAll so that we support adding accounts? | public List<ConfigChangeAction> validate(VespaModel current, VespaModel next, ValidationOverrides overrides, Instant now) {
Set<CloudAccount> currentAccounts = cloudAccountsOf(current);
Set<CloudAccount> nextAccounts = cloudAccountsOf(next);
if (!nextAccounts.equals(currentAccounts)) {
throw new IllegalArgumentException("Cannot change cloud account(s) from " + currentAccounts +
" to " + nextAccounts + ". The existing deployment must be removed " +
"before changing accounts");
}
return List.of();
} | if (!nextAccounts.equals(currentAccounts)) { | public List<ConfigChangeAction> validate(VespaModel current, VespaModel next, ValidationOverrides overrides, Instant now) {
for (var clusterId : current.allClusters()) {
CloudAccount currentAccount = cloudAccountOf(current, clusterId);
CloudAccount nextAccount = cloudAccountOf(next, clusterId);
if (currentAccount == null || nextAccount == null) continue;
if (!nextAccount.equals(currentAccount)) {
throw new IllegalArgumentException("Cannot change cloud account from " + currentAccount +
" to " + nextAccount + ". The existing deployment must be removed " +
"before changing accounts");
}
}
return List.of();
} | class CloudAccountChangeValidator implements ChangeValidator {
@Override
private static Set<CloudAccount> cloudAccountsOf(VespaModel model) {
Set<CloudAccount> accounts = new TreeSet<>();
for (Capacity capacity : model.provisioned().all().values()) {
accounts.add(capacity.cloudAccount().orElse(CloudAccount.empty));
}
return accounts;
}
} | class CloudAccountChangeValidator implements ChangeValidator {
@Override
private static CloudAccount cloudAccountOf(VespaModel model, ClusterSpec.Id cluster) {
Capacity capacity = model.provisioned().all().get(cluster);
return capacity == null ? null : capacity.cloudAccount().orElse(CloudAccount.empty);
}
} |
Cloud account is currently set per deployment, so there can only be one today. I suggest changing it if we add support for more (e.g. per cluster). | public List<ConfigChangeAction> validate(VespaModel current, VespaModel next, ValidationOverrides overrides, Instant now) {
Set<CloudAccount> currentAccounts = cloudAccountsOf(current);
Set<CloudAccount> nextAccounts = cloudAccountsOf(next);
if (!nextAccounts.equals(currentAccounts)) {
throw new IllegalArgumentException("Cannot change cloud account(s) from " + currentAccounts +
" to " + nextAccounts + ". The existing deployment must be removed " +
"before changing accounts");
}
return List.of();
} | if (!nextAccounts.equals(currentAccounts)) { | public List<ConfigChangeAction> validate(VespaModel current, VespaModel next, ValidationOverrides overrides, Instant now) {
for (var clusterId : current.allClusters()) {
CloudAccount currentAccount = cloudAccountOf(current, clusterId);
CloudAccount nextAccount = cloudAccountOf(next, clusterId);
if (currentAccount == null || nextAccount == null) continue;
if (!nextAccount.equals(currentAccount)) {
throw new IllegalArgumentException("Cannot change cloud account from " + currentAccount +
" to " + nextAccount + ". The existing deployment must be removed " +
"before changing accounts");
}
}
return List.of();
} | class CloudAccountChangeValidator implements ChangeValidator {
@Override
private static Set<CloudAccount> cloudAccountsOf(VespaModel model) {
Set<CloudAccount> accounts = new TreeSet<>();
for (Capacity capacity : model.provisioned().all().values()) {
accounts.add(capacity.cloudAccount().orElse(CloudAccount.empty));
}
return accounts;
}
} | class CloudAccountChangeValidator implements ChangeValidator {
@Override
private static CloudAccount cloudAccountOf(VespaModel model, ClusterSpec.Id cluster) {
Capacity capacity = model.provisioned().all().get(cluster);
return capacity == null ? null : capacity.cloudAccount().orElse(CloudAccount.empty);
}
} |
Always failing if both directories exists would likely break too many applications? | private void moveSearchDefinitionsToSchemasDir(java.nio.file.Path applicationDir) throws IOException {
File schemasDir = applicationDir.resolve(ApplicationPackage.SCHEMAS_DIR.getRelative()).toFile();
File sdDir = applicationDir.resolve(ApplicationPackage.SEARCH_DEFINITIONS_DIR.getRelative()).toFile();
if (sdDir.exists() && sdDir.isDirectory()) {
try {
File[] sdFiles = sdDir.listFiles();
if (sdFiles != null) {
Files.createDirectories(schemasDir.toPath());
Arrays.asList(sdFiles).forEach(file -> Exceptions.uncheck(
() -> Files.move(file.toPath(),
schemasDir.toPath().resolve(file.toPath().getFileName()),
StandardCopyOption.REPLACE_EXISTING)));
}
Files.delete(sdDir.toPath());
} catch (IOException | UncheckedIOException e) {
if (schemasDir.exists() && schemasDir.isDirectory())
throw new InvalidApplicationException(
"Both " + ApplicationPackage.SCHEMAS_DIR.getRelative() + "/ and " + ApplicationPackage.SEARCH_DEFINITIONS_DIR +
"/ exist in application package, please remove " + ApplicationPackage.SEARCH_DEFINITIONS_DIR + "/", e);
else
throw e;
}
}
} | if (schemasDir.exists() && schemasDir.isDirectory()) | private void moveSearchDefinitionsToSchemasDir(java.nio.file.Path applicationDir) throws IOException {
File schemasDir = applicationDir.resolve(ApplicationPackage.SCHEMAS_DIR.getRelative()).toFile();
File sdDir = applicationDir.resolve(ApplicationPackage.SEARCH_DEFINITIONS_DIR.getRelative()).toFile();
if (sdDir.exists() && sdDir.isDirectory()) {
try {
File[] sdFiles = sdDir.listFiles();
if (sdFiles != null) {
Files.createDirectories(schemasDir.toPath());
Arrays.asList(sdFiles).forEach(file -> Exceptions.uncheck(
() -> Files.move(file.toPath(),
schemasDir.toPath().resolve(file.toPath().getFileName()),
StandardCopyOption.REPLACE_EXISTING)));
}
Files.delete(sdDir.toPath());
} catch (IOException | UncheckedIOException e) {
if (schemasDir.exists() && schemasDir.isDirectory())
throw new InvalidApplicationException(
"Both " + ApplicationPackage.SCHEMAS_DIR.getRelative() + "/ and " + ApplicationPackage.SEARCH_DEFINITIONS_DIR +
"/ exist in application package, please remove " + ApplicationPackage.SEARCH_DEFINITIONS_DIR + "/", e);
else
throw e;
}
}
} | class SessionRepository {
private static final Logger log = Logger.getLogger(SessionRepository.class.getName());
private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+");
private static final long nonExistingActiveSessionId = 0;
private final Object monitor = new Object();
private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>());
private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>());
private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>());
private final Duration sessionLifetime;
private final Clock clock;
private final Curator curator;
private final Executor zkWatcherExecutor;
private final FileDistributionFactory fileDistributionFactory;
private final PermanentApplicationPackage permanentApplicationPackage;
private final FlagSource flagSource;
private final TenantFileSystemDirs tenantFileSystemDirs;
private final Metrics metrics;
private final MetricUpdater metricUpdater;
private final Curator.DirectoryCache directoryCache;
private final TenantApplications applicationRepo;
private final SessionPreparer sessionPreparer;
private final Path sessionsPath;
private final TenantName tenantName;
private final SessionCounter sessionCounter;
private final SecretStore secretStore;
private final HostProvisionerProvider hostProvisionerProvider;
private final ConfigserverConfig configserverConfig;
private final ConfigServerDB configServerDB;
private final Zone zone;
private final ModelFactoryRegistry modelFactoryRegistry;
private final ConfigDefinitionRepo configDefinitionRepo;
private final int maxNodeSize;
/**
 * Creates the session repository for one tenant: wires collaborators, loads existing remote
 * sessions, and starts watching the tenant's sessions path in ZooKeeper for changes.
 */
public SessionRepository(TenantName tenantName,
TenantApplications applicationRepo,
SessionPreparer sessionPreparer,
Curator curator,
Metrics metrics,
StripedExecutor<TenantName> zkWatcherExecutor,
FileDistributionFactory fileDistributionFactory,
PermanentApplicationPackage permanentApplicationPackage,
FlagSource flagSource,
ExecutorService zkCacheExecutor,
SecretStore secretStore,
HostProvisionerProvider hostProvisionerProvider,
ConfigserverConfig configserverConfig,
ConfigServerDB configServerDB,
Zone zone,
Clock clock,
ModelFactoryRegistry modelFactoryRegistry,
ConfigDefinitionRepo configDefinitionRepo,
int maxNodeSize) {
this.tenantName = tenantName;
sessionCounter = new SessionCounter(curator, tenantName);
this.sessionsPath = TenantRepository.getSessionsPath(tenantName);
this.clock = clock;
this.curator = curator;
this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime());
// Serialize ZK watcher callbacks per tenant via the striped executor
this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command);
this.fileDistributionFactory = fileDistributionFactory;
this.permanentApplicationPackage = permanentApplicationPackage;
this.flagSource = flagSource;
this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName);
this.applicationRepo = applicationRepo;
this.sessionPreparer = sessionPreparer;
this.metrics = metrics;
this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName));
this.secretStore = secretStore;
this.hostProvisionerProvider = hostProvisionerProvider;
this.configserverConfig = configserverConfig;
this.configServerDB = configServerDB;
this.zone = zone;
this.modelFactoryRegistry = modelFactoryRegistry;
this.configDefinitionRepo = configDefinitionRepo;
this.maxNodeSize = maxNodeSize;
// Load existing sessions before starting the directory watch, so no change is missed
loadSessions();
this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor);
this.directoryCache.addListener(this::childEvent);
this.directoryCache.start();
}
/** Loads all sessions using a freshly created daemon-thread executor sized to the host. */
private void loadSessions() {
    int threads = Math.max(8, Runtime.getRuntime().availableProcessors());
    loadSessions(Executors.newFixedThreadPool(threads, new DaemonThreadFactory("load-sessions-")));
}
/**
 * Loads remote sessions on the given executor, then shuts it down, waiting up to one minute
 * for the load tasks to complete.
 */
void loadSessions(ExecutorService executor) {
    loadRemoteSessions(executor);
    try {
        executor.shutdown();
        if ( ! executor.awaitTermination(1, TimeUnit.MINUTES))
            log.log(Level.INFO, "Executor did not terminate");
    } catch (InterruptedException e) {
        log.log(Level.WARNING, "Shutdown of executor for loading sessions failed: " + Exceptions.toMessageString(e));
        Thread.currentThread().interrupt(); // Restore the interrupt flag so callers can observe the interruption
    }
}
/** Adds the given session to the local cache, creating a matching remote session if one is missing. */
public void addLocalSession(LocalSession session) {
long sessionId = session.getSessionId();
localSessionCache.put(sessionId, session);
if (remoteSessionCache.get(sessionId) == null)
createRemoteSession(sessionId);
}
/** Returns the cached local session with the given id, or null if unknown. */
public LocalSession getLocalSession(long sessionId) {
return localSessionCache.get(sessionId);
}
/** Returns an immutable snapshot of the currently cached local sessions. */
public Collection<LocalSession> getLocalSessions() {
return List.copyOf(localSessionCache.values());
}
/** Reads all local sessions from the file system (session directories are named by their numeric id). */
public Set<LocalSession> getLocalSessionsFromFileSystem() {
    File[] sessionDirs = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
    if (sessionDirs == null) return Set.of();
    Set<LocalSession> sessions = new HashSet<>();
    for (File sessionDir : sessionDirs)
        sessions.add(getSessionFromFile(Long.parseLong(sessionDir.getName())));
    return sessions;
}
/** Builds a LocalSession from the on-disk application directory and a ZooKeeper client for the given id. */
private LocalSession getSessionFromFile(long sessionId) {
SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
}
/** Returns the ids of all session directories found on the file system. */
public Set<Long> getLocalSessionsIdsFromFileSystem() {
    File[] sessionDirs = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
    if (sessionDirs == null) return Set.of();
    Set<Long> ids = new HashSet<>();
    for (File sessionDir : sessionDirs)
        ids.add(Long.parseLong(sessionDir.getName()));
    return ids;
}
/**
 * Prepares the given session: validates the requested Vespa version, creates the application,
 * runs the session preparer, marks the session prepared, and (unless a dry run) waits for other
 * config servers to confirm.
 */
public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) {
params.vespaVersion().ifPresent(version -> {
// Bootstrap deployments may reference versions this server does not have model factories for
if ( ! params.isBootstrap() && ! modelFactoryRegistry.allVersions().contains(version))
throw new UnknownVespaVersionException("Vespa version '" + version + "' not known by this config server");
});
applicationRepo.createApplication(params.getApplicationId());
logger.log(Level.FINE, "Created application " + params.getApplicationId());
long sessionId = session.getSessionId();
SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
// Dry runs skip cross-server synchronization
Optional<CompletionWaiter> waiter = params.isDryRun()
? Optional.empty()
: Optional.of(sessionZooKeeperClient.createPrepareWaiter());
Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId());
ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params,
activeApplicationSet, now, getSessionAppDir(sessionId),
session.getApplicationPackage(), sessionZooKeeperClient)
.getConfigChangeActions();
setPrepared(session);
waiter.ifPresent(w -> w.awaitCompletion(params.getTimeoutBudget().timeLeft()));
return actions;
}
/**
 * Creates a new deployment session from an already existing session, copying the existing
 * session's application package and deployment metadata onto the new session.
 *
 * @param existingSession the session to use as base
 * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
 * @param timeoutBudget timeout for creating session and waiting for other servers.
 * @return a new session
 */
public LocalSession createSessionFromExisting(Session existingSession,
boolean internalRedeploy,
TimeoutBudget timeoutBudget,
DeployLogger deployLogger) {
ApplicationId existingApplicationId = existingSession.getApplicationId();
Tags existingTags = existingSession.getTags();
File existingApp = getSessionAppDir(existingSession.getSessionId());
LocalSession session = createSessionFromApplication(existingApp,
existingApplicationId,
existingTags,
internalRedeploy,
timeoutBudget,
deployLogger);
// Carry all deployment metadata over from the base session
session.setApplicationId(existingApplicationId);
session.setTags(existingTags);
session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
session.setVespaVersion(existingSession.getVespaVersion());
session.setDockerImageRepository(existingSession.getDockerImageRepository());
session.setAthenzDomain(existingSession.getAthenzDomain());
session.setTenantSecretStores(existingSession.getTenantSecretStores());
session.setOperatorCertificates(existingSession.getOperatorCertificates());
session.setCloudAccount(existingSession.getCloudAccount());
return session;
}
/**
 * Creates a new deployment session from an application package.
 *
 * @param applicationDirectory a File pointing to an application.
 * @param applicationId application id for this new session.
 * @param timeoutBudget Timeout for creating session and waiting for other servers.
 * @return a new session
 */
public LocalSession createSessionFromApplicationPackage(File applicationDirectory,
ApplicationId applicationId,
Tags tags,
TimeoutBudget timeoutBudget,
DeployLogger deployLogger) {
applicationRepo.createApplication(applicationId);
return createSessionFromApplication(applicationDirectory, applicationId, tags, false, timeoutBudget, deployLogger);
}
/**
 * Creates a local session based on a remote session and the distributed application package.
 * Does not wait for session being created on other servers.
 */
private void createLocalSession(File applicationFile, ApplicationId applicationId, Tags tags, long sessionId) {
try {
ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, tags, sessionId, false, Optional.empty());
createLocalSession(sessionId, applicationPackage);
} catch (Exception e) {
throw new RuntimeException("Error creating session " + sessionId, e);
}
}
/** Deletes the local session with the given id: stops its state watcher, evicts it from the cache, and removes its files. */
public void deleteLocalSession(long sessionId) {
log.log(Level.FINE, () -> "Deleting local session " + sessionId);
SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
if (watcher != null) watcher.close();
localSessionCache.remove(sessionId);
NestedTransaction transaction = new NestedTransaction();
transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath())));
transaction.commit();
}
/** Deletes every local session known to this repository. */
private void deleteAllSessions() {
    getLocalSessions().forEach(session -> deleteLocalSession(session.getSessionId()));
}
/** Returns the cached remote session with the given id, or null if not cached. */
public RemoteSession getRemoteSession(long sessionId) {
    return remoteSessionCache.get(sessionId);
}
/** Returns a copy of remote sessions */
public Collection<RemoteSession> getRemoteSessions() {
    return List.copyOf(remoteSessionCache.values());
}
/** Returns the session ids present under the sessions path in ZooKeeper. */
public List<Long> getRemoteSessionsFromZooKeeper() {
    return getSessionList(curator.getChildren(sessionsPath));
}
/**
 * Creates a remote session for the given id, loads its application if it is the
 * active session of some application, caches it and (re)attaches a state watcher.
 */
public RemoteSession createRemoteSession(long sessionId) {
    SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
    RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
    loadSessionIfActive(session);
    remoteSessionCache.put(sessionId, session);
    updateSessionStateWatcher(sessionId);
    return session;
}
/**
 * Deletes expired, non-active remote sessions from ZooKeeper, bounded per invocation.
 *
 * @param clock clock used to determine expiry
 * @return number of sessions deleted
 */
public int deleteExpiredRemoteSessions(Clock clock) {
    // Different expiry multipliers for hosted vs. self-hosted — presumably tuned for
    // how often sessions are created in each mode; TODO confirm rationale
    Duration expiryTime = configserverConfig.hostedVespa()
            ? sessionLifetime.multipliedBy(2)
            : sessionLifetime.multipliedBy(12);
    List<Long> remoteSessionsFromZooKeeper = getRemoteSessionsFromZooKeeper();
    log.log(Level.FINE, () -> "Remote sessions for tenant " + tenantName + ": " + remoteSessionsFromZooKeeper);
    int deleted = 0;
    // Delete at most 1% of sessions per call, clamped to [10, 1000]
    int deleteMax = (int) Math.min(1000, Math.max(10, remoteSessionsFromZooKeeper.size() * 0.01));
    for (long sessionId : remoteSessionsFromZooKeeper) {
        Session session = remoteSessionCache.get(sessionId);
        if (session == null)
            session = new RemoteSession(tenantName, sessionId, createSessionZooKeeperClient(sessionId));
        if (session.getStatus() == Session.Status.ACTIVATE) continue;  // never delete the active session
        if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) {
            log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it");
            deleteRemoteSessionFromZooKeeper(session);
            deleted++;
        }
        if (deleted >= deleteMax)
            break;
    }
    return deleted;
}
/** Marks the cached remote session with the given id as deactivated; no-op if not cached. */
public void deactivateSession(long sessionId) {
    RemoteSession session = remoteSessionCache.get(sessionId);
    if (session != null)
        remoteSessionCache.put(sessionId, session.deactivated());
}
/** Deletes the given session's data from ZooKeeper in a single transaction. */
public void deleteRemoteSessionFromZooKeeper(Session session) {
    SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
    Transaction transaction = sessionZooKeeperClient.deleteTransaction();
    transaction.commit();
    transaction.close();
}
/** Returns whether a session created at the given instant has outlived the given expiry time. */
private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) {
    Instant expiresAt = created.plus(expiryTime);
    return expiresAt.isBefore(clock.instant());
}
/** Extracts session ids from directory cache entries (last path component is the id). */
private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) {
    return getSessionList(children.stream()
                                  .map(child -> Path.fromString(child.getPath()).getName())
                                  .collect(Collectors.toList()));
}
/** Parses a list of numeric node names into session ids (mutable list, as before). */
private List<Long> getSessionList(List<String> children) {
    List<Long> sessionIds = new ArrayList<>();
    for (String child : children)
        sessionIds.add(Long.parseLong(child));
    return sessionIds;
}
/**
 * Loads all remote sessions found in ZooKeeper in parallel on the given executor,
 * then waits for every load to finish.
 *
 * @throws RuntimeException if loading any session fails or is interrupted
 */
private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException {
    Map<Long, Future<?>> futures = new HashMap<>();
    for (long sessionId : getRemoteSessionsFromZooKeeper()) {
        futures.put(sessionId, executor.submit(() -> sessionAdded(sessionId)));
    }
    // Wait for all submitted loads; fail fast with context about which session failed
    futures.forEach((sessionId, future) -> {
        try {
            future.get();
            log.log(Level.FINE, () -> "Remote session " + sessionId + " loaded");
        } catch (ExecutionException | InterruptedException e) {
            throw new RuntimeException("Could not load remote session " + sessionId, e);
        }
    });
}
/**
 * A session for which we don't have a watcher, i.e. hitherto unknown to us.
 * Skips sessions already marked DELETE; confirms upload for NEW sessions and
 * creates a local session from the distributed application package.
 *
 * @param sessionId session id for the new session
 */
public void sessionAdded(long sessionId) {
    if (hasStatusDeleted(sessionId)) return;
    log.log(Level.FINE, () -> "Adding remote session " + sessionId);
    Session session = createRemoteSession(sessionId);
    if (session.getStatus() == Session.Status.NEW) {
        log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId);
        confirmUpload(session);
    }
    createLocalSessionFromDistributedApplicationPackage(sessionId);
}
/** Returns whether the session with the given id has status DELETE in ZooKeeper. */
private boolean hasStatusDeleted(long sessionId) {
    SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
    RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
    return session.getStatus() == Session.Status.DELETE;
}
/**
 * Activates the remote session with the given id: makes sure a local session exists,
 * activates the application and notifies the ZooKeeper activation waiter.
 * No-op if the session is not in the remote session cache.
 */
void activate(long sessionId) {
    createLocalSessionFromDistributedApplicationPackage(sessionId);
    RemoteSession session = remoteSessionCache.get(sessionId);
    if (session == null) return;
    CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter();
    log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId);
    applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId);
    log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter);
    notifyCompletion(waiter);
    log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId);
}
/**
 * If the given session is the active session of some application, loads and
 * activates that application. Returns after the first (at most one) match.
 */
private void loadSessionIfActive(RemoteSession session) {
    for (ApplicationId applicationId : applicationRepo.activeApplications()) {
        Optional<Long> activeSession = applicationRepo.activeSessionOf(applicationId);
        if (activeSession.isPresent() && activeSession.get() == session.getSessionId()) {
            log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it");
            applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId());
            log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")");
            return;
        }
    }
}
/**
 * Prepares the remote session with the given id: ensures a local session and a loaded
 * application exist, then notifies the ZooKeeper prepare waiter.
 * No-op if the session is not in the remote session cache.
 */
void prepareRemoteSession(long sessionId) {
    createLocalSessionFromDistributedApplicationPackage(sessionId);
    RemoteSession session = remoteSessionCache.get(sessionId);
    if (session == null) return;
    SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
    CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter();
    ensureApplicationLoaded(session);
    notifyCompletion(waiter);
}
/**
 * Returns the session's application set, loading (and caching an activated copy of
 * the session) if it is not already loaded. A previous application set is reused as
 * a base only if it belongs to an older active session.
 */
public ApplicationSet ensureApplicationLoaded(RemoteSession session) {
    if (session.applicationSet().isPresent()) {
        return session.applicationSet().get();
    }
    Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId());
    Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan)
                                                                     .flatMap(this::getApplicationSet);
    ApplicationSet applicationSet = loadApplication(session, previousApplicationSet);
    RemoteSession activated = session.activated(applicationSet);
    long sessionId = activated.getSessionId();
    remoteSessionCache.put(sessionId, activated);
    updateSessionStateWatcher(sessionId);
    return applicationSet;
}
/** Notifies the ZooKeeper upload waiter for the given session. */
void confirmUpload(Session session) {
    CompletionWaiter waiter = createSessionZooKeeperClient(session.getSessionId()).getUploadWaiter();
    long sessionId = session.getSessionId();
    log.log(Level.FINE, () -> "Notifying upload waiter for session " + sessionId);
    notifyCompletion(waiter);
    log.log(Level.FINE, () -> "Done notifying upload for session " + sessionId);
}
/**
 * Notifies the given completion waiter. Failures caused by the waiter's ZooKeeper node
 * having been deleted (NoNodeException) or already existing (NodeExistsException) are
 * tolerated and only logged; any other failure is rethrown.
 *
 * @throws RuntimeException if notification fails for a reason other than the two tolerated ones
 */
void notifyCompletion(CompletionWaiter completionWaiter) {
    try {
        completionWaiter.notifyCompletion();
    } catch (RuntimeException e) {
        // Fix: the original dereferenced e.getCause() unconditionally and would NPE
        // when the RuntimeException had no cause. A cause-less exception is not one
        // of the tolerated ZooKeeper cases, so rethrow it.
        Throwable cause = e.getCause();
        if (cause == null) throw e;
        Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class,
                                                                          KeeperException.NodeExistsException.class);
        Class<? extends Throwable> exceptionClass = cause.getClass();
        if (acceptedExceptions.contains(exceptionClass))
            log.log(Level.FINE, () -> "Not able to notify completion for session (" + completionWaiter + ")," +
                                      " node " + (exceptionClass.equals(KeeperException.NoNodeException.class)
                    ? "has been deleted"
                    : "already exists"));
        else
            throw e;
    }
}
/**
 * Loads the application models for the given session from the application package
 * stored in ZooKeeper, optionally reusing a previous application set as a base.
 */
private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) {
    log.log(Level.FINE, () -> "Loading application for " + session);
    SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
    ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage();
    ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(),
                                                                session.getSessionId(),
                                                                sessionZooKeeperClient,
                                                                previousApplicationSet,
                                                                sessionPreparer.getExecutor(),
                                                                curator,
                                                                metrics,
                                                                permanentApplicationPackage,
                                                                flagSource,
                                                                secretStore,
                                                                hostProvisionerProvider,
                                                                configserverConfig,
                                                                zone,
                                                                modelFactoryRegistry,
                                                                configDefinitionRepo);
    // Builds one model per Vespa version the application should serve config for
    return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(),
                                                       sessionZooKeeperClient.readDockerImageRepository(),
                                                       sessionZooKeeperClient.readVespaVersion(),
                                                       applicationPackage,
                                                       new AllocatedHostsFromAllModels(),
                                                       clock.instant()));
}
/** Recomputes per-status session count metrics; runs asynchronously on the ZK watcher executor. */
private void nodeChanged() {
    zkWatcherExecutor.execute(() -> {
        Multiset<Session.Status> sessionMetrics = HashMultiset.create();
        getRemoteSessions().forEach(session -> sessionMetrics.add(session.getStatus()));
        metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW));
        metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE));
        metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE));
        metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE));
    });
}
/**
 * Curator directory-cache callback: reconciles the session caches when children of the
 * sessions path are added or removed, or after a reconnect. Runs on the ZK watcher executor.
 */
@SuppressWarnings("unused")
private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
    zkWatcherExecutor.execute(() -> {
        log.log(Level.FINE, () -> "Got child event: " + event);
        switch (event.getType()) {
            case CHILD_ADDED, CHILD_REMOVED, CONNECTION_RECONNECTED -> sessionsChanged();
            default -> { } // other event types are irrelevant here
        }
    });
}
/**
 * Deletes expired local sessions, plus local sessions more than a day old that are not
 * the active session of any application. Sessions created within the last ~30 seconds
 * are always skipped. Any error aborts the purge but is only logged.
 *
 * @param activeSessions currently active session id per application
 */
public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
    log.log(Level.FINE, () -> "Deleting expired local sessions for tenant '" + tenantName + "'");
    Set<Long> sessionIdsToDelete = new HashSet<>();
    Set<Long> newSessions = findNewSessionsInFileSystem();
    try {
        for (long sessionId : getLocalSessionsIdsFromFileSystem()) {
            // Never touch sessions that might still be in the process of being created
            if (newSessions.contains(sessionId))
                continue;
            var sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
            Instant createTime = sessionZooKeeperClient.readCreateTime();
            Session.Status status = sessionZooKeeperClient.readStatus();
            log.log(Level.FINE, () -> "Candidate local session for deletion: " + sessionId +
                    ", created: " + createTime + ", status " + status + ", can be deleted: " + canBeDeleted(sessionId, status) +
                    ", hasExpired: " + hasExpired(createTime));
            if (hasExpired(createTime) && canBeDeleted(sessionId, status)) {
                log.log(Level.FINE, () -> "expired: " + hasExpired(createTime) + ", can be deleted: " + canBeDeleted(sessionId, status));
                sessionIdsToDelete.add(sessionId);
            } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                // Not expired, but old: delete it if it is not the active session of its application
                LocalSession session;
                log.log(Level.FINE, () -> "not expired, but more than 1 day old: " + sessionId);
                try {
                    session = getSessionFromFile(sessionId);
                } catch (Exception e) {
                    log.log(Level.FINE, () -> "could not get session from file: " + sessionId + ": " + e.getMessage());
                    continue;
                }
                Optional<ApplicationId> applicationId = session.getOptionalApplicationId();
                if (applicationId.isEmpty()) continue;
                Long activeSession = activeSessions.get(applicationId.get());
                if (activeSession == null || activeSession != sessionId) {
                    sessionIdsToDelete.add(sessionId);
                    log.log(Level.FINE, () -> "Will delete inactive session " + sessionId + " created " +
                            createTime + " for '" + applicationId + "'");
                }
            }
        }
        sessionIdsToDelete.forEach(this::deleteLocalSession);
    } catch (Throwable e) {
        // Best effort: keep the config server running even if the purge fails
        log.log(Level.WARNING, "Error when purging old sessions ", e);
    }
    log.log(Level.FINE, () -> "Done purging old sessions");
}
/** Returns whether a session created at the given instant has outlived the configured session lifetime. */
private boolean hasExpired(Instant created) {
    Instant deadline = created.plus(sessionLifetime);
    return deadline.isBefore(clock.instant());
}
/**
 * A session can be deleted unless it is ACTIVATE or UNKNOWN — except that an old
 * session directory with UNKNOWN status may still be deleted (see below).
 */
private boolean canBeDeleted(long sessionId, Session.Status status) {
    return ( ! List.of(Session.Status.UNKNOWN, Session.Status.ACTIVATE).contains(status))
            || oldSessionDirWithUnknownStatus(sessionId, status);
}
/**
 * Returns whether this is an existing session directory with UNKNOWN status that is
 * older than the configured grace period for sessions with unknown status.
 */
private boolean oldSessionDirWithUnknownStatus(long sessionId, Session.Status status) {
    Duration expiryTime = Duration.ofHours(configserverConfig.keepSessionsWithUnknownStatusHours());
    File sessionDir = tenantFileSystemDirs.getUserApplicationDir(sessionId);
    return sessionDir.exists()
            && status == Session.Status.UNKNOWN
            && created(sessionDir).plus(expiryTime).isBefore(clock.instant());
}
/**
 * Returns ids of session directories modified within the last 30 seconds — these are
 * considered "in flight" and are protected from deletion by deleteExpiredSessions().
 */
private Set<Long> findNewSessionsInFileSystem() {
    File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
    Set<Long> newSessions = new HashSet<>();
    if (sessions != null) {
        for (File session : sessions) {
            try {
                if (Files.getLastModifiedTime(session.toPath()).toInstant()
                         .isAfter(clock.instant().minus(Duration.ofSeconds(30))))
                    newSessions.add(Long.parseLong(session.getName()));
            } catch (IOException e) {
                // Best effort: a session we cannot stat is simply not treated as new
                log.log(Level.FINE, "Unable to find last modified time for " + session.toPath());
            }
        }
    }
    return newSessions;
}
/**
 * Returns the file system creation time of the given file.
 *
 * @throws UncheckedIOException if the file's attributes cannot be read
 */
private Instant created(File file) {
    try {
        return readAttributes(file.toPath(), BasicFileAttributes.class).creationTime().toInstant();
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/**
 * Fails if a ZooKeeper node for the given session id already exists
 * (i.e. the id has already been taken by another server).
 *
 * @throws IllegalArgumentException if the session path already exists
 */
private void ensureSessionPathDoesNotExist(long sessionId) {
    Path sessionPath = getSessionPath(sessionId);
    if (curator.exists(sessionPath)) {
        throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper");
    }
}
/**
 * Creates an application package from the copied application directory, stamping it
 * with deploy metadata and validating its file extensions.
 */
private ApplicationPackage createApplication(File userDir,
                                             File configApplicationDir,
                                             ApplicationId applicationId,
                                             Tags tags,
                                             long sessionId,
                                             Optional<Long> currentlyActiveSessionId,
                                             boolean internalRedeploy,
                                             Optional<DeployLogger> deployLogger) {
    long deployTimestamp = System.currentTimeMillis();
    DeployData deployData = new DeployData(userDir.getAbsolutePath(), applicationId, tags, deployTimestamp, internalRedeploy,
                                           sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId));
    FilesApplicationPackage app = FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData);
    validateFileExtensions(applicationId, deployLogger, app);
    return app;
}
/**
 * Validates file extensions in the application package. In hosted Vespa the reaction
 * (fail vs. log) is controlled by a feature flag; elsewhere it is only logged.
 *
 * @throws InvalidApplicationException in hosted Vespa when the flag value is "FAIL"
 */
private void validateFileExtensions(ApplicationId applicationId, Optional<DeployLogger> deployLogger, FilesApplicationPackage app) {
    try {
        app.validateFileExtensions();
    } catch (IllegalArgumentException e) {
        if (configserverConfig.hostedVespa()) {
            UnboundStringFlag flag = PermanentFlags.APPLICATION_FILES_WITH_UNKNOWN_EXTENSION;
            String value = flag.bindTo(flagSource).with(APPLICATION_ID, applicationId.serializedForm()).value();
            switch (value) {
                case "FAIL" -> throw new InvalidApplicationException(e);
                case "LOG" -> deployLogger.ifPresent(logger -> logger.logApplicationPackage(Level.WARNING, e.getMessage()));
                default -> log.log(Level.WARNING, "Unknown value for flag " + flag.id() + ": " + value);
            }
        } else {
            deployLogger.ifPresent(logger -> logger.logApplicationPackage(Level.WARNING, e.getMessage()));
        }
    }
}
/**
 * Allocates the next session id, creates the session in ZooKeeper and locally, and waits
 * (up to 120 seconds, bounded by the timeout budget) for the upload to be confirmed.
 *
 * @throws RuntimeException wrapping IOException if creating the application package fails
 */
private LocalSession createSessionFromApplication(File applicationDirectory,
                                                  ApplicationId applicationId,
                                                  Tags tags,
                                                  boolean internalRedeploy,
                                                  TimeoutBudget timeoutBudget,
                                                  DeployLogger deployLogger) {
    long sessionId = getNextSessionId();
    try {
        ensureSessionPathDoesNotExist(sessionId);
        ApplicationPackage app = createApplicationPackage(applicationDirectory, applicationId, tags, sessionId, internalRedeploy, Optional.of(deployLogger));
        log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        sessionZKClient.createNewSession(clock.instant());
        CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
        LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient);
        // Cap the wait at 120 seconds even if the budget allows more
        waiter.awaitCompletion(Duration.ofSeconds(Math.min(120, timeoutBudget.timeLeft().getSeconds())));
        addLocalSession(session);
        return session;
    } catch (IOException e) {
        throw new RuntimeException("Error creating session " + sessionId, e);
    }
}
/**
 * Copies the application directory into the session's application dir and creates an
 * application package with metadata written. Synchronized on {@code monitor} —
 * presumably to serialize concurrent package creation for this tenant; TODO confirm.
 */
private ApplicationPackage createApplicationPackage(File applicationDirectory,
                                                    ApplicationId applicationId,
                                                    Tags tags,
                                                    long sessionId,
                                                    boolean internalRedeploy,
                                                    Optional<DeployLogger> deployLogger) throws IOException {
    synchronized (monitor) {
        Optional<Long> activeSessionId = getActiveSessionId(applicationId);
        File userApplicationDir = getSessionAppDir(sessionId);
        copyApp(applicationDirectory, userApplicationDir);
        ApplicationPackage applicationPackage = createApplication(applicationDirectory,
                                                                  userApplicationDir,
                                                                  applicationId,
                                                                  tags,
                                                                  sessionId,
                                                                  activeSessionId,
                                                                  internalRedeploy,
                                                                  deployLogger);
        applicationPackage.writeMetaData();
        return applicationPackage;
    }
}
/** Returns the application set of the given application's active session, if any. */
public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) {
    return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet);
}
/**
 * Returns the application set for the given session id, or empty if the session is
 * unknown or its application cannot be loaded.
 */
private Optional<ApplicationSet> getApplicationSet(long sessionId) {
    try {
        return Optional.ofNullable(getRemoteSession(sessionId)).map(this::ensureApplicationLoaded);
    } catch (IllegalArgumentException e) {
        // Best effort: a session that cannot be loaded is treated as absent, but leave
        // a trace instead of silently swallowing the exception (previously an empty catch)
        log.log(Level.FINE, () -> "Could not load application set for session " + sessionId + ": " + e.getMessage());
        return Optional.empty();
    }
}
/**
 * Copies the application into the destination dir via a temp dir + atomic move, so a
 * half-copied application is never visible. No-op if the destination already exists.
 *
 * @throws IllegalArgumentException if sourceDir is not a directory
 * @throws IOException on copy/move failure
 */
private void copyApp(File sourceDir, File destinationDir) throws IOException {
    if (destinationDir.exists()) {
        log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied");
        return;
    }
    if (! sourceDir.isDirectory())
        throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory");
    // Copy to a temp dir in the same parent, then move atomically into place
    java.nio.file.Path tempDestinationDir = null;
    try {
        tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
        log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
        IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
        moveSearchDefinitionsToSchemasDir(tempDestinationDir);
        log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
        Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
    } finally {
        // Clean up the temp dir on failure (after a successful move it no longer exists)
        if (tempDestinationDir != null)
            IOUtils.recursiveDeleteDir(tempDestinationDir.toFile());
    }
}
/**
 * Returns a new session instance for the given session id, created from the
 * session's existing application directory on disk.
 *
 * @throws IllegalArgumentException if the session's application directory does not exist
 */
void createSessionFromId(long sessionId) {
    File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
    ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
    createLocalSession(sessionId, applicationPackage);
}
/** Creates a local session object for the given id and package, and registers it in the cache. */
void createLocalSession(long sessionId, ApplicationPackage applicationPackage) {
    SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
    LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
    addLocalSession(session);
}
/**
 * Create a new local session for the given session id if it does not already exist.
 * Will also add the session to the local session cache if necessary. If there is no
 * remote session matching the session it will also be created.
 */
public void createLocalSessionFromDistributedApplicationPackage(long sessionId) {
    if (applicationRepo.sessionExistsInFileSystem(sessionId)) {
        log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists");
        createSessionFromId(sessionId);
        return;
    }
    SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
    FileReference fileReference = sessionZKClient.readApplicationPackageReference();
    log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference);
    if (fileReference == null) return;  // no distributed application package for this session
    File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir()));
    File sessionDir;
    FileDirectory fileDirectory = new FileDirectory(rootDir);
    try {
        sessionDir = fileDirectory.getFile(fileReference);
    } catch (IllegalArgumentException e) {
        // The file reference may not have been distributed to this server yet
        log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
        return;
    }
    ApplicationId applicationId = sessionZKClient.readApplicationId()
            .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
    log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId);
    try {
        createLocalSession(sessionDir, applicationId, sessionZKClient.readTags(), sessionId);
    } finally {
        // Fix: log the session id, not the session directory (previous code concatenated
        // sessionDir after "session id"); also log lazily like the rest of this class
        log.log(Level.FINE, () -> "Deleting file distribution reference for app package with session id " + sessionId);
        IOUtils.recursiveDeleteDir(sessionDir);
    }
}
/** Returns the currently active session id for the given application, if any. */
private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
    return applicationRepo.activeSessionOf(applicationId);
}
/** Allocates and returns the next session id from the shared counter in ZooKeeper. */
private long getNextSessionId() {
    return sessionCounter.nextSessionId();
}
/** Returns the ZooKeeper path of the given session. */
public Path getSessionPath(long sessionId) {
    return sessionsPath.append(String.valueOf(sessionId));
}
/** Returns the ZooKeeper path of the given session's state node. */
Path getSessionStatePath(long sessionId) {
    return getSessionPath(sessionId).append(ZKApplication.SESSIONSTATE_ZK_SUBPATH);
}
/** Creates a ZooKeeper client scoped to the given session of this tenant. */
public SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) {
    return new SessionZooKeeperClient(curator,
                                      tenantName,
                                      sessionId,
                                      configserverConfig.serverId(),
                                      fileDistributionFactory.createFileManager(getSessionAppDir(sessionId)),
                                      maxNodeSize);
}
/**
 * Returns the application directory of the given session.
 *
 * @throws IllegalArgumentException if the directory does not exist or is not a directory
 */
private File getAndValidateExistingSessionAppDir(long sessionId) {
    File appDir = getSessionAppDir(sessionId);
    if (appDir.isDirectory()) return appDir;  // isDirectory() implies exists()
    throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId);
}
/** Returns the on-disk application directory for the given session. */
private File getSessionAppDir(long sessionId) {
    return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId);
}
/** Creates a state watcher for the session's ZooKeeper state node unless one already exists. */
private void updateSessionStateWatcher(long sessionId) {
    sessionStateWatchers.computeIfAbsent(sessionId, (id) -> {
        Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(id).getAbsolute(), false);
        fileCache.addListener(this::nodeChanged);
        return new SessionStateWatcher(fileCache, id, metricUpdater, zkWatcherExecutor, this);
    });
}
@Override
public String toString() {
    return getLocalSessions().toString();
}
/** Returns the clock used by this repository. */
public Clock clock() { return clock; }
/**
 * Closes this repository: deletes all local sessions and the tenant's file system dirs,
 * closes the directory cache, and finally closes all session state watchers (by treating
 * every session as removed).
 */
public void close() {
    deleteAllSessions();
    tenantFileSystemDirs.delete();
    try {
        if (directoryCache != null) {
            directoryCache.close();
        }
    } catch (Exception e) {
        log.log(Level.WARNING, "Exception when closing path cache", e);
    } finally {
        // Empty "existing" list → every cached session is considered removed and cleaned up
        checkForRemovedSessions(new ArrayList<>());
    }
}
/** Reconciles the remote session cache against the current children of the sessions path. */
private void sessionsChanged() throws NumberFormatException {
    List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData());
    checkForRemovedSessions(sessions);
    checkForAddedSessions(sessions);
}
/**
 * Removes cached remote sessions that are no longer present in the given list,
 * closing their state watchers. Uses Iterator.remove to mutate the cache safely
 * while iterating.
 */
private void checkForRemovedSessions(List<Long> existingSessions) {
    for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) {
        long sessionId = it.next().sessionId;
        if (existingSessions.contains(sessionId)) continue;
        SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
        if (watcher != null) watcher.close();
        it.remove();
        metricUpdater.incRemovedSessions();
    }
}
/** Registers any session in the given list that is not yet in the remote session cache. */
private void checkForAddedSessions(List<Long> sessions) {
    sessions.stream()
            .filter(sessionId -> remoteSessionCache.get(sessionId) == null)
            .forEach(this::sessionAdded);
}
/**
 * Creates a transaction that sets the session's status to ACTIVATE and records it as
 * the active session of its application.
 */
public Transaction createActivateTransaction(Session session) {
    Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
    transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations());
    return transaction;
}
/** Creates a transaction that writes the given status for the session. */
public Transaction createSetStatusTransaction(Session session, Session.Status status) {
    return session.sessionZooKeeperClient.createWriteStatusTransaction(status);
}
/** Sets the session's status to PREPARE. */
void setPrepared(Session session) {
    session.setStatus(Session.Status.PREPARE);
}
/** A transaction over file operations; all operations are applied on commit, nothing on prepare. */
private static class FileTransaction extends AbstractTransaction {

    /** Creates a transaction containing the single given operation. */
    public static FileTransaction from(FileOperation operation) {
        FileTransaction transaction = new FileTransaction();
        transaction.add(operation);
        return transaction;
    }

    @Override
    public void prepare() { }

    @Override
    public void commit() {
        for (Operation operation : operations())
            ((FileOperation)operation).commit();
    }

}
/** Factory for file operations */
private static class FileOperations {

    private FileOperations() { }  // utility class — not instantiable

    /** Creates an operation which recursively deletes the given path */
    public static DeleteOperation delete(String pathToDelete) {
        return new DeleteOperation(pathToDelete);
    }

}
/** A file operation that is part of a FileTransaction and is applied on commit. */
private interface FileOperation extends Transaction.Operation {

    void commit();

}
/**
 * Recursively deletes this path and everything below.
 * Succeeds with no action if the path does not exist.
 */
private static class DeleteOperation implements FileOperation {

    private final String pathToDelete;

    DeleteOperation(String pathToDelete) {
        this.pathToDelete = pathToDelete;
    }

    @Override
    public void commit() {
        // TODO: Check delete access in prepare()
        IOUtils.recursiveDeleteDir(new File(pathToDelete));
    }

}
} | class SessionRepository {
private static final Logger log = Logger.getLogger(SessionRepository.class.getName());
// Session application directories are named by their numeric session id
private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+");
// Sentinel meaning "no currently active session" in DeployData
private static final long nonExistingActiveSessionId = 0;

// Guards application package creation on disk (see createApplicationPackage)
private final Object monitor = new Object();
private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>());
private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>());
// One watcher per session, watching the session's state node in ZooKeeper
private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>());
private final Duration sessionLifetime;
private final Clock clock;
private final Curator curator;
private final Executor zkWatcherExecutor;
private final FileDistributionFactory fileDistributionFactory;
private final PermanentApplicationPackage permanentApplicationPackage;
private final FlagSource flagSource;
private final TenantFileSystemDirs tenantFileSystemDirs;
private final Metrics metrics;
private final MetricUpdater metricUpdater;
private final Curator.DirectoryCache directoryCache;
private final TenantApplications applicationRepo;
private final SessionPreparer sessionPreparer;
// ZooKeeper path under which this tenant's sessions live
private final Path sessionsPath;
private final TenantName tenantName;
private final SessionCounter sessionCounter;
private final SecretStore secretStore;
private final HostProvisionerProvider hostProvisionerProvider;
private final ConfigserverConfig configserverConfig;
private final ConfigServerDB configServerDB;
private final Zone zone;
private final ModelFactoryRegistry modelFactoryRegistry;
private final ConfigDefinitionRepo configDefinitionRepo;
// Maximum ZooKeeper node size accepted for session data
private final int maxNodeSize;
public SessionRepository(TenantName tenantName,
                         TenantApplications applicationRepo,
                         SessionPreparer sessionPreparer,
                         Curator curator,
                         Metrics metrics,
                         StripedExecutor<TenantName> zkWatcherExecutor,
                         FileDistributionFactory fileDistributionFactory,
                         PermanentApplicationPackage permanentApplicationPackage,
                         FlagSource flagSource,
                         ExecutorService zkCacheExecutor,
                         SecretStore secretStore,
                         HostProvisionerProvider hostProvisionerProvider,
                         ConfigserverConfig configserverConfig,
                         ConfigServerDB configServerDB,
                         Zone zone,
                         Clock clock,
                         ModelFactoryRegistry modelFactoryRegistry,
                         ConfigDefinitionRepo configDefinitionRepo,
                         int maxNodeSize) {
    this.tenantName = tenantName;
    sessionCounter = new SessionCounter(curator, tenantName);
    this.sessionsPath = TenantRepository.getSessionsPath(tenantName);
    this.clock = clock;
    this.curator = curator;
    this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime());
    // All ZK watcher callbacks for this tenant run serialized on the striped executor
    this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command);
    this.fileDistributionFactory = fileDistributionFactory;
    this.permanentApplicationPackage = permanentApplicationPackage;
    this.flagSource = flagSource;
    this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName);
    this.applicationRepo = applicationRepo;
    this.sessionPreparer = sessionPreparer;
    this.metrics = metrics;
    this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName));
    this.secretStore = secretStore;
    this.hostProvisionerProvider = hostProvisionerProvider;
    this.configserverConfig = configserverConfig;
    this.configServerDB = configServerDB;
    this.zone = zone;
    this.modelFactoryRegistry = modelFactoryRegistry;
    this.configDefinitionRepo = configDefinitionRepo;
    this.maxNodeSize = maxNodeSize;

    // Load all sessions synchronously BEFORE starting the directory cache, so child
    // events observed later operate on a fully populated cache
    loadSessions(); // Needs to be done before creating cache below
    this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor);
    this.directoryCache.addListener(this::childEvent);
    this.directoryCache.start();
}
/** Loads all sessions using a dedicated daemon-thread pool sized to the host. */
private void loadSessions() {
    ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
                                                            new DaemonThreadFactory("load-sessions-"));
    loadSessions(executor);
}
/**
 * Loads all remote sessions on the given executor, then shuts the executor down and
 * waits up to one minute for it to terminate.
 */
void loadSessions(ExecutorService executor) {
    loadRemoteSessions(executor);
    try {
        executor.shutdown();
        if ( ! executor.awaitTermination(1, TimeUnit.MINUTES))
            log.log(Level.INFO, "Executor did not terminate");
    } catch (InterruptedException e) {
        log.log(Level.WARNING, "Shutdown of executor for loading sessions failed: " + Exceptions.toMessageString(e));
        Thread.currentThread().interrupt();  // fix: restore the interrupt status instead of swallowing it
    }
}
/** Caches the given local session, and creates a matching remote session if one does not exist. */
public void addLocalSession(LocalSession session) {
    long sessionId = session.getSessionId();
    localSessionCache.put(sessionId, session);
    if (remoteSessionCache.get(sessionId) == null)
        createRemoteSession(sessionId);
}
/** Returns the cached local session with the given id, or null if not cached. */
public LocalSession getLocalSession(long sessionId) {
    return localSessionCache.get(sessionId);
}
/** Returns a copy of local sessions */
public Collection<LocalSession> getLocalSessions() {
    return List.copyOf(localSessionCache.values());
}
/** Builds a local session object for every session directory found on disk. */
public Set<LocalSession> getLocalSessionsFromFileSystem() {
    File[] sessionDirs = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
    if (sessionDirs == null) return Set.of();

    Set<LocalSession> sessions = new HashSet<>();
    for (File sessionDir : sessionDirs)
        sessions.add(getSessionFromFile(Long.parseLong(sessionDir.getName())));
    return sessions;
}
/**
 * Builds a local session object from the session's application directory on disk.
 *
 * @throws IllegalArgumentException if the session's application directory does not exist
 */
private LocalSession getSessionFromFile(long sessionId) {
    SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
    File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
    ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
    return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
}
/** Returns the ids of all session directories found on disk for this tenant. */
public Set<Long> getLocalSessionsIdsFromFileSystem() {
    File[] sessionDirs = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
    if (sessionDirs == null) return Set.of();

    Set<Long> sessionIds = new HashSet<>();
    for (File sessionDir : sessionDirs)
        sessionIds.add(Long.parseLong(sessionDir.getName()));
    return sessionIds;
}
/**
 * Prepares the given session: validates the requested Vespa version, creates the
 * application, runs the session preparer and marks the session PREPARE. Unless this
 * is a dry run, waits for other servers via the prepare waiter.
 *
 * @return the config change actions resulting from preparing
 * @throws UnknownVespaVersionException if a non-bootstrap deployment requests an unknown version
 */
public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) {
    params.vespaVersion().ifPresent(version -> {
        if ( ! params.isBootstrap() && ! modelFactoryRegistry.allVersions().contains(version))
            throw new UnknownVespaVersionException("Vespa version '" + version + "' not known by this config server");
    });

    applicationRepo.createApplication(params.getApplicationId());
    logger.log(Level.FINE, "Created application " + params.getApplicationId());
    long sessionId = session.getSessionId();
    SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
    // Dry runs do not coordinate with other servers, so no waiter is created
    Optional<CompletionWaiter> waiter = params.isDryRun()
            ? Optional.empty()
            : Optional.of(sessionZooKeeperClient.createPrepareWaiter());
    Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId());
    ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params,
                                                          activeApplicationSet, now, getSessionAppDir(sessionId),
                                                          session.getApplicationPackage(), sessionZooKeeperClient)
            .getConfigChangeActions();
    setPrepared(session);
    waiter.ifPresent(w -> w.awaitCompletion(params.getTimeoutBudget().timeLeft()));
    return actions;
}
/**
 * Creates a new deployment session from an already existing session.
 *
 * @param existingSession the session to use as base
 * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
 * @param timeoutBudget timeout for creating session and waiting for other servers.
 * @param deployLogger logger which receives messages intended for the deployer.
 * @return a new session
 */
public LocalSession createSessionFromExisting(Session existingSession,
                                              boolean internalRedeploy,
                                              TimeoutBudget timeoutBudget,
                                              DeployLogger deployLogger) {
    ApplicationId existingApplicationId = existingSession.getApplicationId();
    Tags existingTags = existingSession.getTags();
    File existingApp = getSessionAppDir(existingSession.getSessionId());
    LocalSession session = createSessionFromApplication(existingApp,
                                                        existingApplicationId,
                                                        existingTags,
                                                        internalRedeploy,
                                                        timeoutBudget,
                                                        deployLogger);
    // Copy the existing session's metadata over to the new session
    session.setApplicationId(existingApplicationId);
    session.setTags(existingTags);
    session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
    session.setVespaVersion(existingSession.getVespaVersion());
    session.setDockerImageRepository(existingSession.getDockerImageRepository());
    session.setAthenzDomain(existingSession.getAthenzDomain());
    session.setTenantSecretStores(existingSession.getTenantSecretStores());
    session.setOperatorCertificates(existingSession.getOperatorCertificates());
    session.setCloudAccount(existingSession.getCloudAccount());
    return session;
}
/**
* Creates a new deployment session from an application package.
*
* @param applicationDirectory a File pointing to an application.
* @param applicationId application id for this new session.
* @param timeoutBudget Timeout for creating session and waiting for other servers.
* @return a new session
*/
public LocalSession createSessionFromApplicationPackage(File applicationDirectory,
ApplicationId applicationId,
Tags tags,
TimeoutBudget timeoutBudget,
DeployLogger deployLogger) {
applicationRepo.createApplication(applicationId);
return createSessionFromApplication(applicationDirectory, applicationId, tags, false, timeoutBudget, deployLogger);
}
/**
 * Creates a local session based on a remote session and the distributed application package.
 * Does not wait for session being created on other servers.
 */
private void createLocalSession(File applicationFile, ApplicationId applicationId, Tags tags, long sessionId) {
    try {
        ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, tags, sessionId, false, Optional.empty());
        createLocalSession(sessionId, applicationPackage);
    } catch (Exception e) {
        throw new RuntimeException("Error creating session " + sessionId, e);
    }
}
/** Removes the session from the cache, closes its state watcher and deletes its app directory on disk. */
public void deleteLocalSession(long sessionId) {
    log.log(Level.FINE, () -> "Deleting local session " + sessionId);
    SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
    if (watcher != null) watcher.close();
    localSessionCache.remove(sessionId);
    NestedTransaction transaction = new NestedTransaction();
    transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath())));
    transaction.commit();
}
/** Deletes every cached local session (used when closing this repository). */
private void deleteAllSessions() {
    for (LocalSession session : getLocalSessions()) {
        deleteLocalSession(session.getSessionId());
    }
}
/** Returns the cached remote session with the given id, or null if not cached. */
public RemoteSession getRemoteSession(long sessionId) {
    return remoteSessionCache.get(sessionId);
}
/** Returns a copy of remote sessions */
public Collection<RemoteSession> getRemoteSessions() {
    return List.copyOf(remoteSessionCache.values());
}
/** Returns the session ids currently present under this tenant's sessions path in ZooKeeper. */
public List<Long> getRemoteSessionsFromZooKeeper() {
    return getSessionList(curator.getChildren(sessionsPath));
}
/** Creates (and caches) a remote session for the given id, loading its application if it is active. */
public RemoteSession createRemoteSession(long sessionId) {
    SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
    RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
    loadSessionIfActive(session);
    remoteSessionCache.put(sessionId, session);
    updateSessionStateWatcher(sessionId);
    return session;
}
/**
 * Deletes expired, non-active remote sessions from ZooKeeper.
 * Deletion is throttled to roughly 1% of the known sessions per invocation (min 10, max 1000).
 *
 * @return the number of sessions deleted
 */
public int deleteExpiredRemoteSessions(Clock clock) {
    // Hosted systems keep sessions 2x their lifetime, self-hosted 12x — TODO confirm rationale with owners
    Duration expiryTime = configserverConfig.hostedVespa()
            ? sessionLifetime.multipliedBy(2)
            : sessionLifetime.multipliedBy(12);
    List<Long> remoteSessionsFromZooKeeper = getRemoteSessionsFromZooKeeper();
    log.log(Level.FINE, () -> "Remote sessions for tenant " + tenantName + ": " + remoteSessionsFromZooKeeper);
    int deleted = 0;
    int deleteMax = (int) Math.min(1000, Math.max(10, remoteSessionsFromZooKeeper.size() * 0.01));
    for (long sessionId : remoteSessionsFromZooKeeper) {
        Session session = remoteSessionCache.get(sessionId);
        if (session == null)
            session = new RemoteSession(tenantName, sessionId, createSessionZooKeeperClient(sessionId));
        if (session.getStatus() == Session.Status.ACTIVATE) continue; // never delete the active session
        if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) {
            log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it");
            deleteRemoteSessionFromZooKeeper(session);
            deleted++;
        }
        if (deleted >= deleteMax)
            break;
    }
    return deleted;
}
/** Replaces the cached remote session with a deactivated copy; no-op if the session is not cached. */
public void deactivateSession(long sessionId) {
    RemoteSession session = remoteSessionCache.get(sessionId);
    if (session == null) return;
    remoteSessionCache.put(sessionId, session.deactivated());
}
/** Deletes all ZooKeeper state for the given session. */
public void deleteRemoteSessionFromZooKeeper(Session session) {
    SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
    Transaction transaction = sessionZooKeeperClient.deleteTransaction();
    transaction.commit();
    transaction.close();
}
/** Returns true if the session was created more than expiryTime ago. */
private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) {
    return created.plus(expiryTime).isBefore(clock.instant());
}
/** Maps cached ZooKeeper child nodes to session ids (node name = session id). */
private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) {
    return getSessionList(children.stream()
                                  .map(child -> Path.fromString(child.getPath()).getName())
                                  .collect(Collectors.toList()));
}
/** Parses node names into numeric session ids. */
private List<Long> getSessionList(List<String> children) {
    return children.stream().map(Long::parseLong).collect(Collectors.toList());
}
/**
 * Loads all remote sessions found in ZooKeeper in parallel on the given executor,
 * then blocks until every one has completed. Fails fast if any load fails.
 */
private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException {
    Map<Long, Future<?>> futures = new HashMap<>();
    for (long sessionId : getRemoteSessionsFromZooKeeper()) {
        futures.put(sessionId, executor.submit(() -> sessionAdded(sessionId)));
    }
    futures.forEach((sessionId, future) -> {
        try {
            future.get();
            log.log(Level.FINE, () -> "Remote session " + sessionId + " loaded");
        } catch (ExecutionException | InterruptedException e) {
            throw new RuntimeException("Could not load remote session " + sessionId, e);
        }
    });
}
/**
 * A session for which we don't have a watcher, i.e. hitherto unknown to us.
 *
 * @param sessionId session id for the new session
 */
public void sessionAdded(long sessionId) {
    if (hasStatusDeleted(sessionId)) return; // already marked for deletion, nothing to do
    log.log(Level.FINE, () -> "Adding remote session " + sessionId);
    Session session = createRemoteSession(sessionId);
    if (session.getStatus() == Session.Status.NEW) {
        log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId);
        confirmUpload(session);
    }
    createLocalSessionFromDistributedApplicationPackage(sessionId);
}
/** Returns true if the session's ZooKeeper status is DELETE. */
private boolean hasStatusDeleted(long sessionId) {
    SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
    RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
    return session.getStatus() == Session.Status.DELETE;
}
/** Activates the given session: loads its application, activates it and notifies the active waiter. */
void activate(long sessionId) {
    createLocalSessionFromDistributedApplicationPackage(sessionId);
    RemoteSession session = remoteSessionCache.get(sessionId);
    if (session == null) return;
    CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter();
    log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId);
    applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId);
    log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter);
    notifyCompletion(waiter);
    log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId);
}
/** Loads and activates the session's application if it is the active session of some application. */
private void loadSessionIfActive(RemoteSession session) {
    for (ApplicationId applicationId : applicationRepo.activeApplications()) {
        Optional<Long> activeSession = applicationRepo.activeSessionOf(applicationId);
        if (activeSession.isPresent() && activeSession.get() == session.getSessionId()) {
            log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it");
            applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId());
            log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")");
            return;
        }
    }
}
/** Loads the session's application (triggered by a remote PREPARE) and notifies the prepare waiter. */
void prepareRemoteSession(long sessionId) {
    createLocalSessionFromDistributedApplicationPackage(sessionId);
    RemoteSession session = remoteSessionCache.get(sessionId);
    if (session == null) return;
    SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
    CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter();
    ensureApplicationLoaded(session);
    notifyCompletion(waiter);
}
/**
 * Returns the session's application set, building it from the stored application package if
 * it is not already loaded, and caches the resulting activated session.
 */
public ApplicationSet ensureApplicationLoaded(RemoteSession session) {
    if (session.applicationSet().isPresent()) {
        return session.applicationSet().get();
    }
    Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId());
    // Reuse models from the currently active session only if this session is newer than it
    Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan)
                                                                     .flatMap(this::getApplicationSet);
    ApplicationSet applicationSet = loadApplication(session, previousApplicationSet);
    RemoteSession activated = session.activated(applicationSet);
    long sessionId = activated.getSessionId();
    remoteSessionCache.put(sessionId, activated);
    updateSessionStateWatcher(sessionId);
    return applicationSet;
}
/** Notifies the upload waiter so other config servers know this session's package has been uploaded. */
void confirmUpload(Session session) {
    CompletionWaiter waiter = createSessionZooKeeperClient(session.getSessionId()).getUploadWaiter();
    long sessionId = session.getSessionId();
    log.log(Level.FINE, () -> "Notifying upload waiter for session " + sessionId);
    notifyCompletion(waiter);
    log.log(Level.FINE, () -> "Done notifying upload for session " + sessionId);
}
/**
 * Notifies the given completion waiter. Failures caused by the ZooKeeper node having been
 * deleted or already created are benign and only logged; anything else is rethrown.
 */
void notifyCompletion(CompletionWaiter completionWaiter) {
    try {
        completionWaiter.notifyCompletion();
    } catch (RuntimeException e) {
        // Fix: e.getCause() may be null for a plain RuntimeException; calling getClass() on it
        // would throw NPE here and mask the original exception. Rethrow such exceptions as-is.
        Throwable cause = e.getCause();
        if (cause == null) throw e;
        Class<? extends Throwable> exceptionClass = cause.getClass();
        Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class,
                                                                          KeeperException.NodeExistsException.class);
        if (acceptedExceptions.contains(exceptionClass))
            log.log(Level.FINE, () -> "Not able to notify completion for session (" + completionWaiter + ")," +
                                      " node " + (exceptionClass.equals(KeeperException.NoNodeException.class)
                    ? "has been deleted"
                    : "already exists"));
        else
            throw e;
    }
}
/**
 * Builds the application models for the given session from its application package stored in
 * ZooKeeper, optionally reusing models from the previously active application set.
 */
private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) {
    log.log(Level.FINE, () -> "Loading application for " + session);
    SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
    ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage();
    ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(),
                                                                session.getSessionId(),
                                                                sessionZooKeeperClient,
                                                                previousApplicationSet,
                                                                sessionPreparer.getExecutor(),
                                                                curator,
                                                                metrics,
                                                                permanentApplicationPackage,
                                                                flagSource,
                                                                secretStore,
                                                                hostProvisionerProvider,
                                                                configserverConfig,
                                                                zone,
                                                                modelFactoryRegistry,
                                                                configDefinitionRepo);
    return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(),
                                                       sessionZooKeeperClient.readDockerImageRepository(),
                                                       sessionZooKeeperClient.readVespaVersion(),
                                                       applicationPackage,
                                                       new AllocatedHostsFromAllModels(),
                                                       clock.instant()));
}
/** Recomputes per-status session counts and publishes them as metrics (runs on the ZK watcher executor). */
private void nodeChanged() {
    zkWatcherExecutor.execute(() -> {
        Multiset<Session.Status> statusCounts = HashMultiset.create();
        for (RemoteSession session : getRemoteSessions())
            statusCounts.add(session.getStatus());
        metricUpdater.setNewSessions(statusCounts.count(Session.Status.NEW));
        metricUpdater.setPreparedSessions(statusCounts.count(Session.Status.PREPARE));
        metricUpdater.setActivatedSessions(statusCounts.count(Session.Status.ACTIVATE));
        metricUpdater.setDeactivatedSessions(statusCounts.count(Session.Status.DEACTIVATE));
    });
}
/** Reacts to changes in the sessions path in ZooKeeper by reconciling the session caches. */
@SuppressWarnings("unused")
private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
    zkWatcherExecutor.execute(() -> {
        log.log(Level.FINE, () -> "Got child event: " + event);
        switch (event.getType()) {
            case CHILD_ADDED, CHILD_REMOVED, CONNECTION_RECONNECTED -> sessionsChanged();
            default -> { }
        }
    });
}
/**
 * Deletes expired local sessions for this tenant. A session is deleted when it has expired and
 * its status permits deletion, or when it is more than a day old and is not the active session
 * of its application. Sessions created within the last 30 seconds are never touched, since they
 * may still be in the process of being written. Best effort: any error aborts this run with a warning.
 *
 * @param activeSessions map from application id to its currently active session id
 */
public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
    log.log(Level.FINE, () -> "Deleting expired local sessions for tenant '" + tenantName + "'");
    Set<Long> sessionIdsToDelete = new HashSet<>();
    Set<Long> newSessions = findNewSessionsInFileSystem();
    try {
        for (long sessionId : getLocalSessionsIdsFromFileSystem()) {
            if (newSessions.contains(sessionId))
                continue; // possibly still being created, skip
            var sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
            Instant createTime = sessionZooKeeperClient.readCreateTime();
            Session.Status status = sessionZooKeeperClient.readStatus();
            log.log(Level.FINE, () -> "Candidate local session for deletion: " + sessionId +
                                      ", created: " + createTime + ", status " + status + ", can be deleted: " + canBeDeleted(sessionId, status) +
                                      ", hasExpired: " + hasExpired(createTime));
            if (hasExpired(createTime) && canBeDeleted(sessionId, status)) {
                log.log(Level.FINE, () -> "expired: " + hasExpired(createTime) + ", can be deleted: " + canBeDeleted(sessionId, status));
                sessionIdsToDelete.add(sessionId);
            } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                LocalSession session;
                log.log(Level.FINE, () -> "not expired, but more than 1 day old: " + sessionId);
                try {
                    session = getSessionFromFile(sessionId);
                } catch (Exception e) {
                    log.log(Level.FINE, () -> "could not get session from file: " + sessionId + ": " + e.getMessage());
                    continue;
                }
                Optional<ApplicationId> applicationId = session.getOptionalApplicationId();
                if (applicationId.isEmpty()) continue;
                // Delete if the session is not the active session of its application
                Long activeSession = activeSessions.get(applicationId.get());
                if (activeSession == null || activeSession != sessionId) {
                    sessionIdsToDelete.add(sessionId);
                    log.log(Level.FINE, () -> "Will delete inactive session " + sessionId + " created " +
                                              createTime + " for '" + applicationId + "'");
                }
            }
        }
        sessionIdsToDelete.forEach(this::deleteLocalSession);
    } catch (Throwable e) {
        log.log(Level.WARNING, "Error when purging old sessions ", e);
    }
    log.log(Level.FINE, () -> "Done purging old sessions");
}
/** Returns true if the session was created more than the configured session lifetime ago. */
private boolean hasExpired(Instant created) {
    return created.plus(sessionLifetime).isBefore(clock.instant());
}
/** A session can be deleted unless it is ACTIVATE or UNKNOWN, except old UNKNOWN-status dirs which can. */
private boolean canBeDeleted(long sessionId, Session.Status status) {
    return ( ! List.of(Session.Status.UNKNOWN, Session.Status.ACTIVATE).contains(status))
            || oldSessionDirWithUnknownStatus(sessionId, status);
}
/** Returns true for an existing session dir with UNKNOWN status older than the configured keep-time. */
private boolean oldSessionDirWithUnknownStatus(long sessionId, Session.Status status) {
    Duration expiryTime = Duration.ofHours(configserverConfig.keepSessionsWithUnknownStatusHours());
    File sessionDir = tenantFileSystemDirs.getUserApplicationDir(sessionId);
    return sessionDir.exists()
            && status == Session.Status.UNKNOWN
            && created(sessionDir).plus(expiryTime).isBefore(clock.instant());
}
/**
 * Returns ids of session directories modified within the last 30 seconds, i.e. sessions
 * that may still be in the process of being created and must not be purged.
 */
private Set<Long> findNewSessionsInFileSystem() {
    File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
    Set<Long> newSessions = new HashSet<>();
    if (sessions != null) {
        // Read the clock once so all sessions are judged against the same cutoff
        Instant cutoff = clock.instant().minus(Duration.ofSeconds(30));
        for (File session : sessions) {
            try {
                if (Files.getLastModifiedTime(session.toPath()).toInstant().isAfter(cutoff))
                    newSessions.add(Long.parseLong(session.getName()));
            } catch (IOException e) {
                // Fix: include the failure reason, which was previously dropped.
                // Best effort: an unreadable mtime just means the session is not considered new.
                log.log(Level.FINE, "Unable to find last modified time for " + session.toPath() + ": " + e.getMessage());
            }
        }
    }
    return newSessions;
}
/** Returns the file-system creation time of the given file, wrapping IOException as unchecked. */
private Instant created(File file) {
    BasicFileAttributes fileAttributes;
    try {
        fileAttributes = readAttributes(file.toPath(), BasicFileAttributes.class);
        return fileAttributes.creationTime().toInstant();
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/** Fails if the session path already exists in ZooKeeper (session ids must be fresh). */
private void ensureSessionPathDoesNotExist(long sessionId) {
    Path sessionPath = getSessionPath(sessionId);
    if (curator.exists(sessionPath)) {
        throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper");
    }
}
/**
 * Creates a FilesApplicationPackage from the copied application dir, attaching deploy metadata,
 * and validates its file extensions.
 */
private ApplicationPackage createApplication(File userDir,
                                             File configApplicationDir,
                                             ApplicationId applicationId,
                                             Tags tags,
                                             long sessionId,
                                             Optional<Long> currentlyActiveSessionId,
                                             boolean internalRedeploy,
                                             Optional<DeployLogger> deployLogger) {
    long deployTimestamp = System.currentTimeMillis();
    DeployData deployData = new DeployData(userDir.getAbsolutePath(), applicationId, tags, deployTimestamp, internalRedeploy,
                                           sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId));
    FilesApplicationPackage app = FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData);
    validateFileExtensions(applicationId, deployLogger, app);
    return app;
}
/**
 * Validates the package's file extensions. In hosted systems the reaction (fail, log, or ignore)
 * is controlled by the APPLICATION_FILES_WITH_UNKNOWN_EXTENSION feature flag; elsewhere a warning
 * is logged to the deploy logger.
 */
private void validateFileExtensions(ApplicationId applicationId, Optional<DeployLogger> deployLogger, FilesApplicationPackage app) {
    try {
        app.validateFileExtensions();
    } catch (IllegalArgumentException e) {
        if (configserverConfig.hostedVespa()) {
            UnboundStringFlag flag = PermanentFlags.APPLICATION_FILES_WITH_UNKNOWN_EXTENSION;
            String value = flag.bindTo(flagSource).with(APPLICATION_ID, applicationId.serializedForm()).value();
            switch (value) {
                case "FAIL" -> throw new InvalidApplicationException(e);
                case "LOG" -> deployLogger.ifPresent(logger -> logger.logApplicationPackage(Level.WARNING, e.getMessage()));
                default -> log.log(Level.WARNING, "Unknown value for flag " + flag.id() + ": " + value);
            }
        } else {
            deployLogger.ifPresent(logger -> logger.logApplicationPackage(Level.WARNING, e.getMessage()));
        }
    }
}
/**
 * Creates a new local session with a fresh session id from the given application directory,
 * registers it in ZooKeeper and waits (bounded to 2 minutes) for other servers to see the upload.
 */
private LocalSession createSessionFromApplication(File applicationDirectory,
                                                  ApplicationId applicationId,
                                                  Tags tags,
                                                  boolean internalRedeploy,
                                                  TimeoutBudget timeoutBudget,
                                                  DeployLogger deployLogger) {
    long sessionId = getNextSessionId();
    try {
        ensureSessionPathDoesNotExist(sessionId);
        ApplicationPackage app = createApplicationPackage(applicationDirectory, applicationId, tags, sessionId, internalRedeploy, Optional.of(deployLogger));
        log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        sessionZKClient.createNewSession(clock.instant());
        CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
        LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient);
        // Cap the wait at 120 s even if the timeout budget allows more
        waiter.awaitCompletion(Duration.ofSeconds(Math.min(120, timeoutBudget.timeLeft().getSeconds())));
        addLocalSession(session);
        return session;
    } catch (IOException e) {
        throw new RuntimeException("Error creating session " + sessionId, e);
    }
}
/**
 * Copies the application into this session's app dir and creates an application package from it
 * (with deploy metadata written). Synchronized since concurrent deployments may share state.
 */
private ApplicationPackage createApplicationPackage(File applicationDirectory,
                                                    ApplicationId applicationId,
                                                    Tags tags,
                                                    long sessionId,
                                                    boolean internalRedeploy,
                                                    Optional<DeployLogger> deployLogger) throws IOException {
    synchronized (monitor) {
        Optional<Long> activeSessionId = getActiveSessionId(applicationId);
        File userApplicationDir = getSessionAppDir(sessionId);
        copyApp(applicationDirectory, userApplicationDir);
        ApplicationPackage applicationPackage = createApplication(applicationDirectory,
                                                                  userApplicationDir,
                                                                  applicationId,
                                                                  tags,
                                                                  sessionId,
                                                                  activeSessionId,
                                                                  internalRedeploy,
                                                                  deployLogger);
        applicationPackage.writeMetaData();
        return applicationPackage;
    }
}
/** Returns the application set of the given application's active session, or empty if none. */
public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) {
    return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet);
}
/**
 * Returns the application set for the given session, or empty if the session is unknown
 * or its application cannot be loaded.
 */
private Optional<ApplicationSet> getApplicationSet(long sessionId) {
    try {
        return Optional.ofNullable(getRemoteSession(sessionId)).map(this::ensureApplicationLoaded);
    } catch (IllegalArgumentException e) {
        // Fix: the exception was silently swallowed, leaving no trace for debugging.
        // Loading is best effort here (callers treat a failure as "no application set"),
        // so keep that behavior but log the reason.
        log.log(Level.FINE, () -> "Could not load application for session " + sessionId + ": " + e.getMessage());
        return Optional.empty();
    }
}
/**
 * Copies the application into the destination dir atomically: first into a temp dir next to the
 * destination (migrating searchdefinitions/ to schemas/ on the way), then an atomic move.
 * No-op if the destination already exists.
 */
private void copyApp(File sourceDir, File destinationDir) throws IOException {
    if (destinationDir.exists()) {
        log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied");
        return;
    }
    if (! sourceDir.isDirectory())
        throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory");
    java.nio.file.Path tempDestinationDir = null;
    try {
        // Same parent as destination so the final move can be atomic (same file system)
        tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
        log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
        IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
        moveSearchDefinitionsToSchemasDir(tempDestinationDir);
        log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
        Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
    } finally {
        // Clean up the temp dir on failure (after a successful move it no longer exists)
        if (tempDestinationDir != null)
            IOUtils.recursiveDeleteDir(tempDestinationDir.toFile());
    }
}
/**
 * Returns a new session instance for the given session id.
 */
void createSessionFromId(long sessionId) {
    File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
    ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
    createLocalSession(sessionId, applicationPackage);
}
/** Creates a local session object for the given id and package and adds it to the cache. */
void createLocalSession(long sessionId, ApplicationPackage applicationPackage) {
    SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
    LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
    addLocalSession(session);
}
/**
 * Create a new local session for the given session id if it does not already exist.
 * Will also add the session to the local session cache if necessary. If there is no
 * remote session matching the session it will also be created.
 */
public void createLocalSessionFromDistributedApplicationPackage(long sessionId) {
    if (applicationRepo.sessionExistsInFileSystem(sessionId)) {
        log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists");
        createSessionFromId(sessionId);
        return;
    }
    SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
    FileReference fileReference = sessionZKClient.readApplicationPackageReference();
    log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference);
    if (fileReference != null) {
        File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir()));
        File sessionDir;
        FileDirectory fileDirectory = new FileDirectory(rootDir);
        try {
            sessionDir = fileDirectory.getFile(fileReference);
        } catch (IllegalArgumentException e) {
            // The file reference may have been cleaned up concurrently; treat as "nothing to do"
            log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
            return;
        }
        ApplicationId applicationId = sessionZKClient.readApplicationId()
                .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
        log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId);
        try {
            createLocalSession(sessionDir, applicationId, sessionZKClient.readTags(), sessionId);
        } finally {
            // Fix: the log message claimed to print a session id but concatenated the session
            // directory (a File); log the id, the directory is what is deleted below.
            log.log(Level.FINE, "Deleting file distribution reference for app package with session id " + sessionId);
            IOUtils.recursiveDeleteDir(sessionDir);
        }
    }
}
/** Returns the active session id of the given application, if any. */
private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
    return applicationRepo.activeSessionOf(applicationId);
}
/** Allocates the next session id from the shared counter in ZooKeeper. */
private long getNextSessionId() {
    return sessionCounter.nextSessionId();
}
/** Returns the ZooKeeper path for the given session. */
public Path getSessionPath(long sessionId) {
    return sessionsPath.append(String.valueOf(sessionId));
}
/** Returns the ZooKeeper path of the session's state node. */
Path getSessionStatePath(long sessionId) {
    return getSessionPath(sessionId).append(ZKApplication.SESSIONSTATE_ZK_SUBPATH);
}
/** Creates a ZooKeeper client scoped to the given session. */
public SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) {
    return new SessionZooKeeperClient(curator,
                                      tenantName,
                                      sessionId,
                                      configserverConfig.serverId(),
                                      fileDistributionFactory.createFileManager(getSessionAppDir(sessionId)),
                                      maxNodeSize);
}
/** Returns the session's app dir, failing if it does not exist or is not a directory. */
private File getAndValidateExistingSessionAppDir(long sessionId) {
    File appDir = getSessionAppDir(sessionId);
    if (!appDir.exists() || !appDir.isDirectory()) {
        throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId);
    }
    return appDir;
}
/** Returns the on-disk application dir for the given session. */
private File getSessionAppDir(long sessionId) {
    return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId);
}
/** Ensures a state watcher exists for the session, creating a file cache on its state node if needed. */
private void updateSessionStateWatcher(long sessionId) {
    sessionStateWatchers.computeIfAbsent(sessionId, (id) -> {
        Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(id).getAbsolute(), false);
        fileCache.addListener(this::nodeChanged);
        return new SessionStateWatcher(fileCache, id, metricUpdater, zkWatcherExecutor, this);
    });
}
@Override
public String toString() {
    return getLocalSessions().toString();
}
/** Returns the clock used by this repository (injectable for testing). */
public Clock clock() { return clock; }
/** Deletes all sessions and tenant directories and closes the ZooKeeper directory cache. */
public void close() {
    deleteAllSessions();
    tenantFileSystemDirs.delete();
    try {
        if (directoryCache != null) {
            directoryCache.close();
        }
    } catch (Exception e) {
        log.log(Level.WARNING, "Exception when closing path cache", e);
    } finally {
        // Passing an empty list removes (and closes watchers for) all cached remote sessions
        checkForRemovedSessions(new ArrayList<>());
    }
}
/** Reconciles the remote session cache against the current ZooKeeper session list. */
private void sessionsChanged() throws NumberFormatException {
    List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData());
    checkForRemovedSessions(sessions);
    checkForAddedSessions(sessions);
}
/** Removes cached sessions no longer present in ZooKeeper and closes their watchers. */
private void checkForRemovedSessions(List<Long> existingSessions) {
    for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) {
        long sessionId = it.next().sessionId;
        if (existingSessions.contains(sessionId)) continue;
        SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
        if (watcher != null) watcher.close();
        it.remove();
        metricUpdater.incRemovedSessions();
    }
}
/** Adds sessions present in ZooKeeper but not yet in the cache. */
private void checkForAddedSessions(List<Long> sessions) {
    for (Long sessionId : sessions)
        if (remoteSessionCache.get(sessionId) == null)
            sessionAdded(sessionId);
}
/** Returns a transaction that sets the session to ACTIVATE and records it as the application's active session. */
public Transaction createActivateTransaction(Session session) {
    Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
    transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations());
    return transaction;
}
/** Returns a transaction that writes the given status for the session. */
public Transaction createSetStatusTransaction(Session session, Session.Status status) {
    return session.sessionZooKeeperClient.createWriteStatusTransaction(status);
}
/** Marks the session as PREPARE. */
void setPrepared(Session session) {
    session.setStatus(Session.Status.PREPARE);
}
/** A transaction over file operations; prepare is a no-op, commit executes each operation in order. */
private static class FileTransaction extends AbstractTransaction {
    public static FileTransaction from(FileOperation operation) {
        FileTransaction transaction = new FileTransaction();
        transaction.add(operation);
        return transaction;
    }
    @Override
    public void prepare() { }
    @Override
    public void commit() {
        for (Operation operation : operations())
            ((FileOperation)operation).commit();
    }
}
/** Factory for file operations */
private static class FileOperations {
    /** Creates an operation which recursively deletes the given path */
    public static DeleteOperation delete(String pathToDelete) {
        return new DeleteOperation(pathToDelete);
    }
}
/** A file operation that can be committed as part of a FileTransaction. */
private interface FileOperation extends Transaction.Operation {
    void commit();
}
/**
 * Recursively deletes this path and everything below.
 * Succeeds with no action if the path does not exist.
 */
private static class DeleteOperation implements FileOperation {
    private final String pathToDelete;
    DeleteOperation(String pathToDelete) {
        this.pathToDelete = pathToDelete;
    }
    @Override
    public void commit() {
        IOUtils.recursiveDeleteDir(new File(pathToDelete));
    }
}
} |
Yes, that's the assumption | private void moveSearchDefinitionsToSchemasDir(java.nio.file.Path applicationDir) throws IOException {
File schemasDir = applicationDir.resolve(ApplicationPackage.SCHEMAS_DIR.getRelative()).toFile();
File sdDir = applicationDir.resolve(ApplicationPackage.SEARCH_DEFINITIONS_DIR.getRelative()).toFile();
if (sdDir.exists() && sdDir.isDirectory()) {
try {
File[] sdFiles = sdDir.listFiles();
if (sdFiles != null) {
Files.createDirectories(schemasDir.toPath());
Arrays.asList(sdFiles).forEach(file -> Exceptions.uncheck(
() -> Files.move(file.toPath(),
schemasDir.toPath().resolve(file.toPath().getFileName()),
StandardCopyOption.REPLACE_EXISTING)));
}
Files.delete(sdDir.toPath());
} catch (IOException | UncheckedIOException e) {
if (schemasDir.exists() && schemasDir.isDirectory())
throw new InvalidApplicationException(
"Both " + ApplicationPackage.SCHEMAS_DIR.getRelative() + "/ and " + ApplicationPackage.SEARCH_DEFINITIONS_DIR +
"/ exist in application package, please remove " + ApplicationPackage.SEARCH_DEFINITIONS_DIR + "/", e);
else
throw e;
}
}
} | if (schemasDir.exists() && schemasDir.isDirectory()) | private void moveSearchDefinitionsToSchemasDir(java.nio.file.Path applicationDir) throws IOException {
File schemasDir = applicationDir.resolve(ApplicationPackage.SCHEMAS_DIR.getRelative()).toFile();
File sdDir = applicationDir.resolve(ApplicationPackage.SEARCH_DEFINITIONS_DIR.getRelative()).toFile();
if (sdDir.exists() && sdDir.isDirectory()) {
try {
File[] sdFiles = sdDir.listFiles();
if (sdFiles != null) {
Files.createDirectories(schemasDir.toPath());
Arrays.asList(sdFiles).forEach(file -> Exceptions.uncheck(
() -> Files.move(file.toPath(),
schemasDir.toPath().resolve(file.toPath().getFileName()),
StandardCopyOption.REPLACE_EXISTING)));
}
Files.delete(sdDir.toPath());
} catch (IOException | UncheckedIOException e) {
if (schemasDir.exists() && schemasDir.isDirectory())
throw new InvalidApplicationException(
"Both " + ApplicationPackage.SCHEMAS_DIR.getRelative() + "/ and " + ApplicationPackage.SEARCH_DEFINITIONS_DIR +
"/ exist in application package, please remove " + ApplicationPackage.SEARCH_DEFINITIONS_DIR + "/", e);
else
throw e;
}
}
} | class SessionRepository {
private static final Logger log = Logger.getLogger(SessionRepository.class.getName());
private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+");
private static final long nonExistingActiveSessionId = 0;
private final Object monitor = new Object();
private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>());
private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>());
private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>());
private final Duration sessionLifetime;
private final Clock clock;
private final Curator curator;
private final Executor zkWatcherExecutor;
private final FileDistributionFactory fileDistributionFactory;
private final PermanentApplicationPackage permanentApplicationPackage;
private final FlagSource flagSource;
private final TenantFileSystemDirs tenantFileSystemDirs;
private final Metrics metrics;
private final MetricUpdater metricUpdater;
private final Curator.DirectoryCache directoryCache;
private final TenantApplications applicationRepo;
private final SessionPreparer sessionPreparer;
private final Path sessionsPath;
private final TenantName tenantName;
private final SessionCounter sessionCounter;
private final SecretStore secretStore;
private final HostProvisionerProvider hostProvisionerProvider;
private final ConfigserverConfig configserverConfig;
private final ConfigServerDB configServerDB;
private final Zone zone;
private final ModelFactoryRegistry modelFactoryRegistry;
private final ConfigDefinitionRepo configDefinitionRepo;
private final int maxNodeSize;
/** Creates the repository, loads existing sessions and starts watching the sessions path in ZooKeeper. */
public SessionRepository(TenantName tenantName,
                         TenantApplications applicationRepo,
                         SessionPreparer sessionPreparer,
                         Curator curator,
                         Metrics metrics,
                         StripedExecutor<TenantName> zkWatcherExecutor,
                         FileDistributionFactory fileDistributionFactory,
                         PermanentApplicationPackage permanentApplicationPackage,
                         FlagSource flagSource,
                         ExecutorService zkCacheExecutor,
                         SecretStore secretStore,
                         HostProvisionerProvider hostProvisionerProvider,
                         ConfigserverConfig configserverConfig,
                         ConfigServerDB configServerDB,
                         Zone zone,
                         Clock clock,
                         ModelFactoryRegistry modelFactoryRegistry,
                         ConfigDefinitionRepo configDefinitionRepo,
                         int maxNodeSize) {
    this.tenantName = tenantName;
    sessionCounter = new SessionCounter(curator, tenantName);
    this.sessionsPath = TenantRepository.getSessionsPath(tenantName);
    this.clock = clock;
    this.curator = curator;
    this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime());
    // All ZK watcher work for this tenant runs serialized on the striped executor
    this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command);
    this.fileDistributionFactory = fileDistributionFactory;
    this.permanentApplicationPackage = permanentApplicationPackage;
    this.flagSource = flagSource;
    this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName);
    this.applicationRepo = applicationRepo;
    this.sessionPreparer = sessionPreparer;
    this.metrics = metrics;
    this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName));
    this.secretStore = secretStore;
    this.hostProvisionerProvider = hostProvisionerProvider;
    this.configserverConfig = configserverConfig;
    this.configServerDB = configServerDB;
    this.zone = zone;
    this.modelFactoryRegistry = modelFactoryRegistry;
    this.configDefinitionRepo = configDefinitionRepo;
    this.maxNodeSize = maxNodeSize;
    // Load existing sessions before starting the directory cache so events are not missed
    loadSessions();
    this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor);
    this.directoryCache.addListener(this::childEvent);
    this.directoryCache.start();
}
/** Loads all remote sessions at construction time, using a dedicated daemon thread pool. */
private void loadSessions() {
    int threads = Math.max(8, Runtime.getRuntime().availableProcessors());
    ExecutorService loader = Executors.newFixedThreadPool(threads, new DaemonThreadFactory("load-sessions-"));
    loadSessions(loader);
}
/**
 * Loads all remote sessions using the given executor, then shuts the executor down and
 * waits up to one minute for the load tasks to finish.
 *
 * @param executor the executor to run load tasks on; it is shut down by this method
 */
void loadSessions(ExecutorService executor) {
    loadRemoteSessions(executor);
    try {
        executor.shutdown();
        if ( ! executor.awaitTermination(1, TimeUnit.MINUTES))
            log.log(Level.INFO, "Executor did not terminate");
    } catch (InterruptedException e) {
        log.log(Level.WARNING, "Shutdown of executor for loading sessions failed: " + Exceptions.toMessageString(e));
        Thread.currentThread().interrupt(); // restore interrupt status so callers can observe the interruption
    }
}
/** Caches the given local session, creating a corresponding remote session if one does not exist. */
public void addLocalSession(LocalSession session) {
    long id = session.getSessionId();
    localSessionCache.put(id, session);
    if ( ! remoteSessionCache.containsKey(id))
        createRemoteSession(id);
}
/** Returns the cached local session with the given id, or null if not known. */
public LocalSession getLocalSession(long sessionId) {
    return localSessionCache.get(sessionId);
}
/** Returns an immutable snapshot of the currently cached local sessions. */
public Collection<LocalSession> getLocalSessions() {
    return List.copyOf(localSessionCache.values());
}
/** Reads every session application directory on disk and returns a session object for each. */
public Set<LocalSession> getLocalSessionsFromFileSystem() {
    File[] sessionDirs = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
    if (sessionDirs == null) return Set.of();
    Set<LocalSession> sessions = new HashSet<>();
    for (File dir : sessionDirs) {
        long id = Long.parseLong(dir.getName()); // directory name is the session id
        sessions.add(getSessionFromFile(id));
    }
    return sessions;
}
/** Builds a local session object from the application package stored on disk for the given id. */
private LocalSession getSessionFromFile(long sessionId) {
    SessionZooKeeperClient zkClient = createSessionZooKeeperClient(sessionId);
    File appDir = getAndValidateExistingSessionAppDir(sessionId);
    return new LocalSession(tenantName, sessionId, FilesApplicationPackage.fromFile(appDir), zkClient);
}
/** Returns the ids of all session application directories present on disk. */
public Set<Long> getLocalSessionsIdsFromFileSystem() {
    File[] sessionDirs = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
    if (sessionDirs == null) return Set.of();
    Set<Long> ids = new HashSet<>();
    for (File dir : sessionDirs)
        ids.add(Long.parseLong(dir.getName())); // directory name is the session id
    return ids;
}
/**
 * Prepares a local session: builds models for the session's application package and records
 * the resulting config change actions, then marks the session PREPARED.
 *
 * @return the config change actions resulting from preparing this session
 * @throws UnknownVespaVersionException if a requested Vespa version is not known by this server
 */
public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) {
    params.vespaVersion().ifPresent(version -> {
        // Bootstrap deployments are exempt: the requested version may not be loaded yet
        if ( ! params.isBootstrap() && ! modelFactoryRegistry.allVersions().contains(version))
            throw new UnknownVespaVersionException("Vespa version '" + version + "' not known by this config server");
    });
    applicationRepo.createApplication(params.getApplicationId());
    logger.log(Level.FINE, "Created application " + params.getApplicationId());
    long sessionId = session.getSessionId();
    SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
    // Dry runs do not wait for other servers, so no prepare waiter is needed
    Optional<CompletionWaiter> waiter = params.isDryRun()
            ? Optional.empty()
            : Optional.of(sessionZooKeeperClient.createPrepareWaiter());
    Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId());
    ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params,
                                                          activeApplicationSet, now, getSessionAppDir(sessionId),
                                                          session.getApplicationPackage(), sessionZooKeeperClient)
            .getConfigChangeActions();
    setPrepared(session);
    // Wait for the other config servers to finish preparing before returning
    waiter.ifPresent(w -> w.awaitCompletion(params.getTimeoutBudget().timeLeft()));
    return actions;
}
/**
 * Creates a new deployment session from an already existing session.
 *
 * @param existingSession the session to use as base
 * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
 * @param timeoutBudget timeout for creating session and waiting for other servers.
 * @param deployLogger logger receiving deploy progress messages
 * @return a new session
 */
public LocalSession createSessionFromExisting(Session existingSession,
                                              boolean internalRedeploy,
                                              TimeoutBudget timeoutBudget,
                                              DeployLogger deployLogger) {
    ApplicationId existingApplicationId = existingSession.getApplicationId();
    Tags existingTags = existingSession.getTags();
    File existingApp = getSessionAppDir(existingSession.getSessionId());
    LocalSession session = createSessionFromApplication(existingApp,
                                                        existingApplicationId,
                                                        existingTags,
                                                        internalRedeploy,
                                                        timeoutBudget,
                                                        deployLogger);
    // Carry all deployment metadata over from the session we are based on
    session.setApplicationId(existingApplicationId);
    session.setTags(existingTags);
    session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
    session.setVespaVersion(existingSession.getVespaVersion());
    session.setDockerImageRepository(existingSession.getDockerImageRepository());
    session.setAthenzDomain(existingSession.getAthenzDomain());
    session.setTenantSecretStores(existingSession.getTenantSecretStores());
    session.setOperatorCertificates(existingSession.getOperatorCertificates());
    session.setCloudAccount(existingSession.getCloudAccount());
    return session;
}
/**
 * Creates a new deployment session from an application package.
 *
 * @param applicationDirectory a File pointing to an application.
 * @param applicationId application id for this new session.
 * @param tags tags to attach to this session.
 * @param timeoutBudget Timeout for creating session and waiting for other servers.
 * @param deployLogger logger receiving deploy progress messages.
 * @return a new session
 */
public LocalSession createSessionFromApplicationPackage(File applicationDirectory,
                                                        ApplicationId applicationId,
                                                        Tags tags,
                                                        TimeoutBudget timeoutBudget,
                                                        DeployLogger deployLogger) {
    applicationRepo.createApplication(applicationId);
    return createSessionFromApplication(applicationDirectory, applicationId, tags, false, timeoutBudget, deployLogger);
}
/**
 * Creates a local session based on a remote session and the distributed application package.
 * Does not wait for session being created on other servers.
 */
private void createLocalSession(File applicationFile, ApplicationId applicationId, Tags tags, long sessionId) {
    try {
        var applicationPackage = createApplicationPackage(applicationFile, applicationId, tags,
                                                          sessionId, false, Optional.empty());
        createLocalSession(sessionId, applicationPackage);
    } catch (Exception e) {
        throw new RuntimeException("Error creating session " + sessionId, e);
    }
}
/**
 * Deletes the local session with the given id: closes and removes its state watcher,
 * evicts it from the cache, and deletes its on-disk application directory.
 */
public void deleteLocalSession(long sessionId) {
    log.log(Level.FINE, () -> "Deleting local session " + sessionId);
    SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
    if (watcher != null) watcher.close();
    localSessionCache.remove(sessionId);
    // Delete the session's files via a transaction so the delete happens at commit time
    NestedTransaction transaction = new NestedTransaction();
    transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath())));
    transaction.commit();
}
/** Deletes every local session known to this repository. */
private void deleteAllSessions() {
    for (LocalSession session : getLocalSessions())
        deleteLocalSession(session.getSessionId());
}
/** Returns the cached remote session with the given id, or null if not known. */
public RemoteSession getRemoteSession(long sessionId) {
    return remoteSessionCache.get(sessionId);
}
/** Returns an immutable snapshot of the currently cached remote sessions. */
public Collection<RemoteSession> getRemoteSessions() {
    return List.copyOf(remoteSessionCache.values());
}
/** Returns the ids of all sessions currently stored in ZooKeeper for this tenant. */
public List<Long> getRemoteSessionsFromZooKeeper() {
    return getSessionList(curator.getChildren(sessionsPath));
}
/**
 * Creates a remote session object for the given id, loads its application if it is the
 * active session for some application, caches it, and ensures a state watcher exists for it.
 */
public RemoteSession createRemoteSession(long sessionId) {
    SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
    RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
    loadSessionIfActive(session);
    remoteSessionCache.put(sessionId, session);
    updateSessionStateWatcher(sessionId);
    return session;
}
/**
 * Deletes expired, non-active remote sessions from ZooKeeper, bounded per invocation.
 *
 * @param clock clock used to decide expiry
 * @return the number of sessions deleted
 */
public int deleteExpiredRemoteSessions(Clock clock) {
    // Hosted systems redeploy frequently, so sessions can expire sooner there
    Duration expiryTime = configserverConfig.hostedVespa()
            ? sessionLifetime.multipliedBy(2)
            : sessionLifetime.multipliedBy(12);
    List<Long> remoteSessionsFromZooKeeper = getRemoteSessionsFromZooKeeper();
    log.log(Level.FINE, () -> "Remote sessions for tenant " + tenantName + ": " + remoteSessionsFromZooKeeper);
    int deleted = 0;
    // Delete at most ~1% of sessions per run, clamped to [10, 1000], to bound work per invocation
    int deleteMax = (int) Math.min(1000, Math.max(10, remoteSessionsFromZooKeeper.size() * 0.01));
    for (long sessionId : remoteSessionsFromZooKeeper) {
        Session session = remoteSessionCache.get(sessionId);
        if (session == null)
            session = new RemoteSession(tenantName, sessionId, createSessionZooKeeperClient(sessionId));
        if (session.getStatus() == Session.Status.ACTIVATE) continue; // never delete the active session
        if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) {
            log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it");
            deleteRemoteSessionFromZooKeeper(session);
            deleted++;
        }
        if (deleted >= deleteMax)
            break;
    }
    return deleted;
}
/**
 * Marks the cached remote session with the given id as deactivated.
 * Does nothing if the session is not in the cache.
 */
public void deactivateSession(long sessionId) {
    // computeIfPresent performs the read-modify-write atomically under the synchronized map's
    // lock, avoiding a lost update if another thread replaces the entry between get() and put()
    remoteSessionCache.computeIfPresent(sessionId, (id, session) -> session.deactivated());
}
/** Deletes all ZooKeeper state for the given session in a single transaction. */
public void deleteRemoteSessionFromZooKeeper(Session session) {
    SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
    Transaction transaction = sessionZooKeeperClient.deleteTransaction();
    transaction.commit();
    transaction.close();
}
/** Returns whether the given creation time is more than expiryTime before now. */
private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) {
    Instant expiresAt = created.plus(expiryTime);
    return expiresAt.isBefore(clock.instant());
}
/** Extracts session ids from the given directory cache child entries. */
private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) {
    List<String> names = new ArrayList<>();
    for (ChildData child : children)
        names.add(Path.fromString(child.getPath()).getName()); // last path element is the session id
    return getSessionList(names);
}
/** Parses the given child node names into session ids. */
private List<Long> getSessionList(List<String> children) {
    List<Long> ids = new ArrayList<>();
    for (String child : children)
        ids.add(Long.parseLong(child));
    return ids;
}
/**
 * Submits a load task for every remote session found in ZooKeeper and waits for all of them.
 *
 * @param executor the executor to submit load tasks to
 * @throws RuntimeException if loading a session fails or the wait is interrupted
 */
private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException {
    Map<Long, Future<?>> futures = new HashMap<>();
    for (long sessionId : getRemoteSessionsFromZooKeeper()) {
        futures.put(sessionId, executor.submit(() -> sessionAdded(sessionId)));
    }
    futures.forEach((sessionId, future) -> {
        try {
            future.get();
            log.log(Level.FINE, () -> "Remote session " + sessionId + " loaded");
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore interrupt status before propagating
            throw new RuntimeException("Could not load remote session " + sessionId, e);
        } catch (ExecutionException e) {
            throw new RuntimeException("Could not load remote session " + sessionId, e);
        }
    });
}
/**
 * A session for which we don't have a watcher, i.e. hitherto unknown to us.
 *
 * @param sessionId session id for the new session
 */
public void sessionAdded(long sessionId) {
    if (hasStatusDeleted(sessionId)) return; // already being torn down elsewhere
    log.log(Level.FINE, () -> "Adding remote session " + sessionId);
    Session session = createRemoteSession(sessionId);
    if (session.getStatus() == Session.Status.NEW) {
        log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId);
        confirmUpload(session);
    }
    createLocalSessionFromDistributedApplicationPackage(sessionId);
}
/** Returns whether the session with the given id has status DELETE in ZooKeeper. */
private boolean hasStatusDeleted(long sessionId) {
    RemoteSession session = new RemoteSession(tenantName, sessionId, createSessionZooKeeperClient(sessionId));
    return session.getStatus() == Session.Status.DELETE;
}
/**
 * Activates the remote session with the given id: ensures a local copy of the application
 * package exists, activates the application, and notifies the activation waiter.
 * Does nothing if the session is unknown to this server.
 */
void activate(long sessionId) {
    createLocalSessionFromDistributedApplicationPackage(sessionId);
    RemoteSession session = remoteSessionCache.get(sessionId);
    if (session == null) return;
    CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter();
    log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId);
    applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId);
    log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter);
    notifyCompletion(waiter);
    log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId);
}
/** Loads and activates the application for the given session if it is the active session of some application. */
private void loadSessionIfActive(RemoteSession session) {
    for (ApplicationId applicationId : applicationRepo.activeApplications()) {
        Optional<Long> activeSession = applicationRepo.activeSessionOf(applicationId);
        if (activeSession.isPresent() && activeSession.get() == session.getSessionId()) {
            log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it");
            applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId());
            log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")");
            return; // a session can be active for at most one application
        }
    }
}
/**
 * Reacts to a session entering the PREPARE state on another server: makes sure the
 * application is loaded locally and notifies the prepare waiter.
 * Does nothing if the session is unknown to this server.
 */
void prepareRemoteSession(long sessionId) {
    createLocalSessionFromDistributedApplicationPackage(sessionId);
    RemoteSession session = remoteSessionCache.get(sessionId);
    if (session == null) return;
    SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
    CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter();
    ensureApplicationLoaded(session);
    notifyCompletion(waiter);
}
/**
 * Returns the application set for the given session, loading (building models for) it if
 * needed. On load, the session is replaced in the cache by an activated copy holding the set.
 */
public ApplicationSet ensureApplicationLoaded(RemoteSession session) {
    if (session.applicationSet().isPresent()) {
        return session.applicationSet().get(); // already loaded
    }
    Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId());
    // Only reuse a previous application set if it belongs to an older session than this one
    Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan)
            .flatMap(this::getApplicationSet);
    ApplicationSet applicationSet = loadApplication(session, previousApplicationSet);
    RemoteSession activated = session.activated(applicationSet);
    long sessionId = activated.getSessionId();
    remoteSessionCache.put(sessionId, activated);
    updateSessionStateWatcher(sessionId);
    return applicationSet;
}
/** Notifies the upload waiter for the given session, signalling that the package upload is complete. */
void confirmUpload(Session session) {
    long sessionId = session.getSessionId();
    CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getUploadWaiter();
    log.log(Level.FINE, () -> "Notifying upload waiter for session " + sessionId);
    notifyCompletion(waiter);
    log.log(Level.FINE, () -> "Done notifying upload for session " + sessionId);
}
/**
 * Notifies the given completion waiter, tolerating the benign ZooKeeper races where the
 * waiter node has already been deleted or already exists. Any other failure is rethrown.
 */
void notifyCompletion(CompletionWaiter completionWaiter) {
    try {
        completionWaiter.notifyCompletion();
    } catch (RuntimeException e) {
        // Rethrow directly if there is no cause to classify; the original code would NPE here
        if (e.getCause() == null) throw e;
        Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class,
                                                                          KeeperException.NodeExistsException.class);
        Class<? extends Throwable> exceptionClass = e.getCause().getClass();
        if (acceptedExceptions.contains(exceptionClass))
            log.log(Level.FINE, () -> "Not able to notify completion for session (" + completionWaiter + ")," +
                                      " node " + (exceptionClass.equals(KeeperException.NoNodeException.class)
                    ? "has been deleted"
                    : "already exists"));
        else
            throw e;
    }
}
/**
 * Builds the application models for the given session from its application package in
 * ZooKeeper, optionally reusing state from a previous application set.
 */
private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) {
    log.log(Level.FINE, () -> "Loading application for " + session);
    SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
    ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage();
    ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(),
                                                                session.getSessionId(),
                                                                sessionZooKeeperClient,
                                                                previousApplicationSet,
                                                                sessionPreparer.getExecutor(),
                                                                curator,
                                                                metrics,
                                                                permanentApplicationPackage,
                                                                flagSource,
                                                                secretStore,
                                                                hostProvisionerProvider,
                                                                configserverConfig,
                                                                zone,
                                                                modelFactoryRegistry,
                                                                configDefinitionRepo);
    return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(),
                                                       sessionZooKeeperClient.readDockerImageRepository(),
                                                       sessionZooKeeperClient.readVespaVersion(),
                                                       applicationPackage,
                                                       new AllocatedHostsFromAllModels(),
                                                       clock.instant()));
}
/** Recomputes and publishes per-status session count metrics. Runs on the ZK watcher executor. */
private void nodeChanged() {
    zkWatcherExecutor.execute(() -> {
        Multiset<Session.Status> sessionMetrics = HashMultiset.create();
        getRemoteSessions().forEach(session -> sessionMetrics.add(session.getStatus()));
        metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW));
        metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE));
        metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE));
        metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE));
    });
}
/** Reacts to changes in the sessions ZooKeeper directory by re-syncing the session caches. */
@SuppressWarnings("unused")
private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
    zkWatcherExecutor.execute(() -> {
        log.log(Level.FINE, () -> "Got child event: " + event);
        switch (event.getType()) {
            case CHILD_ADDED, CHILD_REMOVED, CONNECTION_RECONNECTED -> sessionsChanged();
            default -> { } // other event types require no action
        }
    });
}
/**
 * Deletes expired local sessions from disk. A session is deleted when it has expired and is
 * deletable by status, or when it is more than a day old and is not the active session of
 * its application. Recently created sessions (last ~30 seconds) are always skipped, since
 * they may not yet have state in ZooKeeper.
 *
 * @param activeSessions map from application id to its currently active session id
 */
public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
    log.log(Level.FINE, () -> "Deleting expired local sessions for tenant '" + tenantName + "'");
    Set<Long> sessionIdsToDelete = new HashSet<>();
    Set<Long> newSessions = findNewSessionsInFileSystem();
    try {
        for (long sessionId : getLocalSessionsIdsFromFileSystem()) {
            // Skip sessions newly added (we might not have your own local session for this yet)
            if (newSessions.contains(sessionId))
                continue;
            var sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
            Instant createTime = sessionZooKeeperClient.readCreateTime();
            Session.Status status = sessionZooKeeperClient.readStatus();
            log.log(Level.FINE, () -> "Candidate local session for deletion: " + sessionId +
                    ", created: " + createTime + ", status " + status + ", can be deleted: " + canBeDeleted(sessionId, status) +
                    ", hasExpired: " + hasExpired(createTime));
            if (hasExpired(createTime) && canBeDeleted(sessionId, status)) {
                log.log(Level.FINE, () -> "expired: " + hasExpired(createTime) + ", can be deleted: " + canBeDeleted(sessionId, status));
                sessionIdsToDelete.add(sessionId);
            } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                // Not expired by lifetime, but old: delete it unless it is the active session
                LocalSession session;
                log.log(Level.FINE, () -> "not expired, but more than 1 day old: " + sessionId);
                try {
                    session = getSessionFromFile(sessionId);
                } catch (Exception e) {
                    log.log(Level.FINE, () -> "could not get session from file: " + sessionId + ": " + e.getMessage());
                    continue;
                }
                Optional<ApplicationId> applicationId = session.getOptionalApplicationId();
                if (applicationId.isEmpty()) continue;
                Long activeSession = activeSessions.get(applicationId.get());
                if (activeSession == null || activeSession != sessionId) {
                    sessionIdsToDelete.add(sessionId);
                    log.log(Level.FINE, () -> "Will delete inactive session " + sessionId + " created " +
                            createTime + " for '" + applicationId + "'");
                }
            }
        }
        sessionIdsToDelete.forEach(this::deleteLocalSession);
    } catch (Throwable e) {
        // Deliberately best-effort: expiry must never take down the server
        log.log(Level.WARNING, "Error when purging old sessions ", e);
    }
    log.log(Level.FINE, () -> "Done purging old sessions");
}
/** Returns whether the given creation time is older than the configured session lifetime. */
private boolean hasExpired(Instant created) {
    Instant deadline = created.plus(sessionLifetime);
    return deadline.isBefore(clock.instant());
}
/**
 * Returns whether a session with the given status may be deleted. Active sessions are kept,
 * and so are sessions with unknown status unless their directory is old enough.
 */
private boolean canBeDeleted(long sessionId, Session.Status status) {
    if (status != Session.Status.UNKNOWN && status != Session.Status.ACTIVATE) return true;
    return oldSessionDirWithUnknownStatus(sessionId, status);
}
/**
 * Returns whether the given session has UNKNOWN status and its application directory is
 * older than the configured retention time for unknown-status sessions.
 */
private boolean oldSessionDirWithUnknownStatus(long sessionId, Session.Status status) {
    Duration expiryTime = Duration.ofHours(configserverConfig.keepSessionsWithUnknownStatusHours());
    File sessionDir = tenantFileSystemDirs.getUserApplicationDir(sessionId);
    return sessionDir.exists()
            && status == Session.Status.UNKNOWN
            && created(sessionDir).plus(expiryTime).isBefore(clock.instant());
}
/**
 * Returns the ids of session directories modified within the last 30 seconds. These are
 * considered "new" and are excluded from expiry, since their ZooKeeper state may not exist yet.
 */
private Set<Long> findNewSessionsInFileSystem() {
    File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
    Set<Long> newSessions = new HashSet<>();
    if (sessions != null) {
        for (File session : sessions) {
            try {
                if (Files.getLastModifiedTime(session.toPath()).toInstant()
                        .isAfter(clock.instant().minus(Duration.ofSeconds(30))))
                    newSessions.add(Long.parseLong(session.getName()));
            } catch (IOException e) {
                // Best effort: a session we cannot stat is simply not treated as new
                log.log(Level.FINE, "Unable to find last modified time for " + session.toPath());
            }
        }
    }
    return newSessions;
}
/** Returns the file system creation time of the given file. */
private Instant created(File file) {
    try {
        BasicFileAttributes attributes = readAttributes(file.toPath(), BasicFileAttributes.class);
        return attributes.creationTime().toInstant();
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/** Throws if the ZooKeeper path for the given session id already exists. */
private void ensureSessionPathDoesNotExist(long sessionId) {
    Path sessionPath = getSessionPath(sessionId);
    if ( ! curator.exists(sessionPath)) return;
    throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper");
}
/**
 * Creates an application package from the copied application directory, attaching deploy
 * metadata (origin dir, timestamps, session ids) and validating file extensions.
 *
 * @param userDir the directory the user deployed from (recorded in deploy metadata)
 * @param configApplicationDir the config server's own copy of the application
 * @param currentlyActiveSessionId the currently active session, if any
 */
private ApplicationPackage createApplication(File userDir,
                                             File configApplicationDir,
                                             ApplicationId applicationId,
                                             Tags tags,
                                             long sessionId,
                                             Optional<Long> currentlyActiveSessionId,
                                             boolean internalRedeploy,
                                             Optional<DeployLogger> deployLogger) {
    long deployTimestamp = System.currentTimeMillis();
    DeployData deployData = new DeployData(userDir.getAbsolutePath(), applicationId, tags, deployTimestamp, internalRedeploy,
                                           sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId));
    FilesApplicationPackage app = FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData);
    validateFileExtensions(applicationId, deployLogger, app);
    return app;
}
/**
 * Validates file extensions in the application package. In hosted systems the reaction to
 * invalid extensions (fail, log, or ignore) is controlled by a feature flag; elsewhere a
 * warning is logged to the deploy logger.
 */
private void validateFileExtensions(ApplicationId applicationId, Optional<DeployLogger> deployLogger, FilesApplicationPackage app) {
    try {
        app.validateFileExtensions();
    } catch (IllegalArgumentException e) {
        if (configserverConfig.hostedVespa()) {
            UnboundStringFlag flag = PermanentFlags.APPLICATION_FILES_WITH_UNKNOWN_EXTENSION;
            String value = flag.bindTo(flagSource).with(APPLICATION_ID, applicationId.serializedForm()).value();
            switch (value) {
                case "FAIL" -> throw new InvalidApplicationException(e);
                case "LOG" -> deployLogger.ifPresent(logger -> logger.logApplicationPackage(Level.WARNING, e.getMessage()));
                default -> log.log(Level.WARNING, "Unknown value for flag " + flag.id() + ": " + value);
            }
        } else {
            // Self-hosted: never fail the deployment on unknown extensions, just warn
            deployLogger.ifPresent(logger -> logger.logApplicationPackage(Level.WARNING, e.getMessage()));
        }
    }
}
/**
 * Allocates a new session id, copies the application into the session directory, creates the
 * session in ZooKeeper, and waits (bounded) for other servers to confirm the upload.
 *
 * @return the new local session
 * @throws RuntimeException if copying or writing the application package fails
 */
private LocalSession createSessionFromApplication(File applicationDirectory,
                                                  ApplicationId applicationId,
                                                  Tags tags,
                                                  boolean internalRedeploy,
                                                  TimeoutBudget timeoutBudget,
                                                  DeployLogger deployLogger) {
    long sessionId = getNextSessionId();
    try {
        ensureSessionPathDoesNotExist(sessionId);
        ApplicationPackage app = createApplicationPackage(applicationDirectory, applicationId, tags, sessionId, internalRedeploy, Optional.of(deployLogger));
        log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        sessionZKClient.createNewSession(clock.instant());
        CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
        LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient);
        // Cap the upload wait at 2 minutes even if the caller's budget is larger
        waiter.awaitCompletion(Duration.ofSeconds(Math.min(120, timeoutBudget.timeLeft().getSeconds())));
        addLocalSession(session);
        return session;
    } catch (IOException e) {
        throw new RuntimeException("Error creating session " + sessionId, e);
    }
}
/**
 * Copies the application into the session's directory and creates an application package
 * from it with deploy metadata written. Synchronized on the monitor since the
 * active-session-id read and the copy must not interleave with other package creations.
 */
private ApplicationPackage createApplicationPackage(File applicationDirectory,
                                                    ApplicationId applicationId,
                                                    Tags tags,
                                                    long sessionId,
                                                    boolean internalRedeploy,
                                                    Optional<DeployLogger> deployLogger) throws IOException {
    synchronized (monitor) {
        Optional<Long> activeSessionId = getActiveSessionId(applicationId);
        File userApplicationDir = getSessionAppDir(sessionId);
        copyApp(applicationDirectory, userApplicationDir);
        ApplicationPackage applicationPackage = createApplication(applicationDirectory,
                                                                  userApplicationDir,
                                                                  applicationId,
                                                                  tags,
                                                                  sessionId,
                                                                  activeSessionId,
                                                                  internalRedeploy,
                                                                  deployLogger);
        applicationPackage.writeMetaData();
        return applicationPackage;
    }
}
/** Returns the application set of the given application's active session, if any. */
public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) {
    Optional<Long> activeSession = applicationRepo.activeSessionOf(appId);
    return activeSession.flatMap(this::getApplicationSet);
}
/** Returns the application set for the given session, or empty if unknown or not loadable. */
private Optional<ApplicationSet> getApplicationSet(long sessionId) {
    try {
        return Optional.ofNullable(getRemoteSession(sessionId)).map(this::ensureApplicationLoaded);
    } catch (IllegalArgumentException ignored) {
        // Best effort: a session whose application cannot be loaded is treated as absent
        return Optional.empty();
    }
}
private void copyApp(File sourceDir, File destinationDir) throws IOException {
if (destinationDir.exists()) {
log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied");
return;
}
if (! sourceDir.isDirectory())
throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory");
java.nio.file.Path tempDestinationDir = null;
try {
tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
moveSearchDefinitionsToSchemasDir(tempDestinationDir);
log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
} finally {
if (tempDestinationDir != null)
IOUtils.recursiveDeleteDir(tempDestinationDir.toFile());
}
}
/**
 * Returns a new session instance for the given session id.
 */
void createSessionFromId(long sessionId) {
    File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
    createLocalSession(sessionId, FilesApplicationPackage.fromFile(sessionDir));
}
/** Creates a local session for the given id and application package, and adds it to the cache. */
void createLocalSession(long sessionId, ApplicationPackage applicationPackage) {
    var zkClient = createSessionZooKeeperClient(sessionId);
    addLocalSession(new LocalSession(tenantName, sessionId, applicationPackage, zkClient));
}
/**
 * Create a new local session for the given session id if it does not already exist.
 * Will also add the session to the local session cache if necessary. If there is no
 * remote session matching the session it will also be created.
 */
public void createLocalSessionFromDistributedApplicationPackage(long sessionId) {
    if (applicationRepo.sessionExistsInFileSystem(sessionId)) {
        log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists");
        createSessionFromId(sessionId);
        return;
    }
    SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
    FileReference fileReference = sessionZKClient.readApplicationPackageReference();
    log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference);
    if (fileReference != null) {
        File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir()));
        File sessionDir;
        FileDirectory fileDirectory = new FileDirectory(rootDir);
        try {
            sessionDir = fileDirectory.getFile(fileReference);
        } catch (IllegalArgumentException e) {
            // We cannot be guaranteed that the file reference exists (it could be deleted
            // by a background job), so just return in that case
            log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
            return;
        }
        ApplicationId applicationId = sessionZKClient.readApplicationId()
                .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
        log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId);
        try {
            createLocalSession(sessionDir, applicationId, sessionZKClient.readTags(), sessionId);
        } finally {
            // Fix: the original message concatenated the directory where the session id was intended
            log.log(Level.FINE, "Deleting file distribution reference for app package with session id " + sessionId);
            IOUtils.recursiveDeleteDir(sessionDir);
        }
    }
}
/** Returns the id of the active session of the given application, if any. */
private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
    return applicationRepo.activeSessionOf(applicationId);
}
/** Allocates and returns the next session id from the shared counter in ZooKeeper. */
private long getNextSessionId() {
    return sessionCounter.nextSessionId();
}
/** Returns the ZooKeeper path for the given session. */
public Path getSessionPath(long sessionId) {
    return sessionsPath.append(Long.toString(sessionId));
}
/** Returns the ZooKeeper path of the given session's state node. */
Path getSessionStatePath(long sessionId) {
    return getSessionPath(sessionId).append(ZKApplication.SESSIONSTATE_ZK_SUBPATH);
}
/** Creates a ZooKeeper client scoped to the given session's ZooKeeper state. */
public SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) {
    return new SessionZooKeeperClient(curator,
                                      tenantName,
                                      sessionId,
                                      configserverConfig.serverId(),
                                      fileDistributionFactory.createFileManager(getSessionAppDir(sessionId)),
                                      maxNodeSize);
}
/** Returns the application directory of the given session, throwing if it does not exist. */
private File getAndValidateExistingSessionAppDir(long sessionId) {
    File appDir = getSessionAppDir(sessionId);
    if (appDir.exists() && appDir.isDirectory()) return appDir;
    throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId);
}
/** Returns the on-disk application directory for the given session. */
private File getSessionAppDir(long sessionId) {
    // NOTE(review): constructs a new TenantFileSystemDirs per call instead of reusing the
    // tenantFileSystemDirs field — presumably equivalent, but verify before consolidating
    return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId);
}
/** Ensures a state watcher exists for the given session, creating one (and its file cache) if missing. */
private void updateSessionStateWatcher(long sessionId) {
    sessionStateWatchers.computeIfAbsent(sessionId, (id) -> {
        Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(id).getAbsolute(), false);
        fileCache.addListener(this::nodeChanged); // keep session metrics up to date on state changes
        return new SessionStateWatcher(fileCache, id, metricUpdater, zkWatcherExecutor, this);
    });
}
/** Returns a string listing this repository's local sessions. */
@Override
public String toString() {
    return getLocalSessions().toString();
}
/** Returns the clock this repository uses for all time decisions. */
public Clock clock() { return clock; }
/**
 * Closes this repository: deletes all local sessions and tenant files, stops the
 * directory cache, and closes all session state watchers.
 */
public void close() {
    deleteAllSessions();
    tenantFileSystemDirs.delete();
    try {
        if (directoryCache != null) {
            directoryCache.close();
        }
    } catch (Exception e) {
        log.log(Level.WARNING, "Exception when closing path cache", e);
    } finally {
        // An empty "existing" list makes this remove (and close watchers for) every cached session
        checkForRemovedSessions(new ArrayList<>());
    }
}
/** Syncs the in-memory session caches with the current content of the sessions directory in ZooKeeper. */
private void sessionsChanged() throws NumberFormatException {
    List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData());
    checkForRemovedSessions(sessions);
    checkForAddedSessions(sessions);
}
/**
 * Removes cached remote sessions (and closes their state watchers) that are no longer
 * present in ZooKeeper.
 *
 * @param existingSessions ids of the sessions currently present in ZooKeeper
 */
private void checkForRemovedSessions(List<Long> existingSessions) {
    // Iterating over a Collections.synchronizedMap view requires holding the map's monitor,
    // otherwise concurrent modification can break the iteration (see synchronizedMap javadoc)
    synchronized (remoteSessionCache) {
        for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) {
            long sessionId = it.next().sessionId;
            if (existingSessions.contains(sessionId)) continue;
            SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
            if (watcher != null) watcher.close();
            it.remove();
            metricUpdater.incRemovedSessions();
        }
    }
}
/** Adds any session present in ZooKeeper but missing from the remote session cache. */
private void checkForAddedSessions(List<Long> sessions) {
    for (Long sessionId : sessions) {
        if ( ! remoteSessionCache.containsKey(sessionId))
            sessionAdded(sessionId);
    }
}
/** Returns a transaction that marks the session ACTIVE and records it as the application's active session. */
public Transaction createActivateTransaction(Session session) {
    Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
    Transaction putTransaction = applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId());
    transaction.add(putTransaction.operations());
    return transaction;
}
/** Returns a transaction that writes the given status for the session to ZooKeeper. */
public Transaction createSetStatusTransaction(Session session, Session.Status status) {
    return session.sessionZooKeeperClient.createWriteStatusTransaction(status);
}
/** Marks the given session as prepared. */
void setPrepared(Session session) {
    session.setStatus(Session.Status.PREPARE);
}
/** A transaction whose operations mutate the file system, applied at commit time. */
private static class FileTransaction extends AbstractTransaction {

    /** Returns a transaction containing the single given operation. */
    public static FileTransaction from(FileOperation operation) {
        FileTransaction transaction = new FileTransaction();
        transaction.add(operation);
        return transaction;
    }

    @Override
    public void prepare() { } // nothing to prepare; all work happens at commit

    @Override
    public void commit() {
        for (Operation operation : operations())
            ((FileOperation) operation).commit();
    }

}
/** Factory for file operations */
private static class FileOperations {

    private FileOperations() { } // utility class; prevent instantiation

    /** Creates an operation which recursively deletes the given path */
    public static DeleteOperation delete(String pathToDelete) {
        return new DeleteOperation(pathToDelete);
    }

}
/** A single file-system operation that is applied when the owning transaction commits. */
private interface FileOperation extends Transaction.Operation {
void commit();
}
/**
 * Recursively deletes this path and everything below.
 * Succeeds with no action if the path does not exist.
 */
private static class DeleteOperation implements FileOperation {
private final String pathToDelete;
DeleteOperation(String pathToDelete) {
this.pathToDelete = pathToDelete;
}
@Override
public void commit() {
// IOUtils.recursiveDeleteDir is a no-op for non-existing paths, matching the class contract
IOUtils.recursiveDeleteDir(new File(pathToDelete));
}
}
} | class SessionRepository {
private static final Logger log = Logger.getLogger(SessionRepository.class.getName());
// Session directories on disk are named by their numeric session id
private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+");
// Sentinel used in DeployData when an application has no currently active session
private static final long nonExistingActiveSessionId = 0;
// Guards application package creation (see createApplicationPackage)
private final Object monitor = new Object();
private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>());
private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>());
private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>());
private final Duration sessionLifetime;
private final Clock clock;
private final Curator curator;
// Runs ZooKeeper watcher callbacks, striped per tenant
private final Executor zkWatcherExecutor;
private final FileDistributionFactory fileDistributionFactory;
private final PermanentApplicationPackage permanentApplicationPackage;
private final FlagSource flagSource;
private final TenantFileSystemDirs tenantFileSystemDirs;
private final Metrics metrics;
private final MetricUpdater metricUpdater;
// Watches the tenant's sessions path in ZooKeeper for added/removed sessions
private final Curator.DirectoryCache directoryCache;
private final TenantApplications applicationRepo;
private final SessionPreparer sessionPreparer;
private final Path sessionsPath;
private final TenantName tenantName;
private final SessionCounter sessionCounter;
private final SecretStore secretStore;
private final HostProvisionerProvider hostProvisionerProvider;
private final ConfigserverConfig configserverConfig;
private final ConfigServerDB configServerDB;
private final Zone zone;
private final ModelFactoryRegistry modelFactoryRegistry;
private final ConfigDefinitionRepo configDefinitionRepo;
private final int maxNodeSize;
/**
 * Creates a session repository for a tenant. Existing sessions are loaded synchronously
 * before the ZooKeeper directory cache is started, so change events observed after
 * construction operate on a fully populated cache.
 */
public SessionRepository(TenantName tenantName,
TenantApplications applicationRepo,
SessionPreparer sessionPreparer,
Curator curator,
Metrics metrics,
StripedExecutor<TenantName> zkWatcherExecutor,
FileDistributionFactory fileDistributionFactory,
PermanentApplicationPackage permanentApplicationPackage,
FlagSource flagSource,
ExecutorService zkCacheExecutor,
SecretStore secretStore,
HostProvisionerProvider hostProvisionerProvider,
ConfigserverConfig configserverConfig,
ConfigServerDB configServerDB,
Zone zone,
Clock clock,
ModelFactoryRegistry modelFactoryRegistry,
ConfigDefinitionRepo configDefinitionRepo,
int maxNodeSize) {
this.tenantName = tenantName;
sessionCounter = new SessionCounter(curator, tenantName);
this.sessionsPath = TenantRepository.getSessionsPath(tenantName);
this.clock = clock;
this.curator = curator;
this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime());
// Stripe watcher work by tenant so events for one tenant are processed in order
this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command);
this.fileDistributionFactory = fileDistributionFactory;
this.permanentApplicationPackage = permanentApplicationPackage;
this.flagSource = flagSource;
this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName);
this.applicationRepo = applicationRepo;
this.sessionPreparer = sessionPreparer;
this.metrics = metrics;
this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName));
this.secretStore = secretStore;
this.hostProvisionerProvider = hostProvisionerProvider;
this.configserverConfig = configserverConfig;
this.configServerDB = configServerDB;
this.zone = zone;
this.modelFactoryRegistry = modelFactoryRegistry;
this.configDefinitionRepo = configDefinitionRepo;
this.maxNodeSize = maxNodeSize;
// Load existing sessions before starting the directory cache, so childEvent sees a warm cache
loadSessions();
this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor);
this.directoryCache.addListener(this::childEvent);
this.directoryCache.start();
}
/** Loads all remote sessions using a dedicated daemon-thread pool sized to the host. */
private void loadSessions() {
    int threadCount = Math.max(8, Runtime.getRuntime().availableProcessors());
    ExecutorService executor = Executors.newFixedThreadPool(threadCount,
                                                            new DaemonThreadFactory("load-sessions-"));
    loadSessions(executor);
}
/**
 * Loads all remote sessions on the given executor, then shuts it down and waits
 * (up to one minute) for the load tasks to finish.
 */
void loadSessions(ExecutorService executor) {
    loadRemoteSessions(executor);
    try {
        executor.shutdown();
        if ( ! executor.awaitTermination(1, TimeUnit.MINUTES))
            log.log(Level.INFO, "Executor did not terminate");
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore interrupt status instead of swallowing it
        log.log(Level.WARNING, "Shutdown of executor for loading sessions failed: " + Exceptions.toMessageString(e));
    }
}
/** Caches the given local session, creating a matching remote session if one is not cached yet. */
public void addLocalSession(LocalSession session) {
    long id = session.getSessionId();
    localSessionCache.put(id, session);
    if ( ! remoteSessionCache.containsKey(id))
        createRemoteSession(id);
}
/** Returns the cached local session with the given id, or null if not cached. */
public LocalSession getLocalSession(long sessionId) {
return localSessionCache.get(sessionId);
}
/** Returns a copy of local sessions */
public Collection<LocalSession> getLocalSessions() {
return List.copyOf(localSessionCache.values());
}
/** Reads all local sessions from their on-disk application directories. */
public Set<LocalSession> getLocalSessionsFromFileSystem() {
    File[] sessionDirs = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
    if (sessionDirs == null) return Set.of();
    Set<LocalSession> localSessions = new HashSet<>();
    for (File sessionDir : sessionDirs) {
        long sessionId = Long.parseLong(sessionDir.getName());
        localSessions.add(getSessionFromFile(sessionId));
    }
    return localSessions;
}
/**
 * Reconstructs a local session from its on-disk application directory.
 *
 * @throws IllegalArgumentException if the session's application directory is missing
 */
private LocalSession getSessionFromFile(long sessionId) {
SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
}
/** Returns the ids of all sessions that have an application directory on disk. */
public Set<Long> getLocalSessionsIdsFromFileSystem() {
    File[] sessionDirs = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
    if (sessionDirs == null) return Set.of();
    Set<Long> ids = new HashSet<>();
    for (File sessionDir : sessionDirs)
        ids.add(Long.parseLong(sessionDir.getName())); // directory names are numeric by the filter above
    return ids;
}
/**
 * Prepares a local session: validates the requested Vespa version, creates the application
 * entry, runs the session preparer, marks the session PREPARE and waits for other config
 * servers to confirm (unless this is a dry run).
 *
 * @return the config change actions resulting from preparation
 * @throws UnknownVespaVersionException if a non-bootstrap deployment requests an unknown version
 */
public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) {
params.vespaVersion().ifPresent(version -> {
if ( ! params.isBootstrap() && ! modelFactoryRegistry.allVersions().contains(version))
throw new UnknownVespaVersionException("Vespa version '" + version + "' not known by this config server");
});
applicationRepo.createApplication(params.getApplicationId());
logger.log(Level.FINE, "Created application " + params.getApplicationId());
long sessionId = session.getSessionId();
SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
// Dry runs skip the cross-server prepare handshake
Optional<CompletionWaiter> waiter = params.isDryRun()
? Optional.empty()
: Optional.of(sessionZooKeeperClient.createPrepareWaiter());
Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId());
ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params,
activeApplicationSet, now, getSessionAppDir(sessionId),
session.getApplicationPackage(), sessionZooKeeperClient)
.getConfigChangeActions();
// Status must be written before waiting, so other servers can observe completion
setPrepared(session);
waiter.ifPresent(w -> w.awaitCompletion(params.getTimeoutBudget().timeLeft()));
return actions;
}
/**
 * Creates a new deployment session from an already existing session.
 *
 * @param existingSession the session to use as base
 * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
 * @param timeoutBudget timeout for creating session and waiting for other servers.
 * @return a new session
 */
public LocalSession createSessionFromExisting(Session existingSession,
boolean internalRedeploy,
TimeoutBudget timeoutBudget,
DeployLogger deployLogger) {
ApplicationId existingApplicationId = existingSession.getApplicationId();
Tags existingTags = existingSession.getTags();
File existingApp = getSessionAppDir(existingSession.getSessionId());
LocalSession session = createSessionFromApplication(existingApp,
existingApplicationId,
existingTags,
internalRedeploy,
timeoutBudget,
deployLogger);
// Carry over all deployment metadata from the session we are based on
session.setApplicationId(existingApplicationId);
session.setTags(existingTags);
session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
session.setVespaVersion(existingSession.getVespaVersion());
session.setDockerImageRepository(existingSession.getDockerImageRepository());
session.setAthenzDomain(existingSession.getAthenzDomain());
session.setTenantSecretStores(existingSession.getTenantSecretStores());
session.setOperatorCertificates(existingSession.getOperatorCertificates());
session.setCloudAccount(existingSession.getCloudAccount());
return session;
}
/**
 * Creates a new deployment session from an application package.
 *
 * @param applicationDirectory a File pointing to an application.
 * @param applicationId application id for this new session.
 * @param timeoutBudget Timeout for creating session and waiting for other servers.
 * @return a new session
 */
public LocalSession createSessionFromApplicationPackage(File applicationDirectory,
ApplicationId applicationId,
Tags tags,
TimeoutBudget timeoutBudget,
DeployLogger deployLogger) {
applicationRepo.createApplication(applicationId);
return createSessionFromApplication(applicationDirectory, applicationId, tags, false, timeoutBudget, deployLogger);
}
/**
 * Creates a local session based on a remote session and the distributed application package.
 * Does not wait for session being created on other servers.
 */
private void createLocalSession(File applicationFile, ApplicationId applicationId, Tags tags, long sessionId) {
    try {
        ApplicationPackage applicationPackage =
                createApplicationPackage(applicationFile, applicationId, tags, sessionId, false, Optional.empty());
        createLocalSession(sessionId, applicationPackage);
    } catch (Exception e) {
        throw new RuntimeException("Error creating session " + sessionId, e);
    }
}
/**
 * Deletes a local session: closes its state watcher, evicts it from the cache and
 * removes its application directory from disk.
 */
public void deleteLocalSession(long sessionId) {
log.log(Level.FINE, () -> "Deleting local session " + sessionId);
SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
if (watcher != null) watcher.close();
localSessionCache.remove(sessionId);
NestedTransaction transaction = new NestedTransaction();
transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath())));
transaction.commit();
}
/** Deletes every cached local session, including its on-disk application directory. */
private void deleteAllSessions() {
    getLocalSessions().forEach(session -> deleteLocalSession(session.getSessionId()));
}
/** Returns the cached remote session with the given id, or null if not cached. */
public RemoteSession getRemoteSession(long sessionId) {
return remoteSessionCache.get(sessionId);
}
/** Returns a copy of remote sessions */
public Collection<RemoteSession> getRemoteSessions() {
return List.copyOf(remoteSessionCache.values());
}
/** Returns the ids of all sessions currently stored under the tenant's sessions path in ZooKeeper. */
public List<Long> getRemoteSessionsFromZooKeeper() {
return getSessionList(curator.getChildren(sessionsPath));
}
/**
 * Creates and caches a remote session for the given id, loading its application
 * if it is the active session of some application, and sets up a state watcher for it.
 */
public RemoteSession createRemoteSession(long sessionId) {
SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
loadSessionIfActive(session);
remoteSessionCache.put(sessionId, session);
updateSessionStateWatcher(sessionId);
return session;
}
/**
 * Deletes expired, non-active remote sessions from ZooKeeper.
 * Deletion is rate-limited per call (between 10 and 1000 sessions, ~1% of the total)
 * to avoid long-running sweeps.
 *
 * @return the number of sessions deleted
 */
public int deleteExpiredRemoteSessions(Clock clock) {
// Hosted systems redeploy frequently, so sessions there can be expired sooner
Duration expiryTime = configserverConfig.hostedVespa()
? sessionLifetime.multipliedBy(2)
: sessionLifetime.multipliedBy(12);
List<Long> remoteSessionsFromZooKeeper = getRemoteSessionsFromZooKeeper();
log.log(Level.FINE, () -> "Remote sessions for tenant " + tenantName + ": " + remoteSessionsFromZooKeeper);
int deleted = 0;
int deleteMax = (int) Math.min(1000, Math.max(10, remoteSessionsFromZooKeeper.size() * 0.01));
for (long sessionId : remoteSessionsFromZooKeeper) {
Session session = remoteSessionCache.get(sessionId);
if (session == null)
session = new RemoteSession(tenantName, sessionId, createSessionZooKeeperClient(sessionId));
if (session.getStatus() == Session.Status.ACTIVATE) continue; // never delete the active session
if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) {
log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it");
deleteRemoteSessionFromZooKeeper(session);
deleted++;
}
if (deleted >= deleteMax)
break;
}
return deleted;
}
/** Replaces the cached remote session with its deactivated variant; no-op if not cached. */
public void deactivateSession(long sessionId) {
    remoteSessionCache.computeIfPresent(sessionId, (id, session) -> session.deactivated());
}
/** Deletes all ZooKeeper state for the given session. */
public void deleteRemoteSessionFromZooKeeper(Session session) {
SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
Transaction transaction = sessionZooKeeperClient.deleteTransaction();
transaction.commit();
transaction.close();
}
/** Returns whether the session created at the given instant has passed its expiry time. */
private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) {
    Instant expiry = created.plus(expiryTime);
    return expiry.isBefore(clock.instant());
}
/** Extracts session ids from the directory cache's child nodes (node names are session ids). */
private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) {
    List<String> sessionNames = new ArrayList<>(children.size());
    for (ChildData child : children)
        sessionNames.add(Path.fromString(child.getPath()).getName());
    return getSessionList(sessionNames);
}
/** Parses a list of numeric node names into session ids. */
private List<Long> getSessionList(List<String> children) {
    List<Long> sessionIds = new ArrayList<>(children.size());
    for (String name : children)
        sessionIds.add(Long.parseLong(name));
    return sessionIds;
}
/**
 * Loads every remote session found in ZooKeeper in parallel on the given executor,
 * then waits for all loads to finish.
 *
 * @throws RuntimeException if any session fails to load
 */
private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException {
    Map<Long, Future<?>> futures = new HashMap<>();
    for (long sessionId : getRemoteSessionsFromZooKeeper()) {
        futures.put(sessionId, executor.submit(() -> sessionAdded(sessionId)));
    }
    futures.forEach((sessionId, future) -> {
        try {
            future.get();
            log.log(Level.FINE, () -> "Remote session " + sessionId + " loaded");
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore interrupt status before propagating
            throw new RuntimeException("Could not load remote session " + sessionId, e);
        } catch (ExecutionException e) {
            throw new RuntimeException("Could not load remote session " + sessionId, e);
        }
    });
}
/**
 * A session for which we don't have a watcher, i.e. hitherto unknown to us.
 *
 * @param sessionId session id for the new session
 */
public void sessionAdded(long sessionId) {
if (hasStatusDeleted(sessionId)) return; // skip sessions already marked deleted in ZooKeeper
log.log(Level.FINE, () -> "Adding remote session " + sessionId);
Session session = createRemoteSession(sessionId);
if (session.getStatus() == Session.Status.NEW) {
log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId);
confirmUpload(session);
}
createLocalSessionFromDistributedApplicationPackage(sessionId);
}
/** Returns whether the session's status in ZooKeeper is DELETE. */
private boolean hasStatusDeleted(long sessionId) {
    RemoteSession session = new RemoteSession(tenantName, sessionId, createSessionZooKeeperClient(sessionId));
    return session.getStatus() == Session.Status.DELETE;
}
/**
 * Activates the remote session with the given id: makes sure the local copy of the
 * application package exists, activates the application and notifies the waiter so
 * other config servers see the activation complete.
 */
void activate(long sessionId) {
createLocalSessionFromDistributedApplicationPackage(sessionId);
RemoteSession session = remoteSessionCache.get(sessionId);
if (session == null) return;
CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter();
log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId);
applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId);
log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter);
notifyCompletion(waiter);
log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId);
}
/** Loads and activates the application for this session if it is the active session of some application. */
private void loadSessionIfActive(RemoteSession session) {
for (ApplicationId applicationId : applicationRepo.activeApplications()) {
Optional<Long> activeSession = applicationRepo.activeSessionOf(applicationId);
if (activeSession.isPresent() && activeSession.get() == session.getSessionId()) {
log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it");
applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId());
log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")");
return; // a session can be the active session of at most one application
}
}
}
/**
 * Completes the prepare step for a remote session: materializes the local application
 * package, loads the application and notifies the prepare waiter.
 */
void prepareRemoteSession(long sessionId) {
createLocalSessionFromDistributedApplicationPackage(sessionId);
RemoteSession session = remoteSessionCache.get(sessionId);
if (session == null) return;
SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter();
ensureApplicationLoaded(session);
notifyCompletion(waiter);
}
/**
 * Returns the application set for the given session, loading it (and caching the
 * activated session) if it is not already loaded. An older active session's application
 * set, if any, is passed to the loader for model reuse.
 */
public ApplicationSet ensureApplicationLoaded(RemoteSession session) {
if (session.applicationSet().isPresent()) {
return session.applicationSet().get();
}
Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId());
// Only reuse the previous application set if this session is newer than the active one
Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan)
.flatMap(this::getApplicationSet);
ApplicationSet applicationSet = loadApplication(session, previousApplicationSet);
RemoteSession activated = session.activated(applicationSet);
long sessionId = activated.getSessionId();
remoteSessionCache.put(sessionId, activated);
updateSessionStateWatcher(sessionId);
return applicationSet;
}
/** Notifies the upload waiter for the given session, confirming the upload on this server. */
void confirmUpload(Session session) {
    long sessionId = session.getSessionId();
    CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getUploadWaiter();
    log.log(Level.FINE, () -> "Notifying upload waiter for session " + sessionId);
    notifyCompletion(waiter);
    log.log(Level.FINE, () -> "Done notifying upload for session " + sessionId);
}
/**
 * Notifies the given completion waiter. Benign ZooKeeper races (waiter node already
 * deleted, or already created) are logged and ignored; anything else is rethrown.
 */
void notifyCompletion(CompletionWaiter completionWaiter) {
    try {
        completionWaiter.notifyCompletion();
    } catch (RuntimeException e) {
        // Guard against exceptions without a cause: the original code NPE'd on e.getCause().getClass()
        Throwable cause = e.getCause();
        if (cause == null) throw e;
        Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class,
                                                                          KeeperException.NodeExistsException.class);
        Class<? extends Throwable> exceptionClass = cause.getClass();
        if (acceptedExceptions.contains(exceptionClass))
            log.log(Level.FINE, () -> "Not able to notify completion for session (" + completionWaiter + ")," +
                    " node " + (exceptionClass.equals(KeeperException.NoNodeException.class)
                    ? "has been deleted"
                    : "already exists"));
        else
            throw e;
    }
}
/**
 * Builds the application set (config models) for the given session from the application
 * package stored in ZooKeeper.
 *
 * @param previousApplicationSet an older application set to reuse model state from, if any
 */
private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) {
log.log(Level.FINE, () -> "Loading application for " + session);
SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage();
ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(),
session.getSessionId(),
sessionZooKeeperClient,
previousApplicationSet,
sessionPreparer.getExecutor(),
curator,
metrics,
permanentApplicationPackage,
flagSource,
secretStore,
hostProvisionerProvider,
configserverConfig,
zone,
modelFactoryRegistry,
configDefinitionRepo);
return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(),
sessionZooKeeperClient.readDockerImageRepository(),
sessionZooKeeperClient.readVespaVersion(),
applicationPackage,
new AllocatedHostsFromAllModels(),
clock.instant()));
}
/** Recomputes per-status session-count metrics; runs on the ZooKeeper watcher executor. */
private void nodeChanged() {
zkWatcherExecutor.execute(() -> {
Multiset<Session.Status> sessionMetrics = HashMultiset.create();
getRemoteSessions().forEach(session -> sessionMetrics.add(session.getStatus()));
metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW));
metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE));
metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE));
metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE));
});
}
/** Directory cache callback: re-syncs the session caches when children change or we reconnect. */
@SuppressWarnings("unused")
private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
    zkWatcherExecutor.execute(() -> {
        log.log(Level.FINE, () -> "Got child event: " + event);
        switch (event.getType()) {
            case CHILD_ADDED, CHILD_REMOVED, CONNECTION_RECONNECTED -> sessionsChanged();
            default -> { }
        }
    });
}
/**
 * Deletes expired local sessions. Sessions created in the last ~30 seconds are skipped
 * (they may still be in flight). A session is deleted when it has passed its lifetime
 * and is deletable, or when it is more than a day old and no longer the active session
 * of its application. Errors are logged, never propagated, since this is best-effort cleanup.
 *
 * @param activeSessions the currently active session id per application
 */
public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
log.log(Level.FINE, () -> "Deleting expired local sessions for tenant '" + tenantName + "'");
Set<Long> sessionIdsToDelete = new HashSet<>();
Set<Long> newSessions = findNewSessionsInFileSystem();
try {
for (long sessionId : getLocalSessionsIdsFromFileSystem()) {
// Skip sessions created just now, they might not be fully created yet
if (newSessions.contains(sessionId))
continue;
var sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
Instant createTime = sessionZooKeeperClient.readCreateTime();
Session.Status status = sessionZooKeeperClient.readStatus();
log.log(Level.FINE, () -> "Candidate local session for deletion: " + sessionId +
", created: " + createTime + ", status " + status + ", can be deleted: " + canBeDeleted(sessionId, status) +
", hasExpired: " + hasExpired(createTime));
if (hasExpired(createTime) && canBeDeleted(sessionId, status)) {
log.log(Level.FINE, () -> "expired: " + hasExpired(createTime) + ", can be deleted: " + canBeDeleted(sessionId, status));
sessionIdsToDelete.add(sessionId);
} else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
LocalSession session;
log.log(Level.FINE, () -> "not expired, but more than 1 day old: " + sessionId);
try {
session = getSessionFromFile(sessionId);
} catch (Exception e) {
log.log(Level.FINE, () -> "could not get session from file: " + sessionId + ": " + e.getMessage());
continue;
}
Optional<ApplicationId> applicationId = session.getOptionalApplicationId();
if (applicationId.isEmpty()) continue;
Long activeSession = activeSessions.get(applicationId.get());
// Delete old sessions that are not the active session of their application
if (activeSession == null || activeSession != sessionId) {
sessionIdsToDelete.add(sessionId);
log.log(Level.FINE, () -> "Will delete inactive session " + sessionId + " created " +
createTime + " for '" + applicationId + "'");
}
}
}
sessionIdsToDelete.forEach(this::deleteLocalSession);
} catch (Throwable e) {
// Deliberately broad: expiry must never take down the caller
log.log(Level.WARNING, "Error when purging old sessions ", e);
}
log.log(Level.FINE, () -> "Done purging old sessions");
}
/** Returns whether the session created at the given instant has outlived the configured session lifetime. */
private boolean hasExpired(Instant created) {
    return clock.instant().isAfter(created.plus(sessionLifetime));
}
/**
 * A session can be deleted unless it is ACTIVATE or UNKNOWN — except that an old enough
 * session directory with UNKNOWN status is also deletable.
 */
private boolean canBeDeleted(long sessionId, Session.Status status) {
return ( ! List.of(Session.Status.UNKNOWN, Session.Status.ACTIVATE).contains(status))
|| oldSessionDirWithUnknownStatus(sessionId, status);
}
/**
 * Returns whether this session has an on-disk directory, UNKNOWN status, and was created
 * longer ago than the configured grace period for unknown-status sessions.
 */
private boolean oldSessionDirWithUnknownStatus(long sessionId, Session.Status status) {
Duration expiryTime = Duration.ofHours(configserverConfig.keepSessionsWithUnknownStatusHours());
File sessionDir = tenantFileSystemDirs.getUserApplicationDir(sessionId);
return sessionDir.exists()
&& status == Session.Status.UNKNOWN
&& created(sessionDir).plus(expiryTime).isBefore(clock.instant());
}
/**
 * Returns the ids of session directories modified within the last 30 seconds —
 * these may still be in the process of being created and must not be expired.
 */
private Set<Long> findNewSessionsInFileSystem() {
    File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
    Set<Long> newSessions = new HashSet<>();
    if (sessions != null) {
        Instant cutoff = clock.instant().minus(Duration.ofSeconds(30)); // hoisted: same cutoff for all dirs
        for (File session : sessions) {
            try {
                if (Files.getLastModifiedTime(session.toPath()).toInstant().isAfter(cutoff))
                    newSessions.add(Long.parseLong(session.getName()));
            } catch (IOException e) {
                // Include the exception so the underlying cause is not lost
                log.log(Level.FINE, "Unable to find last modified time for " + session.toPath(), e);
            }
        }
    }
    return newSessions;
}
/** Returns the file-system creation time of the given file. */
private Instant created(File file) {
    try {
        return readAttributes(file.toPath(), BasicFileAttributes.class).creationTime().toInstant();
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/** Throws IllegalArgumentException if the session's ZooKeeper path already exists. */
private void ensureSessionPathDoesNotExist(long sessionId) {
    Path sessionPath = getSessionPath(sessionId);
    if ( ! curator.exists(sessionPath)) return;
    throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper");
}
/**
 * Creates an application package from a config server application directory, attaching
 * deploy metadata (timestamps, session ids, redeploy flag) and validating file extensions.
 *
 * @param userDir the original user application directory (recorded in deploy data)
 * @param configApplicationDir the config server's copy to read the package from
 * @param currentlyActiveSessionId the active session of the application, if any
 */
private ApplicationPackage createApplication(File userDir,
File configApplicationDir,
ApplicationId applicationId,
Tags tags,
long sessionId,
Optional<Long> currentlyActiveSessionId,
boolean internalRedeploy,
Optional<DeployLogger> deployLogger) {
long deployTimestamp = System.currentTimeMillis();
DeployData deployData = new DeployData(userDir.getAbsolutePath(), applicationId, tags, deployTimestamp, internalRedeploy,
sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId));
FilesApplicationPackage app = FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData);
validateFileExtensions(applicationId, deployLogger, app);
return app;
}
/**
 * Validates file extensions in the application package. In hosted Vespa a feature flag
 * decides whether violations fail the deployment or are only logged; elsewhere they
 * are always just logged to the deploy logger.
 */
private void validateFileExtensions(ApplicationId applicationId, Optional<DeployLogger> deployLogger, FilesApplicationPackage app) {
try {
app.validateFileExtensions();
} catch (IllegalArgumentException e) {
if (configserverConfig.hostedVespa()) {
UnboundStringFlag flag = PermanentFlags.APPLICATION_FILES_WITH_UNKNOWN_EXTENSION;
String value = flag.bindTo(flagSource).with(APPLICATION_ID, applicationId.serializedForm()).value();
switch (value) {
case "FAIL" -> throw new InvalidApplicationException(e);
case "LOG" -> deployLogger.ifPresent(logger -> logger.logApplicationPackage(Level.WARNING, e.getMessage()));
default -> log.log(Level.WARNING, "Unknown value for flag " + flag.id() + ": " + value);
}
} else {
deployLogger.ifPresent(logger -> logger.logApplicationPackage(Level.WARNING, e.getMessage()));
}
}
}
/**
 * Creates a new local session with the next session id: copies the application package,
 * creates the session node in ZooKeeper, waits (bounded to 120s) for other servers to
 * confirm the upload and caches the session.
 *
 * @throws RuntimeException wrapping any IOException from copying the application package
 */
private LocalSession createSessionFromApplication(File applicationDirectory,
ApplicationId applicationId,
Tags tags,
boolean internalRedeploy,
TimeoutBudget timeoutBudget,
DeployLogger deployLogger) {
long sessionId = getNextSessionId();
try {
ensureSessionPathDoesNotExist(sessionId);
ApplicationPackage app = createApplicationPackage(applicationDirectory, applicationId, tags, sessionId, internalRedeploy, Optional.of(deployLogger));
log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
sessionZKClient.createNewSession(clock.instant());
CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient);
// Cap the wait at 120s even if the remaining timeout budget is larger
waiter.awaitCompletion(Duration.ofSeconds(Math.min(120, timeoutBudget.timeLeft().getSeconds())));
addLocalSession(session);
return session;
} catch (IOException e) {
throw new RuntimeException("Error creating session " + sessionId, e);
}
}
/**
 * Copies the application directory into the session's directory and creates the
 * application package with deploy metadata. Synchronized so the active-session lookup,
 * copy and metadata write happen atomically with respect to concurrent deployments.
 *
 * @throws IOException if copying the application directory fails
 */
private ApplicationPackage createApplicationPackage(File applicationDirectory,
ApplicationId applicationId,
Tags tags,
long sessionId,
boolean internalRedeploy,
Optional<DeployLogger> deployLogger) throws IOException {
synchronized (monitor) {
Optional<Long> activeSessionId = getActiveSessionId(applicationId);
File userApplicationDir = getSessionAppDir(sessionId);
copyApp(applicationDirectory, userApplicationDir);
ApplicationPackage applicationPackage = createApplication(applicationDirectory,
userApplicationDir,
applicationId,
tags,
sessionId,
activeSessionId,
internalRedeploy,
deployLogger);
applicationPackage.writeMetaData();
return applicationPackage;
}
}
/** Returns the application set of the given application's active session, if any. */
public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) {
return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet);
}
/**
 * Returns the application set for the given session, or empty if the session is unknown
 * or its application cannot be loaded.
 */
private Optional<ApplicationSet> getApplicationSet(long sessionId) {
Optional<ApplicationSet> applicationSet = Optional.empty();
try {
applicationSet = Optional.ofNullable(getRemoteSession(sessionId)).map(this::ensureApplicationLoaded);
} catch (IllegalArgumentException e) {
// Ignored: presumably means the session's application directory is gone, so there is
// no application set to return — NOTE(review): confirm this swallow is intentional
}
return applicationSet;
}
/**
 * Copies the application into the destination directory. The copy goes via a temp
 * directory followed by an atomic move, so the destination never exists half-written.
 * No-op if the destination already exists.
 *
 * @throws IllegalArgumentException if sourceDir is not a directory
 * @throws IOException if copying or moving fails
 */
private void copyApp(File sourceDir, File destinationDir) throws IOException {
if (destinationDir.exists()) {
log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied");
return;
}
if (! sourceDir.isDirectory())
throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory");
java.nio.file.Path tempDestinationDir = null;
try {
// Copy to a temp dir on the same file system so the final move can be atomic
tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
moveSearchDefinitionsToSchemasDir(tempDestinationDir);
log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
} finally {
// Clean up the temp dir if the move did not happen (after a successful move this is a no-op)
if (tempDestinationDir != null)
IOUtils.recursiveDeleteDir(tempDestinationDir.toFile());
}
}
/**
 * Returns a new session instance for the given session id.
 */
void createSessionFromId(long sessionId) {
    File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
    createLocalSession(sessionId, FilesApplicationPackage.fromFile(sessionDir));
}
/** Creates and caches a local session for the given id and application package. */
void createLocalSession(long sessionId, ApplicationPackage applicationPackage) {
    SessionZooKeeperClient zooKeeperClient = createSessionZooKeeperClient(sessionId);
    addLocalSession(new LocalSession(tenantName, sessionId, applicationPackage, zooKeeperClient));
}
/**
 * Create a new local session for the given session id if it does not already exist.
 * Will also add the session to the local session cache if necessary. If there is no
 * remote session matching the session it will also be created.
 */
public void createLocalSessionFromDistributedApplicationPackage(long sessionId) {
    if (applicationRepo.sessionExistsInFileSystem(sessionId)) {
        log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists");
        createSessionFromId(sessionId);
        return;
    }
    SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
    FileReference fileReference = sessionZKClient.readApplicationPackageReference();
    log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference);
    if (fileReference != null) {
        File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir()));
        File sessionDir;
        FileDirectory fileDirectory = new FileDirectory(rootDir);
        try {
            sessionDir = fileDirectory.getFile(fileReference);
        } catch (IllegalArgumentException e) {
            // File reference may not yet have been distributed to this server; treat as "not ready"
            log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
            return;
        }
        ApplicationId applicationId = sessionZKClient.readApplicationId()
                .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
        log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId);
        try {
            createLocalSession(sessionDir, applicationId, sessionZKClient.readTags(), sessionId);
        } finally {
            // Fixed: previous message concatenated the directory (sessionDir) where the id was meant
            log.log(Level.FINE, "Deleting file distribution reference for app package with session id " + sessionId);
            IOUtils.recursiveDeleteDir(sessionDir);
        }
    }
}
/** Returns the id of the given application's active session, if any. */
private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
return applicationRepo.activeSessionOf(applicationId);
}
/** Allocates and returns the next session id from the shared counter in ZooKeeper. */
private long getNextSessionId() {
return sessionCounter.nextSessionId();
}
/** Returns the ZooKeeper path of the given session. */
public Path getSessionPath(long sessionId) {
return sessionsPath.append(String.valueOf(sessionId));
}
/** Returns the ZooKeeper path of the given session's state node. */
Path getSessionStatePath(long sessionId) {
return getSessionPath(sessionId).append(ZKApplication.SESSIONSTATE_ZK_SUBPATH);
}
/** Creates a ZooKeeper client scoped to the given session. */
public SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) {
return new SessionZooKeeperClient(curator,
tenantName,
sessionId,
configserverConfig.serverId(),
fileDistributionFactory.createFileManager(getSessionAppDir(sessionId)),
maxNodeSize);
}
/**
 * Returns the session's application directory.
 *
 * @throws IllegalArgumentException if the directory does not exist
 */
private File getAndValidateExistingSessionAppDir(long sessionId) {
    File appDir = getSessionAppDir(sessionId);
    if (appDir.isDirectory()) return appDir; // isDirectory() implies exists()
    throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId);
}
/** Returns the (possibly non-existent) application directory for the given session. */
private File getSessionAppDir(long sessionId) {
    TenantFileSystemDirs tenantDirs = new TenantFileSystemDirs(configServerDB, tenantName);
    return tenantDirs.getUserApplicationDir(sessionId);
}
// Ensures a state watcher exists for the given session, creating one the first time the
// session id is seen. The watcher is fed by a file cache over the session's state ZK node.
private void updateSessionStateWatcher(long sessionId) {
sessionStateWatchers.computeIfAbsent(sessionId, (id) -> {
// Cache the session state node and get notified of changes to it
Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(id).getAbsolute(), false);
fileCache.addListener(this::nodeChanged);
return new SessionStateWatcher(fileCache, id, metricUpdater, zkWatcherExecutor, this);
});
}
@Override
public String toString() {
    // Represent this repository by the sessions it currently holds locally
    var sessions = getLocalSessions();
    return sessions.toString();
}
public Clock clock() { return clock; }
// Shuts this repository down: deletes all sessions and the tenant's file system dirs,
// closes the ZooKeeper directory cache, and finally tears down all session state watchers.
public void close() {
deleteAllSessions();
tenantFileSystemDirs.delete();
try {
if (directoryCache != null) {
directoryCache.close();
}
} catch (Exception e) {
// Best effort: a failure to close the cache must not abort the rest of the shutdown
log.log(Level.WARNING, "Exception when closing path cache", e);
} finally {
// An empty "existing sessions" list makes every cached remote session count as removed
checkForRemovedSessions(new ArrayList<>());
}
}
// Called when the set of session nodes in ZooKeeper changes: reconciles the remote session
// cache against the session ids currently present in the directory cache.
private void sessionsChanged() throws NumberFormatException {
List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData());
checkForRemovedSessions(sessions);
checkForAddedSessions(sessions);
}
// Drops cached remote sessions (and closes their state watchers) for every session id
// no longer present in the given list of existing sessions.
private void checkForRemovedSessions(List<Long> existingSessions) {
for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) {
long sessionId = it.next().sessionId;
if (existingSessions.contains(sessionId)) continue;
SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
if (watcher != null) watcher.close();
// Remove via the iterator to avoid ConcurrentModificationException while looping
it.remove();
metricUpdater.incRemovedSessions();
}
}
/** Registers a remote session for every session id not yet present in the cache. */
private void checkForAddedSessions(List<Long> sessions) {
    for (Long id : sessions) {
        if (remoteSessionCache.get(id) == null) {
            sessionAdded(id);
        }
    }
}
/**
 * Creates a transaction that marks the session ACTIVATE and points the application
 * at this session id.
 */
public Transaction createActivateTransaction(Session session) {
    Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
    var putOperations = applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId())
                                       .operations();
    transaction.add(putOperations);
    return transaction;
}
/** Returns a transaction which writes the given status for the session. */
public Transaction createSetStatusTransaction(Session session, Session.Status status) {
    SessionZooKeeperClient zkClient = session.sessionZooKeeperClient;
    return zkClient.createWriteStatusTransaction(status);
}
// Marks the given session as prepared
void setPrepared(Session session) {
session.setStatus(Session.Status.PREPARE);
}
/** A transaction over {@code FileOperation}s: prepare is a no-op; commit runs each operation. */
private static class FileTransaction extends AbstractTransaction {

    /** Returns a transaction containing the single given operation. */
    public static FileTransaction from(FileOperation operation) {
        FileTransaction transaction = new FileTransaction();
        transaction.add(operation);
        return transaction;
    }

    @Override
    public void prepare() { }

    @Override
    public void commit() {
        operations().forEach(operation -> ((FileOperation) operation).commit());
    }

}
/** Factory for file operations */
private static class FileOperations {

    /** Returns an operation which recursively deletes the given path */
    public static DeleteOperation delete(String pathToDelete) {
        return new DeleteOperation(pathToDelete);
    }

}
/** A file system side effect which can participate in a transaction. */
private interface FileOperation extends Transaction.Operation {
// Applies this operation's effect to the file system
void commit();
}
/**
 * Recursively deletes this path and everything below.
 * Succeeds with no action if the path does not exist.
 */
private static class DeleteOperation implements FileOperation {

    private final String pathToDelete;

    DeleteOperation(String pathToDelete) {
        this.pathToDelete = pathToDelete;
    }

    @Override
    public void commit() {
        // recursiveDeleteDir is a no-op when the path is already gone
        File target = new File(pathToDelete);
        IOUtils.recursiveDeleteDir(target);
    }

}
} |
This JSON is sent to a Vespa application, so need to add this field there (Not an issue for this PR since this is not enabled yet, just reminder :smile: ) | String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!metadataPath.exists()) {
ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
metadata.putAll(nodeAttributesSupplier.get());
metadata.put("coredump_path", doneCoredumpsPath
.resolve(context.containerName().asString())
.resolve(coredumpDirectory.getFileName().toString())
.resolve(coredumpFile.getFileName().toString()).toString());
decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
} | decryptionToken.ifPresent(token -> metadata.put("decryption_token", token)); | String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!metadataPath.exists()) {
ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
metadata.putAll(nodeAttributesSupplier.get());
metadata.put("coredump_path", doneCoredumpsPath
.resolve(context.containerName().asString())
.resolve(coredumpDirectory.getFileName().toString())
.resolve(coredumpFile.getFileName().toString()).toString());
decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
if (decryptionToken.isPresent()) {
String metadataFields = metadataWithPatchedTokenValue(metadataPath, decryptionToken.get());
metadataPath.deleteIfExists();
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
} | class CoredumpHandler {
// Matches JVM fatal error log file names, e.g. "hs_err_pid1234.log"
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
// Subdirectory of the crash directory where core dumps are kept while being processed
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
// Extensions appended after compression and (optionally) encryption of the core file
private static final String COMPRESSED_EXTENSION = ".zstd";
private static final String ENCRYPTED_EXTENSION = ".enc";
// Prefix identifying the actual core dump file inside a processing directory
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
// NOTE(review): name looks like a typo for "crashPathInContainer" — confirm before renaming
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
// May supply null, in which case core dump encryption is disabled
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
/**
 * @param crashPathInContainer path inside the container where core dump are dumped
 * @param doneCoredumpsPath path on host where processed core dumps are stored
 */
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics) {
// NOTE(review): the Terminal parameter is unused here — possibly kept for caller
// compatibility; confirm whether it can be dropped.
this(coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/);
}
// Package-private constructor allowing injection of clock, coredump-id supplier and
// encryption key supplier (the key supplier may return null to disable encryption).
CoredumpHandler(CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier) {
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
}
// Core dump convergence step: updates metrics, optionally fails if any core is still being
// written, then enqueues and processes at most one core dump per invocation.
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
// A core counts as "being written" until it has been unmodified long enough (see isReadyForProcessing)
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
// List file names when few, otherwise just the count, to keep the message bounded
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(containerCrashPath, containerProcessingPath)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
// Prefer a directory already under processing/ (e.g. left over from an earlier failed
// attempt); otherwise enqueue the oldest ready core dump from the crash directory.
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(containerCrashPath, containerProcessingPath));
}
/**
 * Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
 * Limit to only processing one coredump at the time, starting with the oldest.
 *
 * Assumption: hs_err files are much smaller than core files and are written (last modified time)
 * before the core file.
 *
 * @return path to directory inside processing directory which contains the enqueued core dump file
 */
Optional<ContainerPath> enqueueCoredump(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
// Ready files, oldest first, so cores are handled in the order they appeared
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(this::isReadyForProcessing)
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
// Index of the oldest file that is an actual core dump (i.e. not an hs_err log)
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = (ContainerPath) uncheck(() -> Files.createDirectories(containerProcessingPath.resolve(coredumpIdSupplier.get())));
// Move the core dump plus every older hs_err file into the new processing directory;
// only the core dump itself is renamed with COREDUMP_FILENAME_PREFIX.
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
// Collects metadata for the core dump in the given directory, reports it, then compresses
// (and optionally encrypts) the core and moves it to the done directory. Any failure is
// rethrown as a RuntimeException carrying the directory for context.
void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
// A null key from the supplier means encryption is disabled; then no token is attached
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
 * Wraps the given stream in an AES-GCM encrypting cipher stream when a shared core key
 * is present; otherwise returns the stream unchanged.
 */
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
return sharedCoreKey
.map(key -> (OutputStream)new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(key)))
.orElse(wrappedStream);
}
/**
 * Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
 * the entire core dump processing directory to the per-container directory under doneCoredumpsPath.
 */
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) throws IOException {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
// ".zstd" when only compressed, ".zstd.enc" when also encrypted
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
// NOTE(review): rethrown unchecked even though the method declares IOException — confirm intent
throw new UncheckedIOException(e);
}
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()));
}
// Returns the single unprocessed core dump file (prefixed "dump_", not yet compressed or
// encrypted) in the given processing directory; throws IllegalStateException if none exists.
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
return (ContainerPath) FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
.and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
// Publishes gauges for the number of core dumps still waiting to be processed and the
// number already processed for this container.
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
// Unprocessed: everything in the crash directory except hidden files, hs_err logs,
// already compressed/encrypted artifacts, and metadata files
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
// Processed: one directory per handled core dump under the done path for this container
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
// Builds metric dimensions for this node: host/flavor/state/zone always, plus owner,
// cluster membership and parent hostname when present.
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value());
node.owner().ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
);
node.membership().ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.type().value())
.add("clusterid", membership.clusterId())
);
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
dimensionsBuilder.add("system", context.zone().getSystemName().value());
return dimensionsBuilder.build();
}
/** A file is ready for processing once it has been unmodified for at least one minute. */
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
    var cutoff = clock.instant().minusSeconds(60);
    return cutoff.isAfter(fileAttributes.lastModifiedTime());
}
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COMPRESSED_EXTENSION = ".zstd";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics) {
this(coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/);
}
CoredumpHandler(CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier) {
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(containerCrashPath, containerProcessingPath)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(containerCrashPath, containerProcessingPath));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<ContainerPath> enqueueCoredump(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(this::isReadyForProcessing)
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = (ContainerPath) uncheck(() -> Files.createDirectories(containerProcessingPath.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
// Re-serializes the existing metadata.json with its "decryption_token" replaced by the given
// token, so a retried report carries the token matching the key used to encrypt the core.
private String metadataWithPatchedTokenValue(UnixPath metadataPath, String decryptionToken) throws JsonProcessingException {
var jsonRoot = objectMapper.readTree(metadataPath.readUtf8File());
// Only patch when the expected {"fields": {...}} structure is present; otherwise leave as-is
if (jsonRoot.path("fields").isObject()) {
((ObjectNode)jsonRoot.get("fields")).put("decryption_token", decryptionToken);
}
return objectMapper.writeValueAsString(jsonRoot);
}
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
return sharedCoreKey
.map(key -> (OutputStream)new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(key)))
.orElse(wrappedStream);
}
/**
* Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
* the entire core dump processing directory to {@link
*/
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) throws IOException {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()));
}
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
return (ContainerPath) FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
.and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value());
node.owner().ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
);
node.membership().ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.type().value())
.add("clusterid", membership.clusterId())
);
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
dimensionsBuilder.add("system", context.zone().getSystemName().value());
return dimensionsBuilder.build();
}
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
return clock.instant().minusSeconds(60).isAfter(fileAttributes.lastModifiedTime());
}
} |
`getMetadata()` writes the resulting metadata to a file, but if the file already exists, it'll return the data from the file ignoring the arguments. If we fail to report this coredump, we will try again later, but then we'll end up encrypting the core with the new key while the metadata will have the old `decryption_token`. | void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
} | String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken); | void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COMPRESSED_EXTENSION = ".zstd";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics) {
this(coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/);
}
CoredumpHandler(CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier) {
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(containerCrashPath, containerProcessingPath)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(containerCrashPath, containerProcessingPath));
}
/**
 * Moves a core dump and its related hs_err file(s) into a new directory under processing/.
 * Only one core dump is enqueued at a time, starting with the oldest.
 *
 * Assumption: hs_err files are much smaller than core files and are written (last modified
 * time) before the core file, so every candidate older than the chosen core belongs to it.
 *
 * @return directory inside processing/ that now contains the enqueued core dump file
 */
Optional<ContainerPath> enqueueCoredump(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
    // Candidates, oldest first, that have not been written to recently.
    List<Path> candidates = FileFinder.files(containerCrashPath)
            .match(this::isReadyForProcessing)
            .maxDepth(1)
            .stream()
            .sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
            .map(FileFinder.FileAttributes::path)
            .toList();

    // The first candidate that is not an hs_err log is the core dump itself.
    int coreIndex = -1;
    for (int i = 0; i < candidates.size(); i++) {
        if (!HS_ERR_PATTERN.matcher(candidates.get(i).getFileName().toString()).matches()) {
            coreIndex = i;
            break;
        }
    }
    if (coreIndex < 0) return Optional.empty();

    ContainerPath enqueuedDir = (ContainerPath) uncheck(() ->
            Files.createDirectories(containerProcessingPath.resolve(coredumpIdSupplier.get())));
    // Move the core dump plus every older (hs_err) file; only the core gets the dump_ prefix.
    for (int i = 0; i <= coreIndex; i++) {
        Path source = candidates.get(i);
        String prefix = i == coreIndex ? COREDUMP_FILENAME_PREFIX : "";
        uncheck(() -> Files.move(source, enqueuedDir.resolve(prefix + source.getFileName())));
    }
    return Optional.of(enqueuedDir);
}
/**
 * Returns coredump metadata from metadata.json if present, otherwise collects metadata using
 * {@link CoreCollector} and stores it to metadata.json.
 *
 * When metadata.json already exists (a previous processing attempt failed after writing it)
 * and a decryption token is supplied, the stored token is refreshed: each attempt generates a
 * fresh encryption key, so a stale token in metadata.json could not decrypt the core file that
 * is (re-)encrypted later in this attempt.
 *
 * @param context                node agent context used for metadata collection
 * @param coredumpDirectory      directory under processing/ containing the core dump file
 * @param nodeAttributesSupplier supplier of node attributes merged into the metadata
 * @param decryptionToken        token for this attempt's encryption key, if encryption is enabled
 * @return the metadata JSON document, as stored in metadata.json
 * @throws IOException if reading or writing metadata.json fails
 */
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
    UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
    if (!metadataPath.exists()) {
        ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
        Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
        metadata.putAll(nodeAttributesSupplier.get());
        // Record where the processed (moved) core will end up on the host.
        metadata.put("coredump_path", doneCoredumpsPath
                .resolve(context.containerName().asString())
                .resolve(coredumpDirectory.getFileName().toString())
                .resolve(coredumpFile.getFileName().toString()).toString());
        decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
        String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
        metadataPath.writeUtf8File(metadataFields);
        return metadataFields;
    } else if (decryptionToken.isPresent()) {
        // Patch the stored metadata with the token matching this attempt's freshly generated
        // encryption key; the previously stored token cannot decrypt the re-encrypted core.
        var jsonRoot = objectMapper.readTree(metadataPath.readUtf8File());
        if (jsonRoot.path("fields").isObject()) {
            ((com.fasterxml.jackson.databind.node.ObjectNode) jsonRoot.get("fields"))
                    .put("decryption_token", decryptionToken.get());
        }
        String metadataFields = objectMapper.writeValueAsString(jsonRoot);
        metadataPath.deleteIfExists();
        metadataPath.writeUtf8File(metadataFields);
        return metadataFields;
    } else {
        return metadataPath.readUtf8File();
    }
}
/** Wraps the given stream in an AES-GCM encrypting stream when a shared key is present, else returns it unchanged. */
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
    if (sharedCoreKey.isEmpty()) return wrappedStream;
    return new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(sharedCoreKey.get()));
}
/**
 * Compresses and, if an encryption key is provided, encrypts the core file (deleting the
 * uncompressed original), then moves the entire core dump processing directory to the
 * processed-coredumps directory on the host ({@code doneCoredumpsPath}).
 *
 * @param context           node agent context, used to resolve the container's done-directory
 * @param coredumpDirectory directory under processing/ holding the core dump
 * @param sharedCoreKey     key to encrypt the compressed core with, if present
 * @throws IOException if compressing, deleting or moving files fails
 */
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) throws IOException {
    ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
    String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
    ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);

    // Compress (and possibly encrypt) into a sibling file. The method already declares
    // IOException, so failures propagate directly instead of being wrapped in
    // UncheckedIOException as before — the caller catches Exception either way.
    try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
         OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
        zcis.transferTo(fos);
    }
    Files.delete(coreFile);

    Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
    uncheck(() -> Files.createDirectories(newCoredumpDirectory));
    Files.move(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()));
}
/** Returns the single uncompressed, unencrypted core dump file in the given processing directory. */
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath processingDirectory) {
    // A raw core dump carries the dump_ prefix and has not yet been compressed or encrypted.
    var rawCoreFile = nameStartsWith(COREDUMP_FILENAME_PREFIX)
            .and(nameEndsWith(COMPRESSED_EXTENSION).negate())
            .and(nameEndsWith(ENCRYPTED_EXTENSION).negate());
    return (ContainerPath) FileFinder.files(processingDirectory)
            .match(rawCoreFile)
            .maxDepth(1)
            .stream()
            .map(FileFinder.FileAttributes::path)
            .findFirst()
            .orElseThrow(() -> new IllegalStateException(
                    "No coredump file found in processing directory " + processingDirectory));
}
/** Samples the number of enqueued (not yet processed) and processed core dumps as node metrics. */
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
    Dimensions dimensions = generateDimensions(context);

    // Count candidate core dumps in the crash directory, skipping hidden files, hs_err logs,
    // already compressed/encrypted artifacts and metadata files.
    int enqueued = FileFinder.files(containerCrashPath)
            .match(nameStartsWith(".").negate())
            .match(nameMatches(HS_ERR_PATTERN).negate())
            .match(nameEndsWith(COMPRESSED_EXTENSION).negate())
            .match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
            .match(nameStartsWith("metadata").negate())
            .list().size();
    metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(enqueued);

    // Each processed dump lives in its own directory under the container's done-directory.
    Path processedPath = doneCoredumpsPath.resolve(context.containerName().asString());
    int processed = FileFinder.directories(processedPath)
            .maxDepth(1)
            .list().size();
    metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(processed);
}
/** Builds the pre-tagged metric dimensions describing this node, its owner and its cluster membership. */
private Dimensions generateDimensions(NodeAgentContext context) {
    NodeSpec node = context.node();
    Dimensions.Builder builder = new Dimensions.Builder()
            .add("host", node.hostname())
            .add("flavor", node.flavor())
            .add("state", node.state().toString())
            .add("zone", context.zone().getId().value());

    // Owner dimensions are only present for allocated nodes.
    node.owner().ifPresent(owner -> builder
            .add("tenantName", owner.tenant().value())
            .add("applicationName", owner.application().value())
            .add("instanceName", owner.instance().value())
            .add("app", String.join(".", owner.application().value(), owner.instance().value()))
            .add("applicationId", owner.toFullString()));

    node.membership().ifPresent(membership -> builder
            .add("clustertype", membership.type().value())
            .add("clusterid", membership.clusterId()));

    node.parentHostname().ifPresent(parent -> builder.add("parentHostname", parent));
    builder.add("orchestratorState", node.orchestratorStatus().asString());
    builder.add("system", context.zone().getSystemName().value());
    return builder.build();
}
/** A dump file is considered ready once it has not been modified for at least one minute. */
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
    return fileAttributes.lastModifiedTime().isBefore(clock.instant().minusSeconds(60));
}
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COMPRESSED_EXTENSION = ".zstd";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
/**
 * Creates a handler using the system UTC clock, random UUID dump ids and no core encryption.
 *
 * @param crashPathInContainer path inside the container where core dumps are dumped
 * @param doneCoredumpsPath path on host where processed core dumps are stored
 */
// NOTE(review): the terminal parameter is unused here — confirm whether callers still need it.
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics) {
this(coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /* no encryption key supplier yet — TODO */);
}
CoredumpHandler(CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier) {
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
}
/**
 * Collects and reports at most one core dump per convergence pass.
 *
 * Updates core dump metrics, then either resumes a dump already enqueued under processing/
 * or enqueues the oldest ready dump from the crash directory, processes it and reports it.
 *
 * @param context node agent context for the container owning the crash directory
 * @param nodeAttributesSupplier node attributes merged into the reported metadata
 * @param throwIfCoreBeingWritten if true, abort with a ConvergenceException when any core file
 *                                appears to still be written to (modified within the last minute)
 */
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
// A core is "pending" while its last-modified time is less than a minute old.
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
// List the file names when few, otherwise just the count, to keep the message bounded.
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(containerCrashPath, containerProcessingPath)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(containerCrashPath, containerProcessingPath));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<ContainerPath> enqueueCoredump(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(this::isReadyForProcessing)
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = (ContainerPath) uncheck(() -> Files.createDirectories(containerProcessingPath.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
/**
 * Returns coredump metadata from metadata.json if present, otherwise attempts to get metadata using
 * {@link CoreCollector} and stores it to metadata.json.
 *
 * When metadata.json already exists (a previous processing attempt failed after writing it) and a
 * decryption token is supplied, the stored token is replaced: each attempt generates a fresh
 * encryption key, so the old token could not decrypt the core re-encrypted in this attempt.
 *
 * @param decryptionToken token for this attempt's encryption key, if core encryption is enabled
 * @return the metadata JSON document, as stored in metadata.json
 * @throws IOException if reading or writing metadata.json fails
 */
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!metadataPath.exists()) {
ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
metadata.putAll(nodeAttributesSupplier.get());
// Record where the processed (moved) core will end up on the host.
metadata.put("coredump_path", doneCoredumpsPath
.resolve(context.containerName().asString())
.resolve(coredumpDirectory.getFileName().toString())
.resolve(coredumpFile.getFileName().toString()).toString());
decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
if (decryptionToken.isPresent()) {
// Rewrite metadata.json with the token matching this attempt's encryption key.
String metadataFields = metadataWithPatchedTokenValue(metadataPath, decryptionToken.get());
metadataPath.deleteIfExists();
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
}
/** Returns the stored metadata JSON with its "fields.decryption_token" replaced by the given token. */
private String metadataWithPatchedTokenValue(UnixPath metadataPath, String decryptionToken) throws JsonProcessingException {
    var root = objectMapper.readTree(metadataPath.readUtf8File());
    var fields = root.path("fields");
    // Only patch well-formed documents; anything else is returned unmodified.
    if (fields.isObject()) ((ObjectNode) fields).put("decryption_token", decryptionToken);
    return objectMapper.writeValueAsString(root);
}
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
return sharedCoreKey
.map(key -> (OutputStream)new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(key)))
.orElse(wrappedStream);
}
/**
* Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
* the entire core dump processing directory to {@link
*/
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) throws IOException {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()));
}
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
return (ContainerPath) FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
.and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value());
node.owner().ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
);
node.membership().ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.type().value())
.add("clusterid", membership.clusterId())
);
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
dimensionsBuilder.add("system", context.zone().getSystemName().value());
return dimensionsBuilder.build();
}
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
return clock.instant().minusSeconds(60).isAfter(fileAttributes.lastModifiedTime());
}
} |
Hmm, very good point. Is there any kind of non-idempotence (or something else) that means we can't simply overwrite the file if it already exists? By design it's impossible for the core dump logic to extract the _old_ encryption key from the metadata 😬 | void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
} | String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken); | void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COMPRESSED_EXTENSION = ".zstd";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics) {
this(coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/);
}
CoredumpHandler(CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier) {
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(containerCrashPath, containerProcessingPath)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(containerCrashPath, containerProcessingPath));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<ContainerPath> enqueueCoredump(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(this::isReadyForProcessing)
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = (ContainerPath) uncheck(() -> Files.createDirectories(containerProcessingPath.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
/**
 * Returns coredump metadata from metadata.json if present, otherwise collects metadata using
 * {@link CoreCollector} and stores it to metadata.json.
 *
 * When metadata.json already exists (a previous processing attempt failed after writing it)
 * and a decryption token is supplied, the stored token is refreshed: each attempt generates a
 * fresh encryption key, so a stale token in metadata.json could not decrypt the core file that
 * is (re-)encrypted later in this attempt.
 *
 * @param context                node agent context used for metadata collection
 * @param coredumpDirectory      directory under processing/ containing the core dump file
 * @param nodeAttributesSupplier supplier of node attributes merged into the metadata
 * @param decryptionToken        token for this attempt's encryption key, if encryption is enabled
 * @return the metadata JSON document, as stored in metadata.json
 * @throws IOException if reading or writing metadata.json fails
 */
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
    UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
    if (!metadataPath.exists()) {
        ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
        Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
        metadata.putAll(nodeAttributesSupplier.get());
        // Record where the processed (moved) core will end up on the host.
        metadata.put("coredump_path", doneCoredumpsPath
                .resolve(context.containerName().asString())
                .resolve(coredumpDirectory.getFileName().toString())
                .resolve(coredumpFile.getFileName().toString()).toString());
        decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
        String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
        metadataPath.writeUtf8File(metadataFields);
        return metadataFields;
    } else if (decryptionToken.isPresent()) {
        // Patch the stored metadata with the token matching this attempt's freshly generated
        // encryption key; the previously stored token cannot decrypt the re-encrypted core.
        var jsonRoot = objectMapper.readTree(metadataPath.readUtf8File());
        if (jsonRoot.path("fields").isObject()) {
            ((com.fasterxml.jackson.databind.node.ObjectNode) jsonRoot.get("fields"))
                    .put("decryption_token", decryptionToken.get());
        }
        String metadataFields = objectMapper.writeValueAsString(jsonRoot);
        metadataPath.deleteIfExists();
        metadataPath.writeUtf8File(metadataFields);
        return metadataFields;
    } else {
        return metadataPath.readUtf8File();
    }
}
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
return sharedCoreKey
.map(key -> (OutputStream)new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(key)))
.orElse(wrappedStream);
}
/**
 * Compresses and, if an encryption key is provided, encrypts the core file (deleting the
 * uncompressed original), then moves the entire core dump processing directory to the
 * processed-coredumps directory on the host ({@code doneCoredumpsPath}).
 *
 * @param context           node agent context, used to resolve the container's done-directory
 * @param coredumpDirectory directory under processing/ holding the core dump
 * @param sharedCoreKey     key to encrypt the compressed core with, if present
 * @throws IOException if compressing, deleting or moving files fails
 */
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) throws IOException {
    ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
    String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
    ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);

    // Compress (and possibly encrypt) into a sibling file. The method already declares
    // IOException, so failures propagate directly instead of being wrapped in
    // UncheckedIOException as before — the caller catches Exception either way.
    try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
         OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
        zcis.transferTo(fos);
    }
    Files.delete(coreFile);

    Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
    uncheck(() -> Files.createDirectories(newCoredumpDirectory));
    Files.move(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()));
}
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
return (ContainerPath) FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
.and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value());
node.owner().ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
);
node.membership().ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.type().value())
.add("clusterid", membership.clusterId())
);
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
dimensionsBuilder.add("system", context.zone().getSystemName().value());
return dimensionsBuilder.build();
}
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
return clock.instant().minusSeconds(60).isAfter(fileAttributes.lastModifiedTime());
}
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COMPRESSED_EXTENSION = ".zstd";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics) {
this(coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/);
}
CoredumpHandler(CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier) {
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
}
/**
 * Per-tick entry point: updates core dump metrics, optionally fails fast if a core file is still
 * being written, then processes and reports at most one core dump.
 *
 * @param context node agent context for the container
 * @param nodeAttributesSupplier supplies node attributes to merge into the core dump metadata
 * @param throwIfCoreBeingWritten when true, throw instead of proceeding if any core file was
 *                                modified less than a minute ago (see isReadyForProcessing)
 */
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
// Update gauges before any early exit so metrics stay fresh even when we throw below.
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
// List the file names only when there are few of them, otherwise just the count.
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
// Process at most one core dump per invocation.
getCoredumpToProcess(containerCrashPath, containerProcessingPath)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/**
 * Returns the directory (inside the processing directory) holding the next core dump to process.
 * A dump already staged under processing/ takes priority; otherwise a new one is enqueued from
 * the crash directory, if any is ready.
 *
 * @return path to directory inside processing directory that contains a core dump file to process
 */
Optional<ContainerPath> getCoredumpToProcess(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
    Optional<ContainerPath> alreadyEnqueued = FileFinder.directories(containerProcessingPath).stream()
            .findAny()
            .map(attributes -> (ContainerPath) attributes.path());
    if (alreadyEnqueued.isPresent()) return alreadyEnqueued;
    return enqueueCoredump(containerCrashPath, containerProcessingPath);
}
/**
 * Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
 * Limit to only processing one coredump at a time, starting with the oldest.
 *
 * Assumption: hs_err files are much smaller than core files and are written (last modified time)
 * before the core file.
 *
 * @return path to directory inside processing directory which contains the enqueued core dump file
 */
Optional<ContainerPath> enqueueCoredump(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
// Candidate files that are ready for processing, oldest first.
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(this::isReadyForProcessing)
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
// Index of the oldest file that is an actual core dump (not an hs_err log).
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
// Stage the core dump, plus all hs_err files older than it, in a fresh uniquely-named directory.
ContainerPath enqueuedDir = (ContainerPath) uncheck(() -> Files.createDirectories(containerProcessingPath.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
// Only the core dump itself gets the "dump_" prefix; hs_err files keep their original names.
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
/**
 * @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
 * {@link CoreCollector} and stores it to metadata.json
 */
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!metadataPath.exists()) {
// First processing attempt: collect metadata via CoreCollector and persist it alongside the dump.
ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
metadata.putAll(nodeAttributesSupplier.get());
// Point at where the dump will end up once finishProcessing() has moved it.
metadata.put("coredump_path", doneCoredumpsPath
.resolve(context.containerName().asString())
.resolve(coredumpDirectory.getFileName().toString())
.resolve(coredumpFile.getFileName().toString()).toString());
decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
if (decryptionToken.isPresent()) {
// Retry path: a token is supplied per processing attempt (presumably regenerated with the
// encryption key — confirm), so replace the stored token to keep the dump decryptable.
String metadataFields = metadataWithPatchedTokenValue(metadataPath, decryptionToken.get());
metadataPath.deleteIfExists();
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
}
/**
 * Returns the stored metadata JSON with its "fields.decryption_token" entry set to the given token.
 * If the document has no "fields" object, the content is returned unmodified (re-serialized).
 */
private String metadataWithPatchedTokenValue(UnixPath metadataPath, String decryptionToken) throws JsonProcessingException {
    var root = objectMapper.readTree(metadataPath.readUtf8File());
    var fields = root.path("fields");
    if (fields.isObject()) {
        ((ObjectNode) fields).put("decryption_token", decryptionToken);
    }
    return objectMapper.writeValueAsString(root);
}
/**
 * Wraps the given stream in an AES-GCM encrypting CipherOutputStream when a shared key is supplied;
 * otherwise returns the stream unchanged.
 */
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
    if (sharedCoreKey.isEmpty()) {
        return wrappedStream;
    }
    return new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(sharedCoreKey.get()));
}
/**
 * Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
 * the entire core dump processing directory to the per-container directory under {@code doneCoredumpsPath}
 * on the host.
 */
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) throws IOException {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
// ".zstd" when compressed only, ".zstd.enc" when also encrypted.
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
// NOTE(review): wrapping as UncheckedIOException is inconsistent with this method's
// 'throws IOException' declaration — consider letting the exception propagate directly.
throw new UncheckedIOException(e);
}
Files.delete(coreFile);
// Move the whole processing directory (compressed dump + hs_err files + metadata.json) to done/.
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()));
}
/**
 * Finds the enqueued core dump file inside the given processing directory: the file carrying the
 * "dump_" prefix added by enqueueCoredump(), excluding already compressed (.zstd) or encrypted
 * (.enc) artifacts.
 *
 * @param coredumpProcessingDirectory directory under processing/ containing the enqueued dump
 * @return path to the raw core dump file
 * @throws IllegalStateException if no matching file is found
 */
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProcessingDirectory) {
    // Fixed typo in parameter name ("Proccessing" -> "Processing"); parameter names are not part
    // of the caller-visible interface in Java.
    return (ContainerPath) FileFinder.files(coredumpProcessingDirectory)
            .match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
                    .and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
            .maxDepth(1)
            .stream()
            .map(FileFinder.FileAttributes::path)
            .findFirst()
            .orElseThrow(() -> new IllegalStateException(
                    "No coredump file found in processing directory " + coredumpProcessingDirectory));
}
/** Samples gauges for the number of unprocessed (enqueued) and processed core dumps for this node. */
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
// Count raw core files only: skip hidden files, JVM hs_err logs, already compressed/encrypted
// artifacts, and metadata files.
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
// Each processed dump lives in its own directory under <doneCoredumpsPath>/<container-name>.
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
/** Builds the metric dimensions (host, application, cluster, zone, ...) for the node in the given context. */
private Dimensions generateDimensions(NodeAgentContext context) {
    NodeSpec node = context.node();
    var builder = new Dimensions.Builder();
    builder.add("host", node.hostname());
    builder.add("flavor", node.flavor());
    builder.add("state", node.state().toString());
    builder.add("zone", context.zone().getId().value());
    node.owner().ifPresent(owner -> {
        builder.add("tenantName", owner.tenant().value());
        builder.add("applicationName", owner.application().value());
        builder.add("instanceName", owner.instance().value());
        builder.add("app", String.join(".", owner.application().value(), owner.instance().value()));
        builder.add("applicationId", owner.toFullString());
    });
    node.membership().ifPresent(membership -> {
        builder.add("clustertype", membership.type().value());
        builder.add("clusterid", membership.clusterId());
    });
    node.parentHostname().ifPresent(parent -> builder.add("parentHostname", parent));
    builder.add("orchestratorState", node.orchestratorStatus().asString());
    builder.add("system", context.zone().getSystemName().value());
    return builder.build();
}
/** A core file is considered fully written once it has been untouched for at least one minute. */
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
    var cutoff = clock.instant().minusSeconds(60);
    return fileAttributes.lastModifiedTime().isBefore(cutoff);
}
} |
No problem overwriting it, this is done to avoid having to reprocess the coredump with `gdb` etc. | void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
} | String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken); | void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COMPRESSED_EXTENSION = ".zstd";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics) {
this(coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/);
}
CoredumpHandler(CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier) {
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(containerCrashPath, containerProcessingPath)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(containerCrashPath, containerProcessingPath));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<ContainerPath> enqueueCoredump(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(this::isReadyForProcessing)
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = (ContainerPath) uncheck(() -> Files.createDirectories(containerProcessingPath.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
/**
 * @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
 * {@link CoreCollector} and stores it to metadata.json
 */
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!metadataPath.exists()) {
// First processing attempt: collect metadata via CoreCollector and persist it alongside the dump.
ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
metadata.putAll(nodeAttributesSupplier.get());
// Point at where the dump will end up once processing has moved it to the done directory.
metadata.put("coredump_path", doneCoredumpsPath
.resolve(context.containerName().asString())
.resolve(coredumpDirectory.getFileName().toString())
.resolve(coredumpFile.getFileName().toString()).toString());
decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
// NOTE(review): when metadata.json already exists it is returned as-is, so a decryptionToken
// supplied on a retry is silently ignored and the stored token may be stale — confirm intended.
return metadataPath.readUtf8File();
}
}
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
return sharedCoreKey
.map(key -> (OutputStream)new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(key)))
.orElse(wrappedStream);
}
/**
* Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
* the entire core dump processing directory to {@link
*/
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) throws IOException {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()));
}
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
return (ContainerPath) FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
.and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value());
node.owner().ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
);
node.membership().ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.type().value())
.add("clusterid", membership.clusterId())
);
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
dimensionsBuilder.add("system", context.zone().getSystemName().value());
return dimensionsBuilder.build();
}
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
return clock.instant().minusSeconds(60).isAfter(fileAttributes.lastModifiedTime());
}
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COMPRESSED_EXTENSION = ".zstd";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics) {
this(coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/);
}
CoredumpHandler(CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier) {
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(containerCrashPath, containerProcessingPath)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(containerCrashPath, containerProcessingPath));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<ContainerPath> enqueueCoredump(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(this::isReadyForProcessing)
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = (ContainerPath) uncheck(() -> Files.createDirectories(containerProcessingPath.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!metadataPath.exists()) {
ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
metadata.putAll(nodeAttributesSupplier.get());
metadata.put("coredump_path", doneCoredumpsPath
.resolve(context.containerName().asString())
.resolve(coredumpDirectory.getFileName().toString())
.resolve(coredumpFile.getFileName().toString()).toString());
decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
if (decryptionToken.isPresent()) {
String metadataFields = metadataWithPatchedTokenValue(metadataPath, decryptionToken.get());
metadataPath.deleteIfExists();
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
}
private String metadataWithPatchedTokenValue(UnixPath metadataPath, String decryptionToken) throws JsonProcessingException {
var jsonRoot = objectMapper.readTree(metadataPath.readUtf8File());
if (jsonRoot.path("fields").isObject()) {
((ObjectNode)jsonRoot.get("fields")).put("decryption_token", decryptionToken);
}
return objectMapper.writeValueAsString(jsonRoot);
}
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
return sharedCoreKey
.map(key -> (OutputStream)new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(key)))
.orElse(wrappedStream);
}
/**
* Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
* the entire core dump processing directory to {@link
*/
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) throws IOException {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()));
}
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
return (ContainerPath) FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
.and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value());
node.owner().ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
);
node.membership().ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.type().value())
.add("clusterid", membership.clusterId())
);
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
dimensionsBuilder.add("system", context.zone().getSystemName().value());
return dimensionsBuilder.build();
}
/** A core file is considered fully written once it has been unmodified for at least one minute. */
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
    var cutoff = clock.instant().minusSeconds(60);
    return fileAttributes.lastModifiedTime().isBefore(cutoff);
}
} |
Understood. Presumably this should only happen if either a) core dump reporting fails, or b) writing the compressed/encrypted core dump fails? I'd expect both of those to be fairly rare occurrences. For now I think the pragmatic route with overwriting should be fine; we can later consider other options such as patching in a new token into the existing metadata if we feel it makes sense (it's "just JSON"™ after all). | void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
} | String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken); | void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COMPRESSED_EXTENSION = ".zstd";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
/**
 * Production constructor: uses the system UTC clock and random UUIDs as coredump ids.
 *
 * @param crashPathInContainer path inside the container where core dump are dumped
 * @param doneCoredumpsPath path on host where processed core dumps are stored
 */
// NOTE(review): 'terminal' is never used by this class, and the secret shared key supplier is a
// null TODO, so cores processed via this constructor are compressed but not encrypted.
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics) {
this(coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/);
}
/**
 * Injection constructor (used by tests): takes an explicit clock, coredump-id supplier and
 * encryption key supplier. A supplier returning null disables encryption of processed cores
 * (see how the value is wrapped in Optional.ofNullable by the processing path).
 */
CoredumpHandler(CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier) {
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer; // field name has a typo ("Patch"); kept as-is for compatibility
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
}
/**
 * Runs one convergence pass: updates core dump metrics, optionally fails fast if any core is
 * still being written, then processes and reports at most one core dump.
 *
 * @param throwIfCoreBeingWritten when true, throws a ConvergenceException error if any file in
 *        the crash directory was modified less than a minute ago (see isReadyForProcessing)
 */
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
// Collect names of files still being written; report names only when there are few of them.
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
// At most one core dump is processed per pass.
getCoredumpToProcess(containerCrashPath, containerProcessingPath)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/**
 * Returns a directory under processing/ that holds a core dump to work on. A previously
 * enqueued directory takes precedence; otherwise an attempt is made to enqueue a new one.
 */
Optional<ContainerPath> getCoredumpToProcess(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
    Optional<ContainerPath> alreadyEnqueued = FileFinder.directories(containerProcessingPath).stream()
            .findAny()
            .map(attributes -> (ContainerPath) attributes.path());
    if (alreadyEnqueued.isPresent()) return alreadyEnqueued;
    return enqueueCoredump(containerCrashPath, containerProcessingPath);
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<ContainerPath> enqueueCoredump(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
// Candidate files, oldest first, that have been stable for at least a minute.
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(this::isReadyForProcessing)
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
// Index of the first file that is NOT an hs_err log, i.e. the actual core dump. Everything
// before it (older, smaller hs_err files) is assumed to belong to the same crash.
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty(); // only hs_err files (or nothing) -- no core to enqueue
ContainerPath enqueuedDir = (ContainerPath) uncheck(() -> Files.createDirectories(containerProcessingPath.resolve(coredumpIdSupplier.get())));
// Move the core dump and its preceding hs_err files; only the core gets the dump_ prefix.
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
/**
 * Returns the coredump metadata JSON for the given processing directory.
 *
 * If metadata.json does not exist yet, it is generated from {@link CoreCollector} output, the
 * node attributes and (when encryption is enabled) the decryption token, then persisted.
 * If it already exists but a decryption token is supplied, the stored token is patched to the
 * current value: a fresh shared key is generated on each processing attempt, so keeping a stale
 * token from a previous failed attempt would make the re-encrypted core undecryptable.
 *
 * @throws IOException if the metadata file cannot be read or written
 */
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
    UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
    if (!metadataPath.exists()) {
        ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
        Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
        metadata.putAll(nodeAttributesSupplier.get());
        // Path the core will have once finishProcessing() has moved it to the done area.
        metadata.put("coredump_path", doneCoredumpsPath
                .resolve(context.containerName().asString())
                .resolve(coredumpDirectory.getFileName().toString())
                .resolve(coredumpFile.getFileName().toString()).toString());
        decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
        String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
        metadataPath.writeUtf8File(metadataFields);
        return metadataFields;
    } else if (decryptionToken.isPresent()) {
        // Metadata persisted by an earlier, failed attempt: rewrite it with the current token.
        String metadataFields = metadataWithPatchedTokenValue(metadataPath, decryptionToken.get());
        metadataPath.deleteIfExists();
        metadataPath.writeUtf8File(metadataFields);
        return metadataFields;
    } else {
        return metadataPath.readUtf8File();
    }
}

/** Returns the stored metadata JSON with "fields.decryption_token" replaced by the given token. */
private String metadataWithPatchedTokenValue(UnixPath metadataPath, String decryptionToken) throws IOException {
    var jsonRoot = objectMapper.readTree(metadataPath.readUtf8File());
    if (jsonRoot.path("fields").isObject()) {
        ((com.fasterxml.jackson.databind.node.ObjectNode) jsonRoot.get("fields")).put("decryption_token", decryptionToken);
    }
    return objectMapper.writeValueAsString(jsonRoot);
}
/**
 * Wraps the given stream in an AES-GCM encrypting stream when a shared key is present;
 * otherwise returns the stream unchanged.
 */
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
    if (sharedCoreKey.isEmpty()) {
        return wrappedStream;
    }
    return new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(sharedCoreKey.get()));
}
/**
 * Compresses and, if a key is provided, encrypts the core file (and deletes the uncompressed
 * core), then moves the entire core dump processing directory to {@code doneCoredumpsPath}.
 */
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) throws IOException {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
// Encrypted cores get a double extension, e.g. dump_xyz.zstd.enc
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
// Compress (and encrypt when a key is present) the raw core into the sibling file.
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
// Raw core is removed only after the compressed copy has been fully written and closed.
Files.delete(coreFile);
// Move the whole processing directory (metadata + compressed core) to the done area on the host.
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()));
}
/**
 * Locates the enqueued core dump file (dump_ prefix, not yet compressed or encrypted) directly
 * inside the given processing directory.
 *
 * @throws IllegalStateException if no such file exists
 */
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
    Optional<FileFinder.FileAttributes> coreFile = FileFinder.files(coredumpProccessingDirectory)
            .match(nameStartsWith(COREDUMP_FILENAME_PREFIX)
                    .and(nameEndsWith(COMPRESSED_EXTENSION).negate())
                    .and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
            .maxDepth(1)
            .stream()
            .findFirst();
    if (coreFile.isEmpty())
        throw new IllegalStateException(
                "No coredump file found in processing directory " + coredumpProccessingDirectory);
    return (ContainerPath) coreFile.get().path();
}
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value());
node.owner().ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
);
node.membership().ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.type().value())
.add("clusterid", membership.clusterId())
);
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
dimensionsBuilder.add("system", context.zone().getSystemName().value());
return dimensionsBuilder.build();
}
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
return clock.instant().minusSeconds(60).isAfter(fileAttributes.lastModifiedTime());
}
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COMPRESSED_EXTENSION = ".zstd";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics) {
this(coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/);
}
CoredumpHandler(CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier) {
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(containerCrashPath, containerProcessingPath)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(containerCrashPath, containerProcessingPath));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<ContainerPath> enqueueCoredump(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(this::isReadyForProcessing)
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = (ContainerPath) uncheck(() -> Files.createDirectories(containerProcessingPath.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!metadataPath.exists()) {
ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
metadata.putAll(nodeAttributesSupplier.get());
metadata.put("coredump_path", doneCoredumpsPath
.resolve(context.containerName().asString())
.resolve(coredumpDirectory.getFileName().toString())
.resolve(coredumpFile.getFileName().toString()).toString());
decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
if (decryptionToken.isPresent()) {
String metadataFields = metadataWithPatchedTokenValue(metadataPath, decryptionToken.get());
metadataPath.deleteIfExists();
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
}
private String metadataWithPatchedTokenValue(UnixPath metadataPath, String decryptionToken) throws JsonProcessingException {
var jsonRoot = objectMapper.readTree(metadataPath.readUtf8File());
if (jsonRoot.path("fields").isObject()) {
((ObjectNode)jsonRoot.get("fields")).put("decryption_token", decryptionToken);
}
return objectMapper.writeValueAsString(jsonRoot);
}
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
return sharedCoreKey
.map(key -> (OutputStream)new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(key)))
.orElse(wrappedStream);
}
/**
* Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
* the entire core dump processing directory to {@link
*/
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) throws IOException {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()));
}
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
return (ContainerPath) FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
.and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value());
node.owner().ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
);
node.membership().ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.type().value())
.add("clusterid", membership.clusterId())
);
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
dimensionsBuilder.add("system", context.zone().getSystemName().value());
return dimensionsBuilder.build();
}
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
return clock.instant().minusSeconds(60).isAfter(fileAttributes.lastModifiedTime());
}
} |
Thanks, I'll make sure to update the schema prior to wiring this in anywhere. | String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!metadataPath.exists()) {
ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
metadata.putAll(nodeAttributesSupplier.get());
metadata.put("coredump_path", doneCoredumpsPath
.resolve(context.containerName().asString())
.resolve(coredumpDirectory.getFileName().toString())
.resolve(coredumpFile.getFileName().toString()).toString());
decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
} | decryptionToken.ifPresent(token -> metadata.put("decryption_token", token)); | String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!metadataPath.exists()) {
ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
metadata.putAll(nodeAttributesSupplier.get());
metadata.put("coredump_path", doneCoredumpsPath
.resolve(context.containerName().asString())
.resolve(coredumpDirectory.getFileName().toString())
.resolve(coredumpFile.getFileName().toString()).toString());
decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
if (decryptionToken.isPresent()) {
String metadataFields = metadataWithPatchedTokenValue(metadataPath, decryptionToken.get());
metadataPath.deleteIfExists();
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COMPRESSED_EXTENSION = ".zstd";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics) {
this(coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/);
}
CoredumpHandler(CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier) {
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(containerCrashPath, containerProcessingPath)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(containerCrashPath, containerProcessingPath));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<ContainerPath> enqueueCoredump(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(this::isReadyForProcessing)
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = (ContainerPath) uncheck(() -> Files.createDirectories(containerProcessingPath.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
return sharedCoreKey
.map(key -> (OutputStream)new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(key)))
.orElse(wrappedStream);
}
/**
* Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
* the entire core dump processing directory to {@link
*/
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) throws IOException {
    ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
    // Always ".zstd"; additionally ".enc" when the stream below is wrapped in encryption.
    String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
    ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
    try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
         OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
        zcis.transferTo(fos);
    } catch (IOException e) {
        // Don't leave a truncated compressed artifact behind for a later retry to archive.
        Files.deleteIfExists(compressedCoreFile);
        // This method already declares "throws IOException" — propagate directly instead of
        // wrapping in UncheckedIOException as before (the caller catches Exception either way).
        throw e;
    }
    Files.delete(coreFile);
    Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
    uncheck(() -> Files.createDirectories(newCoredumpDirectory));
    // Move via the host-side path; the archive lives outside the container's tree.
    Files.move(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()));
}
/**
 * Returns the core file staged in the given processing directory: the first direct child whose
 * name starts with {@code COREDUMP_FILENAME_PREFIX} and is not an already compressed/encrypted
 * artifact (".zstd"/".enc").
 *
 * @throws IllegalStateException if no such file exists
 */
// NOTE(review): parameter name has a typo ("Proccessing"); harmless, but worth fixing in a rename pass.
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
    return (ContainerPath) FileFinder.files(coredumpProccessingDirectory)
            .match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
                    .and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
            .maxDepth(1)
            .stream()
            .map(FileFinder.FileAttributes::path)
            .findFirst()
            .orElseThrow(() -> new IllegalStateException(
                    "No coredump file found in processing directory " + coredumpProccessingDirectory));
}
/**
 * Samples two pretagged gauges: "coredumps.enqueued" (files in the crash directory that still
 * await processing) and "coredumps.processed" (archived dump directories for this container).
 */
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
    Dimensions dimensions = generateDimensions(context);
    // Count candidate dumps only: exclude hidden files, hs_err logs, already
    // compressed/encrypted artifacts and metadata files.
    int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
            .match(nameStartsWith(".").negate())
            .match(nameMatches(HS_ERR_PATTERN).negate())
            .match(nameEndsWith(COMPRESSED_EXTENSION).negate())
            .match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
            .match(nameStartsWith("metadata").negate())
            .list().size();
    metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
    // Each archived dump is one directory under <doneCoredumpsPath>/<container>.
    Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
    int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
            .maxDepth(1)
            .list().size();
    metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
/** Builds the pretagged metric dimensions (host, owner, cluster, zone, ...) for this node. */
private Dimensions generateDimensions(NodeAgentContext context) {
    NodeSpec node = context.node();
    Dimensions.Builder builder = new Dimensions.Builder();
    builder.add("host", node.hostname());
    builder.add("flavor", node.flavor());
    builder.add("state", node.state().toString());
    builder.add("zone", context.zone().getId().value());
    // Owner dimensions are only present for allocated nodes.
    node.owner().ifPresent(owner -> {
        builder.add("tenantName", owner.tenant().value());
        builder.add("applicationName", owner.application().value());
        builder.add("instanceName", owner.instance().value());
        builder.add("app", String.join(".", owner.application().value(), owner.instance().value()));
        builder.add("applicationId", owner.toFullString());
    });
    node.membership().ifPresent(membership -> {
        builder.add("clustertype", membership.type().value());
        builder.add("clusterid", membership.clusterId());
    });
    node.parentHostname().ifPresent(parent -> builder.add("parentHostname", parent));
    builder.add("orchestratorState", node.orchestratorStatus().asString());
    builder.add("system", context.zone().getSystemName().value());
    return builder.build();
}
// A file is considered safe to process once it has been quiescent for at least 60 seconds —
// presumably enough for the kernel/JVM to have finished writing it (TODO confirm the window).
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
    return clock.instant().minusSeconds(60).isAfter(fileAttributes.lastModifiedTime());
}
} | class CoredumpHandler {
// Matches JVM fatal-error logs, e.g. "hs_err_pid1234.log", which may accompany a core dump.
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
// Subdirectory of the crash directory where dumps are staged while being processed.
private static final String PROCESSING_DIRECTORY_NAME = "processing";
// JSON metadata file written next to the core inside its processing directory.
private static final String METADATA_FILE_NAME = "metadata.json";
// Extension appended after zstd compression.
private static final String COMPRESSED_EXTENSION = ".zstd";
// Extension appended (after the compressed extension) when the dump is also encrypted.
private static final String ENCRYPTED_EXTENSION = ".enc";
// Prefix given to the core file when it is moved into its processing directory.
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
// NOTE(review): field name has a typo ("Patch" should be "Path"); kept to avoid churn here.
private final String crashPatchInContainer;
// Host-side directory where fully processed dumps are archived, one subdirectory per container.
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
// Produces the directory name / id for each newly enqueued dump (a random UUID in production).
private final Supplier<String> coredumpIdSupplier;
// Supplies the key used to encrypt dumps; may return null, in which case dumps are only compressed.
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
/**
 * Production constructor: UTC clock, random UUID dump ids and, for now, no core encryption
 * (the secret-key supplier returns null — see the TODO below).
 *
 * @param terminal unused — NOTE(review): consider dropping this parameter
 * @param crashPathInContainer path inside the container where core dump are dumped
 * @param doneCoredumpsPath path on host where processed core dumps are stored
 */
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
                       String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics) {
    this(coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
         metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/);
}
/**
 * Package-private constructor taking every collaborator and source of nondeterminism (clock,
 * id generation, encryption key) explicitly — used by the public constructor and, presumably,
 * by tests.
 */
CoredumpHandler(CoreCollector coreCollector, CoredumpReporter coredumpReporter,
                String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
                Clock clock, Supplier<String> coredumpIdSupplier,
                Supplier<SecretSharedKey> secretSharedKeySupplier) {
    this.coreCollector = coreCollector;
    this.coredumpReporter = coredumpReporter;
    this.crashPatchInContainer = crashPathInContainer;
    this.doneCoredumpsPath = doneCoredumpsPath;
    this.metrics = metrics;
    this.clock = clock;
    this.coredumpIdSupplier = coredumpIdSupplier;
    this.secretSharedKeySupplier = secretSharedKeySupplier;
}
/**
 * One convergence pass: refreshes the coredump metrics, optionally fails fast while a core is
 * still being written, then enqueues and processes at most one coredump.
 *
 * @param throwIfCoreBeingWritten when true, throw a ConvergenceException error if any file in
 *        the crash directory was modified within the quiescence window (see isReadyForProcessing)
 */
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier, boolean throwIfCoreBeingWritten) {
    ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
    ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
    updateMetrics(context, containerCrashPath);
    if (throwIfCoreBeingWritten) {
        // Recently-modified files are assumed to still be in the middle of being dumped.
        List<String> pendingCores = FileFinder.files(containerCrashPath)
                .match(fileAttributes -> !isReadyForProcessing(fileAttributes))
                .maxDepth(1).stream()
                .map(FileFinder.FileAttributes::filename)
                .toList();
        if (!pendingCores.isEmpty())
            // List the file names when few, otherwise just the count, to keep the message bounded.
            throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
                    pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
    }
    // At most one dump per pass: one already staged under processing/, or the oldest ready one.
    getCoredumpToProcess(containerCrashPath, containerProcessingPath)
            .ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/**
 * Returns the directory inside the processing directory that contains a core dump to work on:
 * an already-staged one if present, otherwise the result of enqueueing a new one.
 */
Optional<ContainerPath> getCoredumpToProcess(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
    Optional<ContainerPath> alreadyStaged = FileFinder.directories(containerProcessingPath).stream()
            .map(FileFinder.FileAttributes::path)
            .findAny()
            .map(ContainerPath.class::cast);
    return alreadyStaged.or(() -> enqueueCoredump(containerCrashPath, containerProcessingPath));
}
/**
 * Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
 * Limited to one coredump at a time, starting with the oldest.
 *
 * Assumption: hs_err files are much smaller than core files and are written (last modified time)
 * before the core file.
 *
 * @return path to the directory inside the processing directory which contains the enqueued
 *         core dump file, or empty if no ready core dump was found
 */
Optional<ContainerPath> enqueueCoredump(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
    // Ready files in the crash directory, oldest first.
    List<Path> candidates = FileFinder.files(containerCrashPath)
            .match(this::isReadyForProcessing)
            .maxDepth(1)
            .stream()
            .sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
            .map(FileFinder.FileAttributes::path)
            .toList();

    // Index of the first (oldest) entry that is an actual core dump rather than an hs_err log.
    int dumpIndex = -1;
    for (int i = 0; i < candidates.size(); i++) {
        if (!HS_ERR_PATTERN.matcher(candidates.get(i).getFileName().toString()).matches()) {
            dumpIndex = i;
            break;
        }
    }
    if (dumpIndex < 0) return Optional.empty();

    ContainerPath enqueuedDir = (ContainerPath) uncheck(() -> Files.createDirectories(containerProcessingPath.resolve(coredumpIdSupplier.get())));
    // Move the dump plus every older hs_err file; only the dump itself gets the "dump_" prefix.
    for (int i = 0; i <= dumpIndex; i++) {
        Path source = candidates.get(i);
        String prefix = i == dumpIndex ? COREDUMP_FILENAME_PREFIX : "";
        uncheck(() -> Files.move(source, enqueuedDir.resolve(prefix + source.getFileName())));
    }
    return Optional.of(enqueuedDir);
}
/**
 * Collects (or re-reads) metadata for the single dump staged in {@code coredumpDirectory},
 * reports it, then compresses/encrypts the core and archives the dump directory.
 * Any failure is rethrown as a RuntimeException naming the directory.
 */
void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
    try {
        // A null key from the supplier means the dump is compressed but not encrypted.
        Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
        Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
        String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
        String coredumpId = coredumpDirectory.getFileName().toString();
        coredumpReporter.reportCoredump(coredumpId, metadata);
        // Archive only after reporting succeeded, so a failed report is retried next pass.
        finishProcessing(context, coredumpDirectory, sharedCoreKey);
        context.log(logger, "Successfully reported coredump " + coredumpId);
    } catch (Exception e) {
        throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
    }
}
/**
 * Returns the given metadata JSON with {@code fields.decryption_token} overwritten by the given
 * token; if the document has no "fields" object, the JSON is returned unchanged.
 * (The previous Javadoc here described getMetadata and did not match this method.)
 */
private String metadataWithPatchedTokenValue(UnixPath metadataPath, String decryptionToken) throws JsonProcessingException {
    var jsonRoot = objectMapper.readTree(metadataPath.readUtf8File());
    if (jsonRoot.path("fields").isObject()) {
        ((ObjectNode)jsonRoot.get("fields")).put("decryption_token", decryptionToken);
    }
    return objectMapper.writeValueAsString(jsonRoot);
}
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
return sharedCoreKey
.map(key -> (OutputStream)new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(key)))
.orElse(wrappedStream);
}
/**
 * Compresses and, if a key is provided, encrypts the core file (and deletes the uncompressed
 * core), then moves the entire core dump processing directory to the per-container
 * subdirectory of {@code doneCoredumpsPath} on the host.
 */
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) throws IOException {
    ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
    // Always ".zstd"; additionally ".enc" when the output stream below is encrypted.
    String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
    ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
    try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
         OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
        zcis.transferTo(fos);
    } catch (IOException e) {
        // NOTE(review): wrapping is redundant — the method already declares "throws IOException";
        // a partially written compressedCoreFile is also left behind on this path.
        throw new UncheckedIOException(e);
    }
    Files.delete(coreFile);
    Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
    uncheck(() -> Files.createDirectories(newCoredumpDirectory));
    // Move via the host-side path; the archive directory lives outside the container's tree.
    Files.move(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()));
}
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
return (ContainerPath) FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
.and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
/**
 * Samples two pretagged gauges: "coredumps.enqueued" (crash-directory files still awaiting
 * processing) and "coredumps.processed" (archived dump directories for this container).
 */
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
    Dimensions dims = generateDimensions(context);

    // Candidate dumps only: skip hidden files, hs_err logs, compressed/encrypted artifacts
    // and metadata files.
    int enqueued = FileFinder.files(containerCrashPath)
            .match(nameStartsWith(".").negate())
            .match(nameMatches(HS_ERR_PATTERN).negate())
            .match(nameEndsWith(COMPRESSED_EXTENSION).negate())
            .match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
            .match(nameStartsWith("metadata").negate())
            .list()
            .size();
    metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dims, Metrics.DimensionType.PRETAGGED)
           .sample(enqueued);

    // Each archived dump is one directory under <doneCoredumpsPath>/<container>.
    Path doneDirForContainer = doneCoredumpsPath.resolve(context.containerName().asString());
    int processed = FileFinder.directories(doneDirForContainer)
            .maxDepth(1)
            .list()
            .size();
    metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dims, Metrics.DimensionType.PRETAGGED)
           .sample(processed);
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value());
node.owner().ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
);
node.membership().ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.type().value())
.add("clusterid", membership.clusterId())
);
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
dimensionsBuilder.add("system", context.zone().getSystemName().value());
return dimensionsBuilder.build();
}
// A file is safe to process once quiescent for at least 60 seconds, i.e. its last modification
// is older than (now - 60s).
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
    return fileAttributes.lastModifiedTime().isBefore(clock.instant().minusSeconds(60));
}
} |
Agree :+1: | void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
} | String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken); | void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COMPRESSED_EXTENSION = ".zstd";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics) {
this(coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/);
}
CoredumpHandler(CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier) {
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(containerCrashPath, containerProcessingPath)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(containerCrashPath, containerProcessingPath));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<ContainerPath> enqueueCoredump(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(this::isReadyForProcessing)
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = (ContainerPath) uncheck(() -> Files.createDirectories(containerProcessingPath.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
/**
 * Returns coredump metadata from metadata.json if present, otherwise collects metadata using
 * {@link CoreCollector} and stores it to metadata.json.
 *
 * If metadata.json already exists (a previous attempt wrote it but reporting/processing then
 * failed) and a decryption token is supplied now, the stored token is refreshed: it referred to
 * the key used on the earlier attempt, while finishProcessing() will encrypt the core with the
 * current key — reporting the stale token would make the dump undecryptable.
 */
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
    UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
    if (!metadataPath.exists()) {
        // First attempt for this dump: collect metadata and persist it for possible retries.
        ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
        Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
        metadata.putAll(nodeAttributesSupplier.get());
        // Path the core will end up at once archived (see finishProcessing), not where it is now.
        metadata.put("coredump_path", doneCoredumpsPath
                .resolve(context.containerName().asString())
                .resolve(coredumpDirectory.getFileName().toString())
                .resolve(coredumpFile.getFileName().toString()).toString());
        decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
        String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
        metadataPath.writeUtf8File(metadataFields);
        return metadataFields;
    }
    if (decryptionToken.isPresent()) {
        // Refresh the stale token from the earlier attempt before re-reporting.
        String metadataFields = metadataWithRefreshedToken(metadataPath.readUtf8File(), decryptionToken.get());
        metadataPath.deleteIfExists();
        metadataPath.writeUtf8File(metadataFields);
        return metadataFields;
    }
    return metadataPath.readUtf8File();
}

/** Returns the metadata JSON with fields.decryption_token replaced; other content is untouched. */
private String metadataWithRefreshedToken(String metadataJson, String decryptionToken) throws IOException {
    @SuppressWarnings("unchecked")
    Map<String, Object> root = objectMapper.readValue(metadataJson, Map.class);
    Object fields = root.get("fields");
    if (fields instanceof Map) {
        @SuppressWarnings("unchecked")
        Map<String, Object> fieldsMap = (Map<String, Object>) fields;
        fieldsMap.put("decryption_token", decryptionToken);
    }
    return objectMapper.writeValueAsString(root);
}
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
return sharedCoreKey
.map(key -> (OutputStream)new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(key)))
.orElse(wrappedStream);
}
/**
 * Compresses and, if a key is provided, encrypts the core file (and deletes the uncompressed
 * core), then moves the entire core dump processing directory to the per-container
 * subdirectory of the done-coredumps path on the host.
 */
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) throws IOException {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()));
}
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
return (ContainerPath) FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
.and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value());
node.owner().ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
);
node.membership().ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.type().value())
.add("clusterid", membership.clusterId())
);
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
dimensionsBuilder.add("system", context.zone().getSystemName().value());
return dimensionsBuilder.build();
}
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
return clock.instant().minusSeconds(60).isAfter(fileAttributes.lastModifiedTime());
}
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COMPRESSED_EXTENSION = ".zstd";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics) {
this(coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/);
}
CoredumpHandler(CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier) {
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(containerCrashPath, containerProcessingPath)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(containerCrashPath, containerProcessingPath));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<ContainerPath> enqueueCoredump(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(this::isReadyForProcessing)
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = (ContainerPath) uncheck(() -> Files.createDirectories(containerProcessingPath.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
/**
 * @return coredump metadata from metadata.json if present (refreshing the decryption token when
 * one is supplied), otherwise attempts to get metadata using
 * {@link CoreCollector} and stores it to metadata.json
 */
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
    UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
    if (!metadataPath.exists()) {
        // First attempt for this dump: collect metadata and persist it for possible retries.
        ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
        Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
        metadata.putAll(nodeAttributesSupplier.get());
        // Path the core will end up at once archived (see finishProcessing), not where it is now.
        metadata.put("coredump_path", doneCoredumpsPath
                .resolve(context.containerName().asString())
                .resolve(coredumpDirectory.getFileName().toString())
                .resolve(coredumpFile.getFileName().toString()).toString());
        decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
        String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
        metadataPath.writeUtf8File(metadataFields);
        return metadataFields;
    } else {
        if (decryptionToken.isPresent()) {
            // Retry with a (possibly new) encryption key: the stored token refers to the key
            // from the earlier attempt, while the core will be encrypted with the current key —
            // patch the token before reporting so the dump stays decryptable.
            String metadataFields = metadataWithPatchedTokenValue(metadataPath, decryptionToken.get());
            metadataPath.deleteIfExists();
            metadataPath.writeUtf8File(metadataFields);
            return metadataFields;
        } else {
            return metadataPath.readUtf8File();
        }
    }
}
private String metadataWithPatchedTokenValue(UnixPath metadataPath, String decryptionToken) throws JsonProcessingException {
var jsonRoot = objectMapper.readTree(metadataPath.readUtf8File());
if (jsonRoot.path("fields").isObject()) {
((ObjectNode)jsonRoot.get("fields")).put("decryption_token", decryptionToken);
}
return objectMapper.writeValueAsString(jsonRoot);
}
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
return sharedCoreKey
.map(key -> (OutputStream)new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(key)))
.orElse(wrappedStream);
}
/**
* Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
* the entire core dump processing directory to {@link
*/
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) throws IOException {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()));
}
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
return (ContainerPath) FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
.and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value());
node.owner().ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
);
node.membership().ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.type().value())
.add("clusterid", membership.clusterId())
);
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
dimensionsBuilder.add("system", context.zone().getSystemName().value());
return dimensionsBuilder.build();
}
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
return clock.instant().minusSeconds(60).isAfter(fileAttributes.lastModifiedTime());
}
} |
Patching in the new token value turned out to be not that much extra work after all, so I went with that. PTAL. | void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
} | String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken); | void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COMPRESSED_EXTENSION = ".zstd";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics) {
this(coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/);
}
CoredumpHandler(CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier) {
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(containerCrashPath, containerProcessingPath)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(containerCrashPath, containerProcessingPath));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<ContainerPath> enqueueCoredump(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(this::isReadyForProcessing)
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = (ContainerPath) uncheck(() -> Files.createDirectories(containerProcessingPath.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!metadataPath.exists()) {
ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
metadata.putAll(nodeAttributesSupplier.get());
metadata.put("coredump_path", doneCoredumpsPath
.resolve(context.containerName().asString())
.resolve(coredumpDirectory.getFileName().toString())
.resolve(coredumpFile.getFileName().toString()).toString());
decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
return sharedCoreKey
.map(key -> (OutputStream)new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(key)))
.orElse(wrappedStream);
}
/**
* Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
* the entire core dump processing directory to {@link
*/
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) throws IOException {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()));
}
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
return (ContainerPath) FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
.and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value());
node.owner().ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
);
node.membership().ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.type().value())
.add("clusterid", membership.clusterId())
);
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
dimensionsBuilder.add("system", context.zone().getSystemName().value());
return dimensionsBuilder.build();
}
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
return clock.instant().minusSeconds(60).isAfter(fileAttributes.lastModifiedTime());
}
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String COMPRESSED_EXTENSION = ".zstd";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
*/
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics) {
this(coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/);
}
CoredumpHandler(CoreCollector coreCollector, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier) {
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(containerCrashPath, containerProcessingPath)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(containerCrashPath, containerProcessingPath));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
Optional<ContainerPath> enqueueCoredump(ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(this::isReadyForProcessing)
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> !HS_ERR_PATTERN.matcher(toProcess.get(i).getFileName().toString()).matches())
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = (ContainerPath) uncheck(() -> Files.createDirectories(containerProcessingPath.resolve(coredumpIdSupplier.get())));
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
uncheck(() -> Files.move(path, enqueuedDir.resolve(prefix + path.getFileName())));
});
return Optional.of(enqueuedDir);
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!metadataPath.exists()) {
ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
metadata.putAll(nodeAttributesSupplier.get());
metadata.put("coredump_path", doneCoredumpsPath
.resolve(context.containerName().asString())
.resolve(coredumpDirectory.getFileName().toString())
.resolve(coredumpFile.getFileName().toString()).toString());
decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
if (decryptionToken.isPresent()) {
String metadataFields = metadataWithPatchedTokenValue(metadataPath, decryptionToken.get());
metadataPath.deleteIfExists();
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
}
private String metadataWithPatchedTokenValue(UnixPath metadataPath, String decryptionToken) throws JsonProcessingException {
var jsonRoot = objectMapper.readTree(metadataPath.readUtf8File());
if (jsonRoot.path("fields").isObject()) {
((ObjectNode)jsonRoot.get("fields")).put("decryption_token", decryptionToken);
}
return objectMapper.writeValueAsString(jsonRoot);
}
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
return sharedCoreKey
.map(key -> (OutputStream)new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(key)))
.orElse(wrappedStream);
}
/**
* Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
* the entire core dump processing directory to {@link
*/
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) throws IOException {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
Files.delete(coreFile);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
uncheck(() -> Files.createDirectories(newCoredumpDirectory));
Files.move(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()));
}
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
return (ContainerPath) FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
.and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value());
node.owner().ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
);
node.membership().ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.type().value())
.add("clusterid", membership.clusterId())
);
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
dimensionsBuilder.add("system", context.zone().getSystemName().value());
return dimensionsBuilder.build();
}
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
return clock.instant().minusSeconds(60).isAfter(fileAttributes.lastModifiedTime());
}
} |
Use `Files.exists(path)` instead of converting to `File`. The latter is basically the legacy API for file management. | public static InputStream inputStreamFromFileOrStream(String pathOrDash, InputStream stdIn) throws IOException {
if ("-".equals(pathOrDash)) {
return stdIn;
} else {
var inputPath = Paths.get(pathOrDash);
if (!inputPath.toFile().exists()) {
throw new IllegalArgumentException("Input file '%s' does not exist".formatted(inputPath.toString()));
}
return Files.newInputStream(inputPath);
}
} | if (!inputPath.toFile().exists()) { | public static InputStream inputStreamFromFileOrStream(String pathOrDash, InputStream stdIn) throws IOException {
if ("-".equals(pathOrDash)) {
return stdIn;
} else {
var inputPath = Paths.get(pathOrDash);
if (!Files.exists(inputPath)) {
throw new IllegalArgumentException("Input file '%s' does not exist".formatted(inputPath.toString()));
}
return Files.newInputStream(inputPath);
}
} | class CliUtils {
public static String optionOrThrow(CommandLine arguments, String option) {
var value = arguments.getOptionValue(option);
if (value == null) {
throw new IllegalArgumentException("Required argument '--%s' must be provided".formatted(option));
}
return value;
}
public static OutputStream outputStreamToFileOrStream(String pathOrDash, OutputStream stdOut) throws IOException {
if ("-".equals(pathOrDash)) {
return stdOut;
} else {
return Files.newOutputStream(Paths.get(pathOrDash));
}
}
} | class CliUtils {
public static String optionOrThrow(CommandLine arguments, String option) {
var value = arguments.getOptionValue(option);
if (value == null) {
throw new IllegalArgumentException("Required argument '--%s' must be provided".formatted(option));
}
return value;
}
public static OutputStream outputStreamToFileOrStream(String pathOrDash, OutputStream stdOut) throws IOException {
if ("-".equals(pathOrDash)) {
return stdOut;
} else {
return Files.newOutputStream(Paths.get(pathOrDash));
}
}
} |
Fixed here and in some other places as well. | public static InputStream inputStreamFromFileOrStream(String pathOrDash, InputStream stdIn) throws IOException {
if ("-".equals(pathOrDash)) {
return stdIn;
} else {
var inputPath = Paths.get(pathOrDash);
if (!inputPath.toFile().exists()) {
throw new IllegalArgumentException("Input file '%s' does not exist".formatted(inputPath.toString()));
}
return Files.newInputStream(inputPath);
}
} | if (!inputPath.toFile().exists()) { | public static InputStream inputStreamFromFileOrStream(String pathOrDash, InputStream stdIn) throws IOException {
if ("-".equals(pathOrDash)) {
return stdIn;
} else {
var inputPath = Paths.get(pathOrDash);
if (!Files.exists(inputPath)) {
throw new IllegalArgumentException("Input file '%s' does not exist".formatted(inputPath.toString()));
}
return Files.newInputStream(inputPath);
}
} | class CliUtils {
public static String optionOrThrow(CommandLine arguments, String option) {
var value = arguments.getOptionValue(option);
if (value == null) {
throw new IllegalArgumentException("Required argument '--%s' must be provided".formatted(option));
}
return value;
}
public static OutputStream outputStreamToFileOrStream(String pathOrDash, OutputStream stdOut) throws IOException {
if ("-".equals(pathOrDash)) {
return stdOut;
} else {
return Files.newOutputStream(Paths.get(pathOrDash));
}
}
} | class CliUtils {
public static String optionOrThrow(CommandLine arguments, String option) {
var value = arguments.getOptionValue(option);
if (value == null) {
throw new IllegalArgumentException("Required argument '--%s' must be provided".formatted(option));
}
return value;
}
public static OutputStream outputStreamToFileOrStream(String pathOrDash, OutputStream stdOut) throws IOException {
if ("-".equals(pathOrDash)) {
return stdOut;
} else {
return Files.newOutputStream(Paths.get(pathOrDash));
}
}
} |
This suggests it is possible to end up with a ...host in ready but with wantToDeprovision -- why do we want to allow that? I thought the idea was that with wantToDeprovision, when host-admin reports the host is ready, the config server should instead move it to parked. Also, the check in markNodeAvailableForNewAllocation() about reasonsToFailHost must be ignored with wantToDeprovision. | public Node setReady(NodeMutex nodeMutex, Agent agent, String reason) {
Node node = nodeMutex.node();
if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
if (!node.status().wantToDeprovision())
node = node.withWantToRetire(false, false, false, agent, clock.instant());
return db.writeTo(Node.State.ready, node, agent, Optional.of(reason));
} | if (!node.status().wantToDeprovision()) | public Node setReady(NodeMutex nodeMutex, Agent agent, String reason) {
Node node = nodeMutex.node();
if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
if (!node.status().wantToDeprovision())
node = node.withWantToRetire(false, false, false, agent, clock.instant());
return db.writeTo(Node.State.ready, node, agent, Optional.of(reason));
} | class Nodes {
private static final Logger log = Logger.getLogger(Nodes.class.getName());
private final CuratorDatabaseClient db;
private final Zone zone;
private final Clock clock;
private final Orchestrator orchestrator;
private final Applications applications;
/** Creates a node collection backed by the given curator database client. */
public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock, Orchestrator orchestrator, Applications applications) {
    this.db = db;
    this.zone = zone;
    this.clock = clock;
    this.orchestrator = orchestrator;
    this.applications = applications;
}
/** Reads and re-writes every node so all are stored in the newest version of the serialized format. */
public void rewrite() {
    Instant begin = clock.instant();
    int written = 0;
    for (var state : Node.State.values()) {
        var nodesInState = db.readNodes(state);
        db.writeTo(state, nodesInState, Agent.system, Optional.empty());
        written += nodesInState.size();
    }
    log.log(Level.INFO, String.format("Rewrote %d nodes in %s", written, Duration.between(begin, clock.instant())));
}
/**
 * Finds and returns the node with the hostname in any of the given states, or empty if not found
 *
 * @param hostname the full host name of the node
 * @param inState the states the node may be in. If no states are given, it will be returned from any state
 * @return the node, or empty if it was not found in any of the given states
 */
public Optional<Node> node(String hostname, Node.State... inState) {
    // Thin delegate; the state filtering happens in the database read
    return db.readNode(hostname, inState);
}
/**
 * Returns a list of nodes in this repository in any of the given states
 *
 * @param inState the states to return nodes from. If no states are given, all nodes are returned
 */
public NodeList list(Node.State... inState) {
    return NodeList.copyOf(db.readNodes(inState));
}

/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
    return new LockedNodeList(list().asList(), lock);
}
/**
 * Returns whether the zone managed by this node repository seems to be working:
 * when more than 20 % of the active nodes are down, there is probably a zone-wide
 * issue and we should refrain from making changes to it.
 */
public boolean isWorking() {
    NodeList active = list(Node.State.active);
    if (active.size() <= 5) return true; // too few nodes to draw any conclusion
    return (double) active.down().size() / (double) active.size() <= 0.2;
}
/** Adds a list of newly created reserved nodes to the node repository */
public List<Node> addReservedNodes(LockedNodeList nodes) {
    for (Node candidate : nodes) {
        // Only allocated child (container) nodes may be added through this path
        if (candidate.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
            illegal("Cannot add " + candidate + ": This is not a child node");
        if (candidate.allocation().isEmpty())
            illegal("Cannot add " + candidate + ": Child nodes need to be allocated");
        if (node(candidate.hostname()).isPresent())
            illegal("Cannot add " + candidate + ": A node with this name already exists");
    }
    return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
}
/**
 * Adds a list of (newly created) nodes to the node repository as provisioned nodes.
 * If any of the nodes already exists in the deprovisioned state, the new node will be merged
 * with the history of that node.
 */
public List<Node> addNodes(List<Node> nodes, Agent agent) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesToAdd = new ArrayList<>();
        List<Node> nodesToRemove = new ArrayList<>();
        for (int i = 0; i < nodes.size(); i++) {
            var node = nodes.get(i);
            // Reject duplicates within the argument list itself
            for (int j = 0; j < i; j++) {
                if (node.equals(nodes.get(j)))
                    illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
            }
            Optional<Node> existing = node(node.hostname());
            if (existing.isPresent()) {
                if (existing.get().state() != Node.State.deprovisioned)
                    illegal("Cannot add " + node + ": A node with this name already exists");
                // Re-adding a deprovisioned node: carry over history, reports, fail count and firmware check
                node = node.with(existing.get().history());
                node = node.with(existing.get().reports());
                node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                if (existing.get().status().firmwareVerifiedAt().isPresent())
                    node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                // Preserve an ongoing rebuild (and its retire flag) across deprovision/re-provision
                boolean rebuilding = existing.get().status().wantToRebuild();
                if (rebuilding) {
                    node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(),
                                                                   false,
                                                                   rebuilding));
                }
                nodesToRemove.add(existing.get());
            }
            nodesToAdd.add(node);
        }
        // Add the merged nodes and remove the deprovisioned originals in one transaction
        NestedTransaction transaction = new NestedTransaction();
        List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction);
        db.removeNodes(nodesToRemove, transaction);
        transaction.commit();
        return resultingNodes;
    }
}
/** Reserve nodes. This method does <b>not</b> lock the node repository. */
public List<Node> reserve(List<Node> nodes) {
    return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository. */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
    // The state change takes effect only when the caller commits the transaction
    return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state
 * @param reusable whether the nodes can be recycled directly when deactivated (see deactivate)
 */
public void setRemovable(ApplicationId application, List<Node> nodes, boolean reusable) {
    try (Mutex lock = applications.lock(application)) {
        List<Node> removableNodes = nodes.stream()
                                         .map(node -> node.with(node.allocation().get().removable(true, reusable)))
                                         .toList();
        write(removableNodes, lock);
    }
}
/**
 * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
    // Outside production (and in CD systems) nothing is kept in inactive: recycle everything directly
    if ( ! zone.environment().isProduction() || zone.system().isCd())
        return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested());
    var stateless = NodeList.copyOf(nodes).stateless();
    var stateful = NodeList.copyOf(nodes).stateful();
    var statefulToInactive = stateful.not().reusable();
    var statefulToDirty = stateful.reusable();
    List<Node> written = new ArrayList<>();
    // Stateless nodes and reusable stateful nodes are recycled (dirty); the rest stay inactive
    written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
    written.addAll(deallocate(statefulToDirty.asList(), Agent.application, "Deactivated by application (recycled)", transaction.nested()));
    written.addAll(db.writeTo(Node.State.inactive, statefulToInactive.asList(), Agent.application, Optional.empty(), transaction.nested()));
    return written;
}
/**
 * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
    return fail(nodes, Agent.application, "Failed by application", transaction.nested());
}

/** Fails these nodes in their own, immediately committed, transaction. */
public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    nodes = fail(nodes, agent, reason, transaction);
    transaction.commit();
    return nodes;
}

private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
    // Clear the wantToFail flag now that the failing is actually carried out
    nodes = nodes.stream()
                 .map(n -> n.withWantToFail(false, agent, clock.instant()))
                 .collect(Collectors.toList());
    return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
}
/** Move nodes to the dirty state */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
    // performOn supplies the appropriate lock per node; each deallocation commits its own transaction
    return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
}
/** Moves the node with this hostname, and for hosts also all its children, to dirty. */
public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
    Node nodeToDirty = node(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " +
                                                                                     hostname + ": Node not found"));
    // Hosts bring their children along; nodes already dirty are skipped
    List<Node> nodesToDirty =
            (nodeToDirty.type().isHost() ?
             Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
             Stream.of(nodeToDirty)).filter(node -> node.state() != Node.State.dirty).toList();
    // All-or-nothing: refuse the whole call if any node is in a state which may not go to dirty
    List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                                                          .filter(node -> node.state() != Node.State.provisioned)
                                                          .filter(node -> node.state() != Node.State.failed)
                                                          .filter(node -> node.state() != Node.State.parked)
                                                          .filter(node -> node.state() != Node.State.breakfixed)
                                                          .map(Node::hostname).toList();
    if ( ! hostnamesNotAllowedToDirty.isEmpty())
        illegal("Could not deallocate " + nodeToDirty + ": " +
                hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");
    return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
}
/**
 * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 */
public Node deallocate(Node node, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    Node deallocated = deallocate(node, agent, reason, transaction);
    transaction.commit();
    return deallocated;
}

/** Deallocates each of the given nodes within the given (uncommitted) transaction. */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
    return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
}
/** Moves this node to dirty, or parks it when deallocation by this agent requires parking. */
public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
    if (parkOnDeallocationOf(node, agent)) {
        return park(node.hostname(), false, agent, reason, transaction);
    } else {
        Node.State toState = Node.State.dirty;
        if (node.state() == Node.State.parked) {
            // A parked node pending deprovisioning or rebuilding must not re-enter the recycling flow
            if (node.status().wantToDeprovision()) throw new IllegalArgumentException("Cannot move " + node + " to " + toState + ": It's being deprovisioned");
            if (node.status().wantToRebuild()) throw new IllegalArgumentException("Cannot move " + node + " to " + toState + ": It's being rebuilt");
        }
        return db.writeTo(toState, List.of(node), agent, Optional.of(reason), transaction).get(0);
    }
}
/**
 * Fails this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
    return fail(hostname, false, agent, reason);
}

/** Fails this node, optionally also marking it for deprovisioning, and returns it in its new state. */
public Node fail(String hostname, boolean wantToDeprovision, Agent agent, String reason) {
    return move(hostname, Node.State.failed, agent, wantToDeprovision, Optional.of(reason));
}
/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
 * The host is failed if it has no active nodes and marked wantToFail if it has.
 *
 * @return all the nodes that were changed by this request
 */
public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
    NodeList children = list().childrenOf(hostname);
    List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));
    if (children.state(Node.State.active).isEmpty())
        changed.add(move(hostname, Node.State.failed, agent, false, Optional.of(reason)));
    else
        changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock)));
    return changed;
}

/** Fails the node directly, or only marks it wantToFail when it is active. */
private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
    if (node.state() == Node.State.active) {
        node = node.withWantToFail(true, agent, clock.instant());
        write(node, lock);
        return node;
    } else {
        return move(node.hostname(), Node.State.failed, agent, false, Optional.of(reason));
    }
}
/**
 * Parks this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, boolean wantToDeprovision, Agent agent, String reason) {
    NestedTransaction transaction = new NestedTransaction();
    Node parked = park(hostname, wantToDeprovision, agent, reason, transaction);
    transaction.commit();
    return parked;
}

/** Parks this node as part of the given (uncommitted) transaction. */
private Node park(String hostname, boolean wantToDeprovision, Agent agent, String reason, NestedTransaction transaction) {
    return move(hostname, Node.State.parked, agent, wantToDeprovision, Optional.of(reason), transaction);
}
/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
    // All moves happen in a single transaction inside moveRecursively
    return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}

/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent, String reason) {
    return move(hostname, Node.State.active, agent, false, Optional.of(reason));
}
/**
 * Moves a host to breakfixed state, removing any children.
 *
 * @return the removed children plus the breakfixed host, in their new states
 */
public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
    Node node = requireNode(hostname);
    try (Mutex lock = lockUnallocated()) {
        requireBreakfixable(node);
        // Children are removed and the host moved in a single committed transaction
        NestedTransaction transaction = new NestedTransaction();
        List<Node> removed = removeChildren(node, false, transaction);
        removed.add(move(node.hostname(), Node.State.breakfixed, agent, false, Optional.of(reason), transaction));
        transaction.commit();
        return removed;
    }
}
/** Moves all children of the given hostname, then the hostname itself, to the given state in one transaction. */
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    NestedTransaction transaction = new NestedTransaction();
    List<Node> moved = new ArrayList<>();
    for (Node child : list().childrenOf(hostname))
        moved.add(move(child.hostname(), toState, agent, false, reason, transaction));
    moved.add(move(hostname, toState, agent, false, reason, transaction));
    transaction.commit();
    return moved;
}
/** Moves a node to the given state in its own, immediately committed, transaction. */
private Node move(String hostname, Node.State toState, Agent agent, boolean wantToDeprovision, Optional<String> reason) {
    NestedTransaction txn = new NestedTransaction();
    Node result = move(hostname, toState, agent, wantToDeprovision, reason, txn);
    txn.commit();
    return result;
}
/** Move a node to given state as part of a transaction */
private Node move(String hostname, Node.State toState, Agent agent, boolean wantToDeprovision, Optional<String> reason, NestedTransaction transaction) {
    try (NodeMutex lock = lockAndGetRequired(hostname)) {
        Node node = lock.node();
        if (toState == Node.State.active) {
            if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
            // Guard against two active nodes claiming the same cluster and index of the same application
            for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                    && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
            }
        }
        if (wantToDeprovision)
            // Deprovisioning implies retirement: both flags are set here
            node = node.withWantToRetire(wantToDeprovision, wantToDeprovision, agent, clock.instant());
        if (toState == Node.State.deprovisioned) {
            node = node.with(IP.Config.EMPTY); // clear the node's IP config when deprovisioning
        }
        return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
    }
}
/*
 * This method is used by the REST API to handle readying nodes for new allocations. For Linux
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
    try (NodeMutex nodeMutex = lockAndGetRequired(hostname)) {
        Node node = nodeMutex.node();
        if (node.type() == NodeType.tenant) {
            // Tenant nodes are removed entirely rather than readied
            if (node.state() != Node.State.dirty)
                illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
            NestedTransaction transaction = new NestedTransaction();
            db.removeNodes(List.of(node), transaction);
            transaction.commit();
            return node;
        }
        if (node.state() == Node.State.ready) return node; // already available
        // Refuse to ready a node whose (parent) host has hard failures
        Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
        List<String> failureReasons = NodeFailer.reasonsToFailHost(parentHost);
        if (!failureReasons.isEmpty())
            illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
        // NOTE(review): a node with wantToDeprovision set can still be readied here -- confirm it
        // should not instead be moved to parked when host-admin reports it ready.
        return setReady(nodeMutex, agent, reason);
    }
}
/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
 */
public List<Node> removeRecursively(String hostname) {
    Node node = requireNode(hostname);
    return removeRecursively(node, false);
}

/** Removes the given node and any children; see {@code requireRemovable} for what is allowed unless forced. */
public List<Node> removeRecursively(Node node, boolean force) {
    try (Mutex lock = lockUnallocated()) {
        requireRemovable(node, false, force);
        NestedTransaction transaction = new NestedTransaction();
        final List<Node> removed;
        if (!node.type().isHost()) {
            removed = List.of(node);
            db.removeNodes(removed, transaction);
        } else {
            removed = removeChildren(node, force, transaction);
            if (zone.cloud().dynamicProvisioning()) {
                // Dynamically provisioned hosts disappear completely when removed
                db.removeNodes(List.of(node), transaction);
            } else {
                // Other hosts are kept in deprovisioned, so their history can be merged if re-added (see addNodes)
                move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
            }
            removed.add(node);
        }
        transaction.commit();
        return removed;
    }
}
/** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
public void forget(Node node) {
    if (node.state() != Node.State.deprovisioned)
        throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
    if (node.status().wantToRebuild())
        throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten");
    NestedTransaction removal = new NestedTransaction();
    db.removeNodes(List.of(node), removal);
    removal.commit();
}
/** Removes all children of the given node in the given transaction and returns them as a mutable list. */
private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
    List<Node> children = new ArrayList<>(list().childrenOf(node).asList());
    children.forEach(child -> requireRemovable(child, true, force));
    db.removeNodes(children, transaction);
    return children;
}
/**
 * Throws if the given node cannot be removed. Removal is allowed if:
 *  - Tenant node:
 *    - non-recursively: node is unallocated
 *    - recursively: node is unallocated or node is in failed|parked
 *  - Host node: iff in state provisioned|failed|parked
 *  - Child node:
 *    - non-recursively: node in state ready
 *    - recursively: child is in state provisioned|failed|parked|dirty|ready
 */
private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
    if (force) return; // forced removal bypasses all checks
    // Allocated tenant nodes may only be removed recursively, and only from failed or parked
    if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
        EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked);
        if (!removingRecursively || !removableStates.contains(node.state()))
            illegal(node + " is currently allocated and cannot be removed while in " + node.state());
    }
    final Set<Node.State> removableStates;
    if (node.type().isHost()) {
        removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
    } else {
        removableStates = removingRecursively
                          ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready)
                          : EnumSet.of(Node.State.ready);
    }
    if (!removableStates.contains(node.state()))
        illegal(node + " can not be removed while in " + node.state());
}
/**
 * Throws if given node cannot be breakfixed.
 * Breakfix is allowed if the following is true:
 *  - Node is tenant host
 *  - Node is in zone without dynamic provisioning
 *  - Node is in parked or failed state
 */
private void requireBreakfixable(Node node) {
    if (zone.cloud().dynamicProvisioning())
        illegal("Can not breakfix in zone: " + zone);
    if (node.type() != NodeType.host)
        illegal(node + " can not be breakfixed as it is not a tenant host");
    Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
    if (!legalStates.contains(node.state()))
        illegal(node + " can not be removed as it is not in the states " + legalStates);
}
/**
 * Increases the restart generation of the active nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> restartActive(Predicate<Node> filter) {
    return restart(NodeFilter.in(Set.of(Node.State.active)).and(filter));
}

/**
 * Increases the restart generation of any nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> restart(Predicate<Node> filter) {
    // Restart generation lives on the allocation, so matching nodes are assumed to be allocated
    return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
                                                   lock));
}

/**
 * Increases the reboot generation of the nodes matching the filter.
 *
 * @return the nodes in their new state
 */
public List<Node> reboot(Predicate<Node> filter) {
    return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
/**
 * Set target OS version of all nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
    return performOn(filter, (node, lock) -> write(node.withWantedOsVersion(version), lock));
}

/** Retire nodes matching given filter */
public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
    return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
}

/** Retire and deprovision given host and all of its children */
public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
    return decommission(hostname, HostOperation.deprovision, agent, instant);
}

/** Rebuild given host; a soft rebuild preserves data and does not require retirement first */
public List<Node> rebuild(String hostname, boolean soft, Agent agent, Instant instant) {
    return decommission(hostname, soft ? HostOperation.softRebuild : HostOperation.rebuild, agent, instant);
}
/** Marks the given host (and, when retiring, its children) with the retire/deprovision/rebuild flags of op. */
private List<Node> decommission(String hostname, HostOperation op, Agent agent, Instant instant) {
    Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
    if (nodeMutex.isEmpty()) return List.of(); // node disappeared; nothing to do
    Node host = nodeMutex.get().node();
    if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);
    boolean wantToDeprovision = op == HostOperation.deprovision;
    boolean wantToRebuild = op == HostOperation.rebuild || op == HostOperation.softRebuild;
    boolean wantToRetire = op.needsRetirement();
    List<Node> result = new ArrayList<>();
    // Write the host itself while holding both its own lock and the unallocated lock
    try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
        Node newHost = lock.node().withWantToRetire(wantToRetire, wantToDeprovision, wantToRebuild, agent, instant);
        result.add(write(newHost, lock));
    }
    if (wantToRetire) { // apply the same flags recursively to the host's children
        List<Node> updatedNodes = performOn(list().childrenOf(host), (node, nodeLock) -> {
            Node newNode = node.withWantToRetire(wantToRetire, wantToDeprovision, wantToRebuild, agent, instant);
            return write(newNode, nodeLock);
        });
        result.addAll(updatedNodes);
    }
    return result;
}
/**
 * Writes this node after it has changed some internal state but NOT changed its state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written node for convenience
 */
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock; unused at runtime, required only to make callers prove they hold it
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
    return db.writeTo(nodes, Agent.system, Optional.empty());
}
/** Performs an operation requiring locking on all nodes matching the given predicate. */
private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
    return performOn(list().matching(filter), action);
}
/**
 * Performs an operation requiring locking on all nodes matching some filter.
 *
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
    // Partition the nodes by which lock protects them: an application lock, or the unallocated lock
    List<Node> unallocatedNodes = new ArrayList<>();
    ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
    for (Node node : nodes) {
        Optional<ApplicationId> applicationId = applicationIdForLock(node);
        if (applicationId.isPresent())
            allocatedNodes.put(applicationId.get(), node);
        else
            unallocatedNodes.add(node);
    }
    List<Node> resultingNodes = new ArrayList<>();
    try (Mutex lock = lockUnallocated()) {
        for (Node node : unallocatedNodes) {
            Optional<Node> currentNode = db.readNode(node.hostname()); // re-read under the lock; skip if gone
            if (currentNode.isEmpty()) continue;
            resultingNodes.add(action.apply(currentNode.get(), lock));
        }
    }
    for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
        try (Mutex lock = applications.lock(applicationNodes.getKey())) {
            for (Node node : applicationNodes.getValue()) {
                Optional<Node> currentNode = db.readNode(node.hostname()); // re-read under the lock; skip if gone
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
    }
    return resultingNodes;
}
/** Returns whether tenant nodes may currently be allocated to this host, using this zone's provisioning mode. */
public boolean canAllocateTenantNodeTo(Node host) {
    return canAllocateTenantNodeTo(host, zone.cloud().dynamicProvisioning());
}

/** Returns whether tenant nodes may currently be allocated to this host. */
public boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
    if (!host.type().canRun(NodeType.tenant)) return false;
    if (host.status().wantToRetire()) return false;
    if (host.allocation().map(allocation -> allocation.membership().retired()).orElse(false)) return false;
    if (suspended(host)) return false;
    // With dynamic provisioning we may also allocate to hosts that are not yet active
    return dynamicProvisioning
           ? EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state())
           : host.state() == Node.State.active;
}
/** Returns whether the orchestrator reports this node as suspended; unknown nodes count as not suspended. */
public boolean suspended(Node node) {
    try {
        return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended();
    } catch (HostNameNotFoundException ignored) {
        return false; // not known to the orchestrator
    }
}
/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() {
    return db.lockInactive();
}
/** Returns the unallocated/application lock, and the node acquired under that lock. */
private Optional<NodeMutex> lockAndGet(Node node, Optional<Duration> timeout) {
    Node staleNode = node;
    final int maxRetries = 4;
    for (int i = 0; i < maxRetries; ++i) {
        Mutex lockToClose = lock(staleNode, timeout);
        try {
            // Re-read the node after acquiring the lock, as it may have changed meanwhile
            Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
            if (freshNode.isEmpty()) {
                freshNode = node(staleNode.hostname());
                if (freshNode.isEmpty()) {
                    return Optional.empty();
                }
            }
            // For tenant nodes the right lock depends on the owner; retry if ownership changed under us
            if (node.type() != NodeType.tenant ||
                Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                               staleNode.allocation().map(Allocation::owner))) {
                NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                lockToClose = null; // lock ownership transferred to the returned NodeMutex
                return Optional.of(nodeMutex);
            }
            staleNode = freshNode.get();
        } finally {
            if (lockToClose != null) lockToClose.close();
        }
    }
    throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                    "fetching an up to date node under lock: " + node.hostname());
}
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(String hostname) {
    return node(hostname).flatMap(this::lockAndGet);
}

/** Returns the unallocated/application lock, and the node acquired under that lock, waiting at most the given timeout. */
public Optional<NodeMutex> lockAndGet(String hostname, Duration timeout) {
    return node(hostname).flatMap(node -> lockAndGet(node, Optional.of(timeout)));
}

/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(Node node) { return lockAndGet(node, Optional.empty()); }

/** Returns the unallocated/application lock, and the node acquired under that lock, waiting at most the given timeout. */
public Optional<NodeMutex> lockAndGet(Node node, Duration timeout) { return lockAndGet(node, Optional.of(timeout)); }

/** Like {@link #lockAndGet(Node)}, but throws NoSuchNodeException instead of returning empty. */
public NodeMutex lockAndGetRequired(Node node) {
    return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
}

/** Like {@link #lockAndGet(String)}, but throws NoSuchNodeException instead of returning empty. */
public NodeMutex lockAndGetRequired(String hostname) {
    return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}

/** Like {@link #lockAndGet(String, Duration)}, but throws NoSuchNodeException instead of returning empty. */
public NodeMutex lockAndGetRequired(String hostname, Duration timeout) {
    return lockAndGet(hostname, timeout).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Acquires the lock protecting this node: its application's lock when allocated, the inactive lock otherwise. */
private Mutex lock(Node node, Optional<Duration> timeout) {
    Optional<ApplicationId> application = applicationIdForLock(node);
    if (application.isEmpty())
        return timeout.map(db::lockInactive).orElseGet(db::lockInactive);
    return timeout.map(t -> applications.lock(application.get(), t))
                  .orElseGet(() -> applications.lock(application.get()));
}
/** Returns the node with the given hostname, or throws NoSuchNodeException if it does not exist. */
private Node requireNode(String hostname) {
    return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
/** Returns the application ID that should be used for locking when modifying this node */
private static Optional<ApplicationId> applicationIdForLock(Node node) {
    return switch (node.type()) {
        // Tenant nodes are locked through their owning application; empty when unallocated
        case tenant -> node.allocation().map(Allocation::owner);
        // Each infrastructure node type is locked through its well-known infrastructure application
        case host -> Optional.of(InfrastructureApplication.TENANT_HOST.id());
        case config -> Optional.of(InfrastructureApplication.CONFIG_SERVER.id());
        case confighost -> Optional.of(InfrastructureApplication.CONFIG_SERVER_HOST.id());
        case controller -> Optional.of(InfrastructureApplication.CONTROLLER.id());
        case controllerhost -> Optional.of(InfrastructureApplication.CONTROLLER_HOST.id());
        case proxy -> Optional.of(InfrastructureApplication.PROXY.id());
        case proxyhost -> Optional.of(InfrastructureApplication.PROXY_HOST.id());
    };
}
/** Throws IllegalArgumentException with the given message. */
private static void illegal(String message) {
    throw new IllegalArgumentException(message);
}
/** Returns whether node should be parked when deallocated by given agent */
private static boolean parkOnDeallocationOf(Node node, Agent agent) {
    if (node.state() == Node.State.parked) return false; // already parked
    if (agent == Agent.operator) return false; // deallocation by an operator never parks
    // Tenant nodes pending deprovisioning are not parked
    if (node.type() == NodeType.tenant && node.status().wantToDeprovision()) return false;
    boolean retirementRequestedByOperator = node.status().wantToRetire() &&
                                            node.history().event(History.Event.Type.wantToRetire)
                                                .map(History.Event::agent)
                                                .map(a -> a == Agent.operator)
                                                .orElse(false);
    // Park when the node is pending deprovisioning or rebuilding, or retirement was operator-requested
    return node.status().wantToDeprovision() ||
           node.status().wantToRebuild() ||
           retirementRequestedByOperator;
}
/** The decommissioning operations that may be requested for a host. */
private enum HostOperation {

    /** Host is deprovisioned and data is destroyed */
    deprovision(true),

    /** Host is deprovisioned, the same host is later re-provisioned and data is destroyed */
    rebuild(true),

    /** Host is stopped and re-bootstrapped, data is preserved */
    softRebuild(false);

    // Whether the host and its children must be retired before the operation is carried out
    private final boolean needsRetirement;

    HostOperation(boolean needsRetirement) {
        this.needsRetirement = needsRetirement;
    }

    /** Returns whether this operation requires the host and its children to be retired */
    public boolean needsRetirement() {
        return needsRetirement;
    }

}
} | class Nodes {
private static final Logger log = Logger.getLogger(Nodes.class.getName());
private final CuratorDatabaseClient db;
private final Zone zone;
private final Clock clock;
private final Orchestrator orchestrator;
private final Applications applications;
    /** Creates a node collection backed by the given database, scoped to the given zone. */
    public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock, Orchestrator orchestrator, Applications applications) {
        this.zone = zone;
        this.clock = clock;
        this.db = db;
        this.orchestrator = orchestrator;
        this.applications = applications;
    }
/** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
public void rewrite() {
Instant start = clock.instant();
int nodesWritten = 0;
for (Node.State state : Node.State.values()) {
List<Node> nodes = db.readNodes(state);
db.writeTo(state, nodes, Agent.system, Optional.empty());
nodesWritten += nodes.size();
}
Instant end = clock.instant();
log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
}
    /**
     * Finds and returns the node with the hostname in any of the given states, or empty if not found.
     *
     * @param hostname the full host name of the node
     * @param inState the states the node may be in. If no states are given, it will be returned from any state
     * @return the node, or empty if it was not found in any of the given states
     */
    public Optional<Node> node(String hostname, Node.State... inState) {
        return db.readNode(hostname, inState);
    }
    /**
     * Returns a list of nodes in this repository in any of the given states.
     *
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     */
    public NodeList list(Node.State... inState) {
        return NodeList.copyOf(db.readNodes(inState));
    }
    /** Returns a locked list of all nodes in this repository. The caller must already hold the given lock. */
    public LockedNodeList list(Mutex lock) {
        return new LockedNodeList(list().asList(), lock);
    }
/**
* Returns whether the zone managed by this node repository seems to be working.
* If too many nodes are not responding, there is probably some zone-wide issue
* and we should probably refrain from making changes to it.
*/
public boolean isWorking() {
NodeList activeNodes = list(Node.State.active);
if (activeNodes.size() <= 5) return true;
NodeList downNodes = activeNodes.down();
return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
}
    /** Adds a list of newly created reserved nodes to the node repository */
    public List<Node> addReservedNodes(LockedNodeList nodes) {
        // Only already-allocated child (container) nodes may be added directly as reserved
        for (Node node : nodes) {
            if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
                illegal("Cannot add " + node + ": This is not a child node");
            if (node.allocation().isEmpty())
                illegal("Cannot add " + node + ": Child nodes need to be allocated");
            Optional<Node> existing = node(node.hostname());
            if (existing.isPresent())
                illegal("Cannot add " + node + ": A node with this name already exists");
        }
        return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
    }
    /**
     * Adds a list of (newly created) nodes to the node repository as provisioned nodes.
     * If any of the nodes already exists in the deprovisioned state, the new node will be merged
     * with the history of that node.
     */
    public List<Node> addNodes(List<Node> nodes, Agent agent) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesToAdd = new ArrayList<>();
            List<Node> nodesToRemove = new ArrayList<>();
            for (int i = 0; i < nodes.size(); i++) {
                var node = nodes.get(i);
                // Reject duplicates within the argument list itself
                for (int j = 0; j < i; j++) {
                    if (node.equals(nodes.get(j)))
                        illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
                }
                Optional<Node> existing = node(node.hostname());
                if (existing.isPresent()) {
                    if (existing.get().state() != Node.State.deprovisioned)
                        illegal("Cannot add " + node + ": A node with this name already exists");
                    // Merge: carry over history, reports, fail count and firmware check time
                    // from the deprovisioned node being replaced
                    node = node.with(existing.get().history());
                    node = node.with(existing.get().reports());
                    node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                    if (existing.get().status().firmwareVerifiedAt().isPresent())
                        node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                    // Preserve retire/rebuild flags for hosts that are being rebuilt
                    boolean rebuilding = existing.get().status().wantToRebuild();
                    if (rebuilding) {
                        node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(),
                                                                       false,
                                                                       rebuilding));
                    }
                    nodesToRemove.add(existing.get());
                }
                nodesToAdd.add(node);
            }
            // Add the new nodes and remove the replaced deprovisioned ones atomically
            NestedTransaction transaction = new NestedTransaction();
            List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction);
            db.removeNodes(nodesToRemove, transaction);
            transaction.commit();
            return resultingNodes;
        }
    }
    // NOTE(review): a stale javadoc ("Sets a node to ready ...") sat here; it describes setReady
    // (see markNodeAvailableForNewAllocation), not reserve(), and has been corrected.
    /** Reserve nodes. This method does <b>not</b> lock the node repository. */
    public List<Node> reserve(List<Node> nodes) {
        return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
    }
    /** Activate nodes as part of the given transaction. This method does <b>not</b> lock the node repository. */
    public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
        return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
    }
    /**
     * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
     *
     * @param application the application the nodes belong to
     * @param nodes the nodes to make removable. These nodes MUST be in the active state
     * @param reusable if true, the nodes are moved directly to {@link Node.State#dirty} on deactivation
     *                 instead of {@link Node.State#inactive} (see deactivate)
     */
    public void setRemovable(ApplicationId application, List<Node> nodes, boolean reusable) {
        try (Mutex lock = applications.lock(application)) {
            List<Node> removableNodes = nodes.stream()
                                             .map(node -> node.with(node.allocation().get().removable(true, reusable)))
                                             .toList();
            write(removableNodes, lock);
        }
    }
    /**
     * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
        // Outside production (or in CD systems), recycle all deactivated nodes immediately
        if ( ! zone.environment().isProduction() || zone.system().isCd())
            return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested());
        var stateless = NodeList.copyOf(nodes).stateless();
        var stateful = NodeList.copyOf(nodes).stateful();
        var statefulToInactive = stateful.not().reusable();
        var statefulToDirty = stateful.reusable();
        List<Node> written = new ArrayList<>();
        // Stateless and reusable stateful nodes are recycled (dirty); other stateful nodes
        // keep their data and go to inactive
        written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
        written.addAll(deallocate(statefulToDirty.asList(), Agent.application, "Deactivated by application (recycled)", transaction.nested()));
        written.addAll(db.writeTo(Node.State.inactive, statefulToInactive.asList(), Agent.application, Optional.empty(), transaction.nested()));
        return written;
    }
    /**
     * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
        return fail(nodes, Agent.application, "Failed by application", transaction.nested());
    }
    /** Fails these nodes in their own transaction and returns them in their new state. */
    public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        nodes = fail(nodes, agent, reason, transaction);
        transaction.commit();
        return nodes;
    }
    /** Moves nodes to failed, clearing any wantToFail flag since the request is now fulfilled. */
    private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        nodes = nodes.stream()
                     .map(n -> n.withWantToFail(false, agent, clock.instant()))
                     .collect(Collectors.toList());
        return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
    }
    /** Move nodes to the dirty state (or parked, see parkOnDeallocationOf), under the proper locks */
    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
        return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
    }
    /** Deallocates the node with the given hostname and, if it is a host, all its children. */
    public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
        Node nodeToDirty = node(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " +
                                                                                         hostname + ": Node not found"));
        // For hosts, include all children not already dirty
        List<Node> nodesToDirty =
                (nodeToDirty.type().isHost() ?
                 Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                 Stream.of(nodeToDirty)).filter(node -> node.state() != Node.State.dirty).toList();
        // All affected nodes must be in a state from which deallocation is legal
        List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                                                              .filter(node -> node.state() != Node.State.provisioned)
                                                              .filter(node -> node.state() != Node.State.failed)
                                                              .filter(node -> node.state() != Node.State.parked)
                                                              .filter(node -> node.state() != Node.State.breakfixed)
                                                              .map(Node::hostname).toList();
        if ( ! hostnamesNotAllowedToDirty.isEmpty())
            illegal("Could not deallocate " + nodeToDirty + ": " +
                    hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");
        return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
    }
    /**
     * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
     * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
     */
    public Node deallocate(Node node, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node deallocated = deallocate(node, agent, reason, transaction);
        transaction.commit();
        return deallocated;
    }
    /** Deallocates each of the given nodes as part of the given transaction. */
    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
    }
    /** Deallocates the node as part of the given transaction, parking instead of dirtying when required. */
    public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
        if (parkOnDeallocationOf(node, agent)) {
            return park(node.hostname(), false, agent, reason, transaction);
        } else {
            Node.State toState = Node.State.dirty;
            // Parked nodes pending deprovision/rebuild must not be recycled
            if (node.state() == Node.State.parked) {
                if (node.status().wantToDeprovision()) throw new IllegalArgumentException("Cannot move " + node + " to " + toState + ": It's being deprovisioned");
                if (node.status().wantToRebuild()) throw new IllegalArgumentException("Cannot move " + node + " to " + toState + ": It's being rebuilt");
            }
            return db.writeTo(toState, List.of(node), agent, Optional.of(reason), transaction).get(0);
        }
    }
    /**
     * Fails this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node fail(String hostname, Agent agent, String reason) {
        return fail(hostname, false, agent, reason);
    }
    /** Fails this node, optionally also marking it for deprovisioning, and returns it in its new state. */
    public Node fail(String hostname, boolean wantToDeprovision, Agent agent, String reason) {
        return move(hostname, Node.State.failed, agent, wantToDeprovision, Optional.of(reason));
    }
    /**
     * Fails all the nodes that are children of hostname before finally failing the hostname itself.
     * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
     * The host is failed if it has no active nodes and marked wantToFail if it has.
     *
     * @return all the nodes that were changed by this request
     */
    public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
        NodeList children = list().childrenOf(hostname);
        List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));
        // The host itself: fail directly when no child is active, otherwise only mark it
        if (children.state(Node.State.active).isEmpty())
            changed.add(move(hostname, Node.State.failed, agent, false, Optional.of(reason)));
        else
            changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock)));
        return changed;
    }
private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
if (node.state() == Node.State.active) {
node = node.withWantToFail(true, agent, clock.instant());
write(node, lock);
return node;
} else {
return move(node.hostname(), Node.State.failed, agent, false, Optional.of(reason));
}
}
    /**
     * Parks this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node park(String hostname, boolean wantToDeprovision, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node parked = park(hostname, wantToDeprovision, agent, reason, transaction);
        transaction.commit();
        return parked;
    }
    /** Parks this node as part of the given transaction. */
    private Node park(String hostname, boolean wantToDeprovision, Agent agent, String reason, NestedTransaction transaction) {
        return move(hostname, Node.State.parked, agent, wantToDeprovision, Optional.of(reason), transaction);
    }
    /**
     * Parks all the nodes that are children of hostname before finally parking the hostname itself.
     *
     * @return List of all the parked nodes in their new state
     */
    public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
    }
    /**
     * Moves a previously failed or parked node back to the active state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node reactivate(String hostname, Agent agent, String reason) {
        return move(hostname, Node.State.active, agent, false, Optional.of(reason));
    }
    /**
     * Moves a host to breakfixed state, removing any children.
     */
    public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
        Node node = requireNode(hostname);
        try (Mutex lock = lockUnallocated()) {
            requireBreakfixable(node);
            // Remove children and move the host in one transaction
            NestedTransaction transaction = new NestedTransaction();
            List<Node> removed = removeChildren(node, false, transaction);
            removed.add(move(node.hostname(), Node.State.breakfixed, agent, false, Optional.of(reason), transaction));
            transaction.commit();
            return removed;
        }
    }
    /** Moves the children of hostname, then hostname itself, to the given state in one transaction. */
    private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        List<Node> moved = list().childrenOf(hostname).asList().stream()
                                 .map(child -> move(child.hostname(), toState, agent, false, reason, transaction))
                                 .collect(Collectors.toList());
        moved.add(move(hostname, toState, agent, false, reason, transaction));
        transaction.commit();
        return moved;
    }
    /** Move a node to given state in its own transaction */
    private Node move(String hostname, Node.State toState, Agent agent, boolean wantToDeprovision, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node moved = move(hostname, toState, agent, wantToDeprovision, reason, transaction);
        transaction.commit();
        return moved;
    }
    /** Move a node to given state as part of a transaction */
    private Node move(String hostname, Node.State toState, Agent agent, boolean wantToDeprovision, Optional<String> reason, NestedTransaction transaction) {
        try (NodeMutex lock = lockAndGetRequired(hostname)) {
            Node node = lock.node();
            if (toState == Node.State.active) {
                if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
                // Guard against two active nodes with the same cluster and index in one application
                for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
                    if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                        illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
                }
            }
            if (wantToDeprovision)
                node = node.withWantToRetire(wantToDeprovision, wantToDeprovision, agent, clock.instant());
            if (toState == Node.State.deprovisioned) {
                node = node.with(IP.Config.EMPTY); // free the IP addresses for reuse
            }
            return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
        }
    }
    /**
     * This method is used by the REST API to handle readying nodes for new allocations. For Linux
     * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
     */
    public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
        try (NodeMutex nodeMutex = lockAndGetRequired(hostname)) {
            Node node = nodeMutex.node();
            // Tenant (container) nodes are simply removed; new ones are created on allocation
            if (node.type() == NodeType.tenant) {
                if (node.state() != Node.State.dirty)
                    illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
                NestedTransaction transaction = new NestedTransaction();
                db.removeNodes(List.of(node), transaction);
                transaction.commit();
                return node;
            }
            if (node.state() == Node.State.ready) return node; // already done
            // Refuse to ready a node whose (parent) host has hard failures
            Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
            List<String> failureReasons = NodeFailer.reasonsToFailHost(parentHost);
            if (!failureReasons.isEmpty())
                illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
            return setReady(nodeMutex, agent, reason);
        }
    }
    /**
     * Removes all the nodes that are children of hostname before finally removing the hostname itself.
     *
     * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
     */
    public List<Node> removeRecursively(String hostname) {
        Node node = requireNode(hostname);
        return removeRecursively(node, false);
    }
    /** Removes this node and (for hosts) its children; hosts are deprovisioned unless the zone provisions dynamically. */
    public List<Node> removeRecursively(Node node, boolean force) {
        try (Mutex lock = lockUnallocated()) {
            requireRemovable(node, false, force);
            NestedTransaction transaction = new NestedTransaction();
            final List<Node> removed;
            if (!node.type().isHost()) {
                removed = List.of(node);
                db.removeNodes(removed, transaction);
            } else {
                removed = removeChildren(node, force, transaction);
                if (zone.cloud().dynamicProvisioning()) {
                    // Dynamically provisioned hosts disappear entirely
                    db.removeNodes(List.of(node), transaction);
                } else {
                    // Statically provisioned hosts are kept as deprovisioned, preserving history
                    move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
                }
                removed.add(node);
            }
            transaction.commit();
            return removed;
        }
    }
/** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
public void forget(Node node) {
if (node.state() != Node.State.deprovisioned)
throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
if (node.status().wantToRebuild())
throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten");
NestedTransaction transaction = new NestedTransaction();
db.removeNodes(List.of(node), transaction);
transaction.commit();
}
    /** Removes all children of the given host as part of the transaction, after verifying each is removable. */
    private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
        List<Node> children = list().childrenOf(node).asList();
        children.forEach(child -> requireRemovable(child, true, force));
        db.removeNodes(children, transaction);
        return new ArrayList<>(children); // mutable copy, since callers append the host itself
    }
    /**
     * Throws if the given node cannot be removed. Removal is allowed if:
     *  - Tenant node:
     *    - non-recursively: node is unallocated
     *    - recursively: node is unallocated or node is in failed|parked
     *  - Host node: iff in state provisioned|failed|parked
     *  - Child node:
     *    - non-recursively: node in state ready
     *    - recursively: child is in state provisioned|failed|parked|dirty|ready
     */
    private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
        if (force) return; // forced removal skips all validation
        if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
            EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked);
            if (!removingRecursively || !removableStates.contains(node.state()))
                illegal(node + " is currently allocated and cannot be removed while in " + node.state());
        }
        final Set<Node.State> removableStates;
        if (node.type().isHost()) {
            removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
        } else {
            removableStates = removingRecursively
                    ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready)
                    : EnumSet.of(Node.State.ready);
        }
        if (!removableStates.contains(node.state()))
            illegal(node + " can not be removed while in " + node.state());
    }
/**
* Throws if given node cannot be breakfixed.
* Breakfix is allowed if the following is true:
* - Node is tenant host
* - Node is in zone without dynamic provisioning
* - Node is in parked or failed state
*/
private void requireBreakfixable(Node node) {
if (zone.cloud().dynamicProvisioning()) {
illegal("Can not breakfix in zone: " + zone);
}
if (node.type() != NodeType.host) {
illegal(node + " can not be breakfixed as it is not a tenant host");
}
Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
if (! legalStates.contains(node.state())) {
illegal(node + " can not be removed as it is not in the states " + legalStates);
}
}
    /**
     * Increases the restart generation of the active nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restartActive(Predicate<Node> filter) {
        return restart(NodeFilter.in(Set.of(Node.State.active)).and(filter));
    }
    /**
     * Increases the restart generation of any nodes matching the given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restart(Predicate<Node> filter) {
        return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
                                                       lock));
    }
    /**
     * Increases the reboot generation of the nodes matching the filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> reboot(Predicate<Node> filter) {
        return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
    }
    /**
     * Set target OS version of all nodes matching given filter.
     *
     * @param version the wanted OS version, or empty to clear the target
     * @return the nodes in their new state
     */
    public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
        return performOn(filter, (node, lock) -> write(node.withWantedOsVersion(version), lock));
    }
    /** Retire nodes matching given filter, recording the requesting agent and time */
    public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
        return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
    }
    /** Retire and deprovision given host and all of its children */
    public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
        return decommission(hostname, HostOperation.deprovision, agent, instant);
    }
    /** Rebuild given host, either softly (data preserved) or fully */
    public List<Node> rebuild(String hostname, boolean soft, Agent agent, Instant instant) {
        return decommission(hostname, soft ? HostOperation.softRebuild : HostOperation.rebuild, agent, instant);
    }
private List<Node> decommission(String hostname, HostOperation op, Agent agent, Instant instant) {
Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
if (nodeMutex.isEmpty()) return List.of();
Node host = nodeMutex.get().node();
if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);
boolean wantToDeprovision = op == HostOperation.deprovision;
boolean wantToRebuild = op == HostOperation.rebuild || op == HostOperation.softRebuild;
boolean wantToRetire = op.needsRetirement();
List<Node> result = new ArrayList<>();
try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
Node newHost = lock.node().withWantToRetire(wantToRetire, wantToDeprovision, wantToRebuild, agent, instant);
result.add(write(newHost, lock));
}
if (wantToRetire) {
List<Node> updatedNodes = performOn(list().childrenOf(host), (node, nodeLock) -> {
Node newNode = node.withWantToRetire(wantToRetire, wantToDeprovision, wantToRebuild, agent, instant);
return write(newNode, nodeLock);
});
result.addAll(updatedNodes);
}
return result;
}
    /**
     * Writes this node after it has changed some internal state but NOT changed its state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written node for convenience
     */
    public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }
    /**
     * Writes these nodes after they have changed some internal state but NOT changed their state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock; only present to force callers to hold it
     * @return the written nodes for convenience
     */
    public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
        return db.writeTo(nodes, Agent.system, Optional.empty());
    }
    private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
        return performOn(list().matching(filter), action);
    }
    /**
     * Performs an operation requiring locking on all nodes matching some filter.
     *
     * @param action the action to perform
     * @return the set of nodes on which the action was performed, as they became as a result of the operation
     */
    private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
        // Partition the nodes by which lock must be held while modifying them
        List<Node> unallocatedNodes = new ArrayList<>();
        ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
        for (Node node : nodes) {
            Optional<ApplicationId> applicationId = applicationIdForLock(node);
            if (applicationId.isPresent())
                allocatedNodes.put(applicationId.get(), node);
            else
                unallocatedNodes.add(node);
        }
        List<Node> resultingNodes = new ArrayList<>();
        try (Mutex lock = lockUnallocated()) {
            for (Node node : unallocatedNodes) {
                Optional<Node> currentNode = db.readNode(node.hostname()); // Re-read under lock; skip if gone
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
        for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
            try (Mutex lock = applications.lock(applicationNodes.getKey())) {
                for (Node node : applicationNodes.getValue()) {
                    Optional<Node> currentNode = db.readNode(node.hostname()); // Re-read under lock; skip if gone
                    if (currentNode.isEmpty()) continue;
                    resultingNodes.add(action.apply(currentNode.get(), lock));
                }
            }
        }
        return resultingNodes;
    }
    /** Returns whether a tenant node can currently be allocated to the given host. */
    public boolean canAllocateTenantNodeTo(Node host) {
        return canAllocateTenantNodeTo(host, zone.cloud().dynamicProvisioning());
    }
    /** Returns whether a tenant node can be allocated to the given host, given the provisioning mode. */
    public boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
        if ( ! host.type().canRun(NodeType.tenant)) return false;
        if (host.status().wantToRetire()) return false;
        if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
        if (suspended(host)) return false;
        // With dynamic provisioning, hosts not yet active may also receive allocations
        if (dynamicProvisioning)
            return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state());
        else
            return host.state() == Node.State.active;
    }
    /** Returns whether the orchestrator reports this node as suspended; unknown hosts count as not suspended. */
    public boolean suspended(Node node) {
        try {
            return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended();
        } catch (HostNameNotFoundException e) {
            // Treat it as not suspended
            return false;
        }
    }
    /** Create a lock which provides exclusive rights to modifying unallocated nodes */
    public Mutex lockUnallocated() { return db.lockInactive(); }
    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    private Optional<NodeMutex> lockAndGet(Node node, Optional<Duration> timeout) {
        Node staleNode = node;
        final int maxRetries = 4;
        for (int i = 0; i < maxRetries; ++i) {
            Mutex lockToClose = lock(staleNode, timeout);
            try {
                // Re-read the node under the lock; it may have changed state or disappeared meanwhile
                Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
                if (freshNode.isEmpty()) {
                    freshNode = node(staleNode.hostname());
                    if (freshNode.isEmpty()) {
                        return Optional.empty();
                    }
                }
                // The lock is keyed on the owning application (for tenant nodes), so it is only
                // valid if ownership did not change between read and lock acquisition; retry otherwise
                if (node.type() != NodeType.tenant ||
                    Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                                   staleNode.allocation().map(Allocation::owner))) {
                    NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                    lockToClose = null; // ownership of the lock is transferred to the returned NodeMutex
                    return Optional.of(nodeMutex);
                }
                staleNode = freshNode.get();
            } finally {
                if (lockToClose != null) lockToClose.close();
            }
        }
        throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                        "fetching an up to date node under lock: " + node.hostname());
    }
    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(String hostname) {
        return node(hostname).flatMap(this::lockAndGet);
    }
    /** Returns the unallocated/application lock, and the node acquired under that lock, with a lock timeout. */
    public Optional<NodeMutex> lockAndGet(String hostname, Duration timeout) {
        return node(hostname).flatMap(node -> lockAndGet(node, Optional.of(timeout)));
    }
    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(Node node) { return lockAndGet(node, Optional.empty()); }
    /** Returns the unallocated/application lock, and the node acquired under that lock, with a lock timeout. */
    public Optional<NodeMutex> lockAndGet(Node node, Duration timeout) { return lockAndGet(node, Optional.of(timeout)); }
    /** Returns the unallocated/application lock and the node acquired under it; throws if the node does not exist. */
    public NodeMutex lockAndGetRequired(Node node) {
        return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
    }
    /** Returns the unallocated/application lock and the node acquired under it; throws if the node does not exist. */
    public NodeMutex lockAndGetRequired(String hostname) {
        return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
    }
    /** Returns the unallocated/application lock and the node acquired under it; throws if the node does not exist. */
    public NodeMutex lockAndGetRequired(String hostname, Duration timeout) {
        return lockAndGet(hostname, timeout).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
    }
    /** Acquires the lock appropriate for this node: the owning application's lock, or the unallocated lock. */
    private Mutex lock(Node node, Optional<Duration> timeout) {
        Optional<ApplicationId> application = applicationIdForLock(node);
        if (application.isPresent())
            return timeout.map(t -> applications.lock(application.get(), t))
                          .orElseGet(() -> applications.lock(application.get()));
        else
            return timeout.map(db::lockInactive).orElseGet(db::lockInactive);
    }
    /** Returns the node with the given hostname, throwing {@link NoSuchNodeException} if absent. */
    private Node requireNode(String hostname) {
        return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
    }
    /** Returns the application ID that should be used for locking when modifying this node */
    private static Optional<ApplicationId> applicationIdForLock(Node node) {
        return switch (node.type()) {
            // Tenant nodes are locked by their owning application, if any
            case tenant -> node.allocation().map(Allocation::owner);
            // Infrastructure nodes are locked by their fixed infrastructure application
            case host -> Optional.of(InfrastructureApplication.TENANT_HOST.id());
            case config -> Optional.of(InfrastructureApplication.CONFIG_SERVER.id());
            case confighost -> Optional.of(InfrastructureApplication.CONFIG_SERVER_HOST.id());
            case controller -> Optional.of(InfrastructureApplication.CONTROLLER.id());
            case controllerhost -> Optional.of(InfrastructureApplication.CONTROLLER_HOST.id());
            case proxy -> Optional.of(InfrastructureApplication.PROXY.id());
            case proxyhost -> Optional.of(InfrastructureApplication.PROXY_HOST.id());
        };
    }
    /** Throws {@link IllegalArgumentException} with the given message; keeps validation call sites one-liners. */
    private static void illegal(String message) {
        throw new IllegalArgumentException(message);
    }
    /** Returns whether node should be parked when deallocated by given agent */
    private static boolean parkOnDeallocationOf(Node node, Agent agent) {
        if (node.state() == Node.State.parked) return false; // already parked
        if (agent == Agent.operator) return false; // operators deallocate directly, never via parking
        // Deprovisioning tenant nodes are removed, not parked
        if (node.type() == NodeType.tenant && node.status().wantToDeprovision()) return false;
        // Retirement only forces parking when an operator requested it
        boolean retirementRequestedByOperator = node.status().wantToRetire() &&
                                                node.history().event(History.Event.Type.wantToRetire)
                                                    .map(History.Event::agent)
                                                    .map(a -> a == Agent.operator)
                                                    .orElse(false);
        return node.status().wantToDeprovision() ||
               node.status().wantToRebuild() ||
               retirementRequestedByOperator;
    }
    /** The kinds of decommissioning that can be requested for a host. */
    private enum HostOperation {
        /** Host is deprovisioned and data is destroyed */
        deprovision(true),
        /** Host is deprovisioned, the same host is later re-provisioned and data is destroyed */
        rebuild(true),
        /** Host is stopped and re-bootstrapped, data is preserved */
        softRebuild(false);
        private final boolean needsRetirement;
        HostOperation(boolean needsRetirement) {
            this.needsRetirement = needsRetirement;
        }
        /** Returns whether this operation requires the host and its children to be retired */
        public boolean needsRetirement() {
            return needsRetirement;
        }
    }
} |
Without this, the reload below can be partially eaten by the other thread, resulting in a state where it fails the first run through all configs because of different generations, and never completes the second run because some configs don't have anything new. That this is possible doesn't seem right at all. I hope it's not the case for the actual JRT subscription, i.e., that that one ensures atomicity in some way, to avoid this state? | void getNewComponentGraph_hangs_waiting_for_valid_config_after_invalid_config() throws Exception {
dirConfigSource.writeConfig("test", "stringVal \"original\"");
writeBootstrapConfigs("myId", ComponentTakingConfig.class);
Container container = newContainer(dirConfigSource);
final ComponentGraph currentGraph = getNewComponentGraph(container);
writeBootstrapConfigs("thrower", ComponentThrowingExceptionForMissingConfig.class);
container.reloadConfig(2);
assertThrows(IllegalArgumentException.class,
() -> getNewComponentGraph(container, currentGraph));
ExecutorService exec = Executors.newFixedThreadPool(1);
dirConfigSource.clearCheckedConfigs();
Future<ComponentGraph> newGraph = exec.submit(() -> getNewComponentGraph(container, currentGraph));
dirConfigSource.awaitConfigChecked(10_000);
try {
newGraph.get(1, TimeUnit.SECONDS);
fail("Expected waiting for new config.");
} catch (TimeoutException ignored) {
}
writeBootstrapConfigs("myId2", ComponentTakingConfig.class);
container.reloadConfig(3);
assertNotNull(newGraph.get(10, TimeUnit.SECONDS));
container.shutdownConfigRetriever();
container.shutdown(newGraph.get());
} | dirConfigSource.awaitConfigChecked(10_000); | void getNewComponentGraph_hangs_waiting_for_valid_config_after_invalid_config() throws Exception {
dirConfigSource.writeConfig("test", "stringVal \"original\"");
writeBootstrapConfigs("myId", ComponentTakingConfig.class);
Container container = newContainer(dirConfigSource);
final ComponentGraph currentGraph = getNewComponentGraph(container);
writeBootstrapConfigs("thrower", ComponentThrowingExceptionForMissingConfig.class);
container.reloadConfig(2);
assertThrows(IllegalArgumentException.class,
() -> getNewComponentGraph(container, currentGraph));
ExecutorService exec = Executors.newFixedThreadPool(1);
dirConfigSource.clearCheckedConfigs();
Future<ComponentGraph> newGraph = exec.submit(() -> getNewComponentGraph(container, currentGraph));
dirConfigSource.awaitConfigChecked(10_000);
try {
newGraph.get(1, TimeUnit.SECONDS);
fail("Expected waiting for new config.");
} catch (TimeoutException ignored) {
}
writeBootstrapConfigs("myId2", ComponentTakingConfig.class);
container.reloadConfig(3);
assertNotNull(newGraph.get(10, TimeUnit.SECONDS));
container.shutdownConfigRetriever();
container.shutdown(newGraph.get());
} | class ContainerTest extends ContainerTestBase {
@Test
void components_can_be_created_from_config() {
writeBootstrapConfigs();
dirConfigSource.writeConfig("test", "stringVal \"myString\"");
Container container = newContainer(dirConfigSource);
ComponentTakingConfig component = createComponentTakingConfig(getNewComponentGraph(container));
assertEquals("myString", component.config.stringVal());
container.shutdownConfigRetriever();
}
@Test
void components_are_reconfigured_after_config_update_without_bootstrap_configs() {
writeBootstrapConfigs();
dirConfigSource.writeConfig("test", "stringVal \"original\"");
Container container = newContainer(dirConfigSource);
ComponentGraph componentGraph = getNewComponentGraph(container);
ComponentTakingConfig component = createComponentTakingConfig(componentGraph);
assertEquals("original", component.config.stringVal());
dirConfigSource.writeConfig("test", "stringVal \"reconfigured\"");
container.reloadConfig(2);
ComponentGraph newComponentGraph = getNewComponentGraph(container, componentGraph);
ComponentTakingConfig component2 = createComponentTakingConfig(newComponentGraph);
assertEquals("reconfigured", component2.config.stringVal());
container.shutdownConfigRetriever();
container.shutdown(newComponentGraph);
}
@Test
void graph_is_updated_after_bootstrap_update() {
dirConfigSource.writeConfig("test", "stringVal \"original\"");
writeBootstrapConfigs("id1");
Container container = newContainer(dirConfigSource);
ComponentGraph graph = getNewComponentGraph(container);
ComponentTakingConfig component = createComponentTakingConfig(graph);
assertEquals("id1", component.getId().toString());
writeBootstrapConfigs(
new ComponentEntry("id1", ComponentTakingConfig.class),
new ComponentEntry("id2", ComponentTakingConfig.class));
container.reloadConfig(2);
ComponentGraph newGraph = getNewComponentGraph(container, graph);
assertNotNull(ComponentGraph.getNode(newGraph, "id1"));
assertNotNull(ComponentGraph.getNode(newGraph, "id2"));
container.shutdownConfigRetriever();
container.shutdown(newGraph);
}
@Test
void bundle_from_previous_generation_is_uninstalled_when_not_used_in_the_new_generation() {
ComponentEntry component1 = new ComponentEntry("component1", SimpleComponent.class);
ComponentEntry component2 = new ComponentEntry("component2", SimpleComponent.class);
writeBootstrapConfigsWithBundles(List.of("bundle-1"), List.of(component1));
Container container = newContainer(dirConfigSource);
ComponentGraph graph = getNewComponentGraph(container);
assertEquals(1, osgi.getBundles().length);
assertEquals("bundle-1", osgi.getBundles()[0].getSymbolicName());
writeBootstrapConfigsWithBundles(List.of("bundle-2"), List.of(component2));
container.reloadConfig(2);
ComponentGraph newGraph = getNewComponentGraph(container, graph);
assertEquals(1, osgi.getBundles().length);
assertEquals("bundle-2", osgi.getBundles()[0].getSymbolicName());
container.shutdownConfigRetriever();
container.shutdown(newGraph);
}
@Test
void component_is_deconstructed_when_not_reused() {
writeBootstrapConfigs("id1", DestructableComponent.class);
Container container = newContainer(dirConfigSource);
ComponentGraph oldGraph = getNewComponentGraph(container);
DestructableComponent componentToDestruct = oldGraph.getInstance(DestructableComponent.class);
writeBootstrapConfigs("id2", DestructableComponent.class);
container.reloadConfig(2);
ComponentGraph newGraph = getNewComponentGraph(container, oldGraph);
assertTrue(componentToDestruct.deconstructed);
container.shutdownConfigRetriever();
container.shutdown(newGraph);
}
@Disabled("because logAndDie is impossible(?) to verify programmatically")
@Test
void manually_verify_what_happens_when_first_graph_contains_component_that_throws_exception_in_ctor() {
writeBootstrapConfigs("thrower", ComponentThrowingExceptionInConstructor.class);
Container container = newContainer(dirConfigSource);
try {
getNewComponentGraph(container);
fail("Expected to log and die.");
} catch (Throwable t) {
fail("Expected to log and die");
}
container.shutdownConfigRetriever();
}
@Test
void previous_graph_is_retained_when_new_graph_contains_component_that_throws_exception_in_ctor() {
ComponentEntry simpleComponentEntry = new ComponentEntry("simpleComponent", SimpleComponent.class);
writeBootstrapConfigs(simpleComponentEntry);
Container container = newContainer(dirConfigSource);
ComponentGraph currentGraph = getNewComponentGraph(container);
SimpleComponent simpleComponent = currentGraph.getInstance(SimpleComponent.class);
writeBootstrapConfigs("thrower", ComponentThrowingExceptionInConstructor.class);
container.reloadConfig(2);
assertNewComponentGraphFails(container, currentGraph, ComponentConstructorException.class);
assertEquals(1, currentGraph.generation());
ComponentEntry componentTakingConfigEntry = new ComponentEntry("componentTakingConfig", ComponentTakingConfig.class);
dirConfigSource.writeConfig("test", "stringVal \"myString\"");
writeBootstrapConfigs(simpleComponentEntry, componentTakingConfigEntry);
container.reloadConfig(3);
currentGraph = getNewComponentGraph(container, currentGraph);
assertEquals(3, currentGraph.generation());
assertSame(simpleComponent, currentGraph.getInstance(SimpleComponent.class));
assertNotNull(currentGraph.getInstance(ComponentTakingConfig.class));
container.shutdownConfigRetriever();
container.shutdown(currentGraph);
}
@Test
void bundle_from_generation_that_fails_in_component_construction_is_uninstalled() {
ComponentEntry simpleComponentEntry = new ComponentEntry("simpleComponent", SimpleComponent.class);
ComponentEntry throwingComponentEntry = new ComponentEntry("throwingComponent", ComponentThrowingExceptionInConstructor.class);
writeBootstrapConfigsWithBundles(List.of("bundle-1"), List.of(simpleComponentEntry));
Container container = newContainer(dirConfigSource);
ComponentGraph currentGraph = getNewComponentGraph(container);
assertEquals(1, osgi.getBundles().length);
assertEquals("bundle-1", osgi.getBundles()[0].getSymbolicName());
writeBootstrapConfigsWithBundles(List.of("bundle-2"), List.of(throwingComponentEntry));
container.reloadConfig(2);
assertNewComponentGraphFails(container, currentGraph, ComponentConstructorException.class);
assertEquals(1, currentGraph.generation());
assertEquals(1, osgi.getBundles().length);
assertEquals("bundle-1", osgi.getBundles()[0].getSymbolicName());
container.shutdownConfigRetriever();
container.shutdown(currentGraph);
}
@Test
void previous_graph_is_retained_when_new_graph_throws_exception_for_missing_config() {
ComponentEntry simpleComponentEntry = new ComponentEntry("simpleComponent", SimpleComponent.class);
writeBootstrapConfigs(simpleComponentEntry);
Container container = newContainer(dirConfigSource);
ComponentGraph currentGraph = getNewComponentGraph(container);
currentGraph.getInstance(SimpleComponent.class);
writeBootstrapConfigs("thrower", ComponentThrowingExceptionForMissingConfig.class);
dirConfigSource.writeConfig("test", "stringVal \"myString\"");
container.reloadConfig(2);
assertNewComponentGraphFails(container, currentGraph, IllegalArgumentException.class);
assertEquals(1, currentGraph.generation());
container.shutdownConfigRetriever();
container.shutdown(currentGraph);
}
@Test
void bundle_from_generation_that_throws_in_graph_creation_phase_is_uninstalled() {
ComponentEntry simpleComponent = new ComponentEntry("simpleComponent", SimpleComponent.class);
ComponentEntry configThrower = new ComponentEntry("configThrower", ComponentThrowingExceptionForMissingConfig.class);
writeBootstrapConfigsWithBundles(List.of("bundle-1"), List.of(simpleComponent));
Container container = newContainer(dirConfigSource);
ComponentGraph currentGraph = getNewComponentGraph(container);
assertEquals(1, osgi.getBundles().length);
assertEquals("bundle-1", osgi.getBundles()[0].getSymbolicName());
writeBootstrapConfigsWithBundles(List.of("bundle-2"), List.of(configThrower));
dirConfigSource.writeConfig("test", "stringVal \"myString\"");
container.reloadConfig(2);
assertNewComponentGraphFails(container, currentGraph, IllegalArgumentException.class);
assertEquals(1, currentGraph.generation());
assertEquals(1, osgi.getBundles().length);
assertEquals("bundle-1", osgi.getBundles()[0].getSymbolicName());
container.shutdownConfigRetriever();
container.shutdown(currentGraph);
}
private void assertNewComponentGraphFails(Container container, ComponentGraph currentGraph, Class<? extends RuntimeException> exception) {
try {
getNewComponentGraph(container, currentGraph);
fail("Expected exception");
} catch (Exception e) {
assertEquals(exception, e.getClass());
}
}
@Test
@Test
void providers_are_destroyed() {
writeBootstrapConfigs("id1", DestructableProvider.class);
ComponentDeconstructor deconstructor = (generation, components, bundles) -> {
components.forEach(component -> {
if (component instanceof AbstractComponent) {
((AbstractComponent) component).deconstruct();
} else if (component instanceof Provider) {
((Provider<?>) component).deconstruct();
}
});
if (!bundles.isEmpty()) throw new IllegalArgumentException("This test should not use bundles");
};
Container container = newContainer(dirConfigSource, deconstructor);
ComponentGraph oldGraph = getNewComponentGraph(container);
DestructableEntity destructableEntity = oldGraph.getInstance(DestructableEntity.class);
writeBootstrapConfigs("id2", DestructableProvider.class);
container.reloadConfig(2);
ComponentGraph graph = getNewComponentGraph(container, oldGraph);
assertTrue(destructableEntity.deconstructed);
container.shutdownConfigRetriever();
container.shutdown(graph);
}
@Test
void providers_are_invoked_only_when_needed() {
writeBootstrapConfigs("id1", FailOnGetProvider.class);
Container container = newContainer(dirConfigSource);
ComponentGraph oldGraph = getNewComponentGraph(container);
container.shutdown(oldGraph);
}
static class DestructableEntity {
private boolean deconstructed = false;
}
public static class DestructableProvider implements Provider<DestructableEntity> {
DestructableEntity instance = new DestructableEntity();
public DestructableEntity get() {
return instance;
}
public void deconstruct() {
assertFalse(instance.deconstructed);
instance.deconstructed = true;
}
}
public static class FailOnGetProvider implements Provider<Integer> {
public Integer get() {
fail("Should never be called.");
return null;
}
public void deconstruct() {
}
}
public static class ComponentTakingConfig extends AbstractComponent {
private final TestConfig config;
public ComponentTakingConfig(TestConfig config) {
assertNotNull(config);
this.config = config;
}
}
public static class ComponentThrowingExceptionInConstructor {
public ComponentThrowingExceptionInConstructor() {
throw new RuntimeException("This component fails upon construction.");
}
}
public static class ComponentThrowingExceptionForMissingConfig extends AbstractComponent {
public ComponentThrowingExceptionForMissingConfig(IntConfig intConfig) {
fail("This component should never be created. Only used for tests where 'int' config is missing.");
}
}
public static class DestructableComponent extends AbstractComponent {
private boolean deconstructed = false;
@Override
public void deconstruct() {
deconstructed = true;
}
}
private ComponentTakingConfig createComponentTakingConfig(ComponentGraph componentGraph) {
return componentGraph.getInstance(ComponentTakingConfig.class);
}
} | class ContainerTest extends ContainerTestBase {
@Test
void components_can_be_created_from_config() {
writeBootstrapConfigs();
dirConfigSource.writeConfig("test", "stringVal \"myString\"");
Container container = newContainer(dirConfigSource);
ComponentTakingConfig component = createComponentTakingConfig(getNewComponentGraph(container));
assertEquals("myString", component.config.stringVal());
container.shutdownConfigRetriever();
}
@Test
void components_are_reconfigured_after_config_update_without_bootstrap_configs() {
writeBootstrapConfigs();
dirConfigSource.writeConfig("test", "stringVal \"original\"");
Container container = newContainer(dirConfigSource);
ComponentGraph componentGraph = getNewComponentGraph(container);
ComponentTakingConfig component = createComponentTakingConfig(componentGraph);
assertEquals("original", component.config.stringVal());
dirConfigSource.writeConfig("test", "stringVal \"reconfigured\"");
container.reloadConfig(2);
ComponentGraph newComponentGraph = getNewComponentGraph(container, componentGraph);
ComponentTakingConfig component2 = createComponentTakingConfig(newComponentGraph);
assertEquals("reconfigured", component2.config.stringVal());
container.shutdownConfigRetriever();
container.shutdown(newComponentGraph);
}
@Test
void graph_is_updated_after_bootstrap_update() {
dirConfigSource.writeConfig("test", "stringVal \"original\"");
writeBootstrapConfigs("id1");
Container container = newContainer(dirConfigSource);
ComponentGraph graph = getNewComponentGraph(container);
ComponentTakingConfig component = createComponentTakingConfig(graph);
assertEquals("id1", component.getId().toString());
writeBootstrapConfigs(
new ComponentEntry("id1", ComponentTakingConfig.class),
new ComponentEntry("id2", ComponentTakingConfig.class));
container.reloadConfig(2);
ComponentGraph newGraph = getNewComponentGraph(container, graph);
assertNotNull(ComponentGraph.getNode(newGraph, "id1"));
assertNotNull(ComponentGraph.getNode(newGraph, "id2"));
container.shutdownConfigRetriever();
container.shutdown(newGraph);
}
@Test
void bundle_from_previous_generation_is_uninstalled_when_not_used_in_the_new_generation() {
ComponentEntry component1 = new ComponentEntry("component1", SimpleComponent.class);
ComponentEntry component2 = new ComponentEntry("component2", SimpleComponent.class);
writeBootstrapConfigsWithBundles(List.of("bundle-1"), List.of(component1));
Container container = newContainer(dirConfigSource);
ComponentGraph graph = getNewComponentGraph(container);
assertEquals(1, osgi.getBundles().length);
assertEquals("bundle-1", osgi.getBundles()[0].getSymbolicName());
writeBootstrapConfigsWithBundles(List.of("bundle-2"), List.of(component2));
container.reloadConfig(2);
ComponentGraph newGraph = getNewComponentGraph(container, graph);
assertEquals(1, osgi.getBundles().length);
assertEquals("bundle-2", osgi.getBundles()[0].getSymbolicName());
container.shutdownConfigRetriever();
container.shutdown(newGraph);
}
@Test
void component_is_deconstructed_when_not_reused() {
writeBootstrapConfigs("id1", DestructableComponent.class);
Container container = newContainer(dirConfigSource);
ComponentGraph oldGraph = getNewComponentGraph(container);
DestructableComponent componentToDestruct = oldGraph.getInstance(DestructableComponent.class);
writeBootstrapConfigs("id2", DestructableComponent.class);
container.reloadConfig(2);
ComponentGraph newGraph = getNewComponentGraph(container, oldGraph);
assertTrue(componentToDestruct.deconstructed);
container.shutdownConfigRetriever();
container.shutdown(newGraph);
}
@Disabled("because logAndDie is impossible(?) to verify programmatically")
@Test
void manually_verify_what_happens_when_first_graph_contains_component_that_throws_exception_in_ctor() {
writeBootstrapConfigs("thrower", ComponentThrowingExceptionInConstructor.class);
Container container = newContainer(dirConfigSource);
try {
getNewComponentGraph(container);
fail("Expected to log and die.");
} catch (Throwable t) {
fail("Expected to log and die");
}
container.shutdownConfigRetriever();
}
@Test
void previous_graph_is_retained_when_new_graph_contains_component_that_throws_exception_in_ctor() {
ComponentEntry simpleComponentEntry = new ComponentEntry("simpleComponent", SimpleComponent.class);
writeBootstrapConfigs(simpleComponentEntry);
Container container = newContainer(dirConfigSource);
ComponentGraph currentGraph = getNewComponentGraph(container);
SimpleComponent simpleComponent = currentGraph.getInstance(SimpleComponent.class);
writeBootstrapConfigs("thrower", ComponentThrowingExceptionInConstructor.class);
container.reloadConfig(2);
assertNewComponentGraphFails(container, currentGraph, ComponentConstructorException.class);
assertEquals(1, currentGraph.generation());
ComponentEntry componentTakingConfigEntry = new ComponentEntry("componentTakingConfig", ComponentTakingConfig.class);
dirConfigSource.writeConfig("test", "stringVal \"myString\"");
writeBootstrapConfigs(simpleComponentEntry, componentTakingConfigEntry);
container.reloadConfig(3);
currentGraph = getNewComponentGraph(container, currentGraph);
assertEquals(3, currentGraph.generation());
assertSame(simpleComponent, currentGraph.getInstance(SimpleComponent.class));
assertNotNull(currentGraph.getInstance(ComponentTakingConfig.class));
container.shutdownConfigRetriever();
container.shutdown(currentGraph);
}
@Test
void bundle_from_generation_that_fails_in_component_construction_is_uninstalled() {
ComponentEntry simpleComponentEntry = new ComponentEntry("simpleComponent", SimpleComponent.class);
ComponentEntry throwingComponentEntry = new ComponentEntry("throwingComponent", ComponentThrowingExceptionInConstructor.class);
writeBootstrapConfigsWithBundles(List.of("bundle-1"), List.of(simpleComponentEntry));
Container container = newContainer(dirConfigSource);
ComponentGraph currentGraph = getNewComponentGraph(container);
assertEquals(1, osgi.getBundles().length);
assertEquals("bundle-1", osgi.getBundles()[0].getSymbolicName());
writeBootstrapConfigsWithBundles(List.of("bundle-2"), List.of(throwingComponentEntry));
container.reloadConfig(2);
assertNewComponentGraphFails(container, currentGraph, ComponentConstructorException.class);
assertEquals(1, currentGraph.generation());
assertEquals(1, osgi.getBundles().length);
assertEquals("bundle-1", osgi.getBundles()[0].getSymbolicName());
container.shutdownConfigRetriever();
container.shutdown(currentGraph);
}
@Test
void previous_graph_is_retained_when_new_graph_throws_exception_for_missing_config() {
ComponentEntry simpleComponentEntry = new ComponentEntry("simpleComponent", SimpleComponent.class);
writeBootstrapConfigs(simpleComponentEntry);
Container container = newContainer(dirConfigSource);
ComponentGraph currentGraph = getNewComponentGraph(container);
currentGraph.getInstance(SimpleComponent.class);
writeBootstrapConfigs("thrower", ComponentThrowingExceptionForMissingConfig.class);
dirConfigSource.writeConfig("test", "stringVal \"myString\"");
container.reloadConfig(2);
assertNewComponentGraphFails(container, currentGraph, IllegalArgumentException.class);
assertEquals(1, currentGraph.generation());
container.shutdownConfigRetriever();
container.shutdown(currentGraph);
}
@Test
void bundle_from_generation_that_throws_in_graph_creation_phase_is_uninstalled() {
ComponentEntry simpleComponent = new ComponentEntry("simpleComponent", SimpleComponent.class);
ComponentEntry configThrower = new ComponentEntry("configThrower", ComponentThrowingExceptionForMissingConfig.class);
writeBootstrapConfigsWithBundles(List.of("bundle-1"), List.of(simpleComponent));
Container container = newContainer(dirConfigSource);
ComponentGraph currentGraph = getNewComponentGraph(container);
assertEquals(1, osgi.getBundles().length);
assertEquals("bundle-1", osgi.getBundles()[0].getSymbolicName());
writeBootstrapConfigsWithBundles(List.of("bundle-2"), List.of(configThrower));
dirConfigSource.writeConfig("test", "stringVal \"myString\"");
container.reloadConfig(2);
assertNewComponentGraphFails(container, currentGraph, IllegalArgumentException.class);
assertEquals(1, currentGraph.generation());
assertEquals(1, osgi.getBundles().length);
assertEquals("bundle-1", osgi.getBundles()[0].getSymbolicName());
container.shutdownConfigRetriever();
container.shutdown(currentGraph);
}
private void assertNewComponentGraphFails(Container container, ComponentGraph currentGraph, Class<? extends RuntimeException> exception) {
try {
getNewComponentGraph(container, currentGraph);
fail("Expected exception");
} catch (Exception e) {
assertEquals(exception, e.getClass());
}
}
@Test
@Test
void providers_are_destroyed() {
writeBootstrapConfigs("id1", DestructableProvider.class);
ComponentDeconstructor deconstructor = (generation, components, bundles) -> {
components.forEach(component -> {
if (component instanceof AbstractComponent) {
((AbstractComponent) component).deconstruct();
} else if (component instanceof Provider) {
((Provider<?>) component).deconstruct();
}
});
if (!bundles.isEmpty()) throw new IllegalArgumentException("This test should not use bundles");
};
Container container = newContainer(dirConfigSource, deconstructor);
ComponentGraph oldGraph = getNewComponentGraph(container);
DestructableEntity destructableEntity = oldGraph.getInstance(DestructableEntity.class);
writeBootstrapConfigs("id2", DestructableProvider.class);
container.reloadConfig(2);
ComponentGraph graph = getNewComponentGraph(container, oldGraph);
assertTrue(destructableEntity.deconstructed);
container.shutdownConfigRetriever();
container.shutdown(graph);
}
@Test
void providers_are_invoked_only_when_needed() {
writeBootstrapConfigs("id1", FailOnGetProvider.class);
Container container = newContainer(dirConfigSource);
ComponentGraph oldGraph = getNewComponentGraph(container);
container.shutdown(oldGraph);
}
static class DestructableEntity {
private boolean deconstructed = false;
}
public static class DestructableProvider implements Provider<DestructableEntity> {
DestructableEntity instance = new DestructableEntity();
public DestructableEntity get() {
return instance;
}
public void deconstruct() {
assertFalse(instance.deconstructed);
instance.deconstructed = true;
}
}
public static class FailOnGetProvider implements Provider<Integer> {
public Integer get() {
fail("Should never be called.");
return null;
}
public void deconstruct() {
}
}
public static class ComponentTakingConfig extends AbstractComponent {
private final TestConfig config;
public ComponentTakingConfig(TestConfig config) {
assertNotNull(config);
this.config = config;
}
}
public static class ComponentThrowingExceptionInConstructor {
public ComponentThrowingExceptionInConstructor() {
throw new RuntimeException("This component fails upon construction.");
}
}
public static class ComponentThrowingExceptionForMissingConfig extends AbstractComponent {
public ComponentThrowingExceptionForMissingConfig(IntConfig intConfig) {
fail("This component should never be created. Only used for tests where 'int' config is missing.");
}
}
public static class DestructableComponent extends AbstractComponent {
private boolean deconstructed = false;
@Override
public void deconstruct() {
deconstructed = true;
}
}
private ComponentTakingConfig createComponentTakingConfig(ComponentGraph componentGraph) {
return componentGraph.getInstance(ComponentTakingConfig.class);
}
} |
FYI there's `Pattern::asMatchPredicate` that does this | Optional<ContainerPath> enqueueCoredump(NodeAgentContext context, ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
Predicate<String> isCoreDump = filename -> !HS_ERR_PATTERN.matcher(filename).matches();
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(attributes -> {
if (isReadyForProcessing(attributes)) {
return true;
} else {
if (isCoreDump.test(attributes.filename()))
context.log(logger, attributes.path() + " is still being written");
return false;
}
})
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> isCoreDump.test(toProcess.get(i).getFileName().toString()))
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = containerProcessingPath.resolve(coredumpIdSupplier.get());
new MakeDirectory(enqueuedDir).createParents().converge(context);
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
new FileMover(path, enqueuedDir.resolve(prefix + path.getFileName())).converge(context);
});
return Optional.of(enqueuedDir);
} | Predicate<String> isCoreDump = filename -> !HS_ERR_PATTERN.matcher(filename).matches(); | Optional<ContainerPath> enqueueCoredump(NodeAgentContext context, ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
Predicate<String> isCoreDump = filename -> !HS_ERR_PATTERN.matcher(filename).matches();
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(attributes -> {
if (isReadyForProcessing(attributes)) {
return true;
} else {
if (isCoreDump.test(attributes.filename()))
context.log(logger, attributes.path() + " is still being written");
return false;
}
})
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> isCoreDump.test(toProcess.get(i).getFileName().toString()))
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = containerProcessingPath.resolve(coredumpIdSupplier.get());
new MakeDirectory(enqueuedDir).createParents().converge(context);
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
new FileMover(path, enqueuedDir.resolve(prefix + path.getFileName())).converge(context);
});
return Optional.of(enqueuedDir);
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String METADATA2_FILE_NAME = "metadata2.json";
private static final String COMPRESSED_EXTENSION = ".zst";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final Cores cores;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
private final BooleanFlag reportCoresViaCfgFlag;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
*/
public CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
FlagSource flagSource) {
this(coreCollector, cores, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/,
flagSource);
}
CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier, FlagSource flagSource) {
this.coreCollector = coreCollector;
this.cores = cores;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
this.reportCoresViaCfgFlag = Flags.REPORT_CORES_VIA_CFG.bindTo(flagSource);
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier,
Optional<DockerImage> dockerImage, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(context, containerCrashPath, containerProcessingPath)
.ifPresent(path -> {
if (reportCoresViaCfgFlag.with(FetchVector.Dimension.NODE_TYPE, context.nodeType().name()).value()) {
processAndReportSingleCoreDump2(context, path, dockerImage);
} else {
processAndReportSingleCoredump(context, path, nodeAttributesSupplier);
}
});
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(NodeAgentContext context, ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(context, containerCrashPath, containerProcessingPath));
}
/**
 * Legacy reporting path: builds/loads the JSON metadata for the core dump in the given
 * processing directory, reports it via {@code coredumpReporter}, then compresses (and, when an
 * encryption key is available, encrypts) the core file and moves the directory to the done area.
 *
 * NOTE(review): the javadoc previously attached here ("Moves a coredump and related hs_err
 * file(s)...") describes {@code enqueueCoredump}, not this method — it was misplaced.
 *
 * @throws RuntimeException wrapping any failure, with the core dump directory in the message
 */
void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
    try {
        // The supplier may return null (no key available) — then the core is compressed but not encrypted.
        Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
        Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
        String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
        String coredumpId = coredumpDirectory.getFileName().toString();
        coredumpReporter.reportCoredump(coredumpId, metadata);
        finishProcessing(context, coredumpDirectory, sharedCoreKey);
        context.log(logger, "Successfully reported coredump " + coredumpId);
    } catch (Exception e) {
        throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
    }
}
/**
 * Returns the core dump metadata as a JSON string. If metadata.json already exists it is
 * reused (with the decryption token patched in when one is supplied); otherwise metadata is
 * collected via {@link CoreCollector}, persisted to metadata.json, and returned.
 */
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
    UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));

    if (metadataPath.exists()) {
        if (decryptionToken.isEmpty()) return metadataPath.readUtf8File();
        // A token was supplied — rewrite the stored metadata so it carries the current token.
        String patched = metadataWithPatchedTokenValue(metadataPath, decryptionToken.get());
        metadataPath.deleteIfExists();
        metadataPath.writeUtf8File(patched);
        return patched;
    }

    // First attempt: collect metadata and remember it for idempotent retries.
    ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
    Map<String, Object> fields = new HashMap<>(coreCollector.collect(context, coredumpFile));
    fields.putAll(nodeAttributesSupplier.get());
    fields.put("coredump_path", doneCoredumpsPath
            .resolve(context.containerName().asString())
            .resolve(coredumpDirectory.getFileName().toString())
            .resolve(coredumpFile.getFileName().toString()).toString());
    decryptionToken.ifPresent(token -> fields.put("decryption_token", token));

    String serialized = objectMapper.writeValueAsString(Map.of("fields", fields));
    metadataPath.writeUtf8File(serialized);
    return serialized;
}
/** Returns the stored metadata JSON with "fields.decryption_token" replaced by the given token. */
private String metadataWithPatchedTokenValue(UnixPath metadataPath, String decryptionToken) throws JsonProcessingException {
    var root = objectMapper.readTree(metadataPath.readUtf8File());
    var fields = root.path("fields");
    if (fields.isObject()) {
        ((ObjectNode) fields).put("decryption_token", decryptionToken);
    }
    return objectMapper.writeValueAsString(root);
}
/**
 * Wraps the stream in an AES-GCM encrypting CipherOutputStream when a shared key is present;
 * otherwise returns the stream unchanged.
 */
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
    if (sharedCoreKey.isEmpty()) return wrappedStream;
    return new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(sharedCoreKey.get()));
}
/**
 * Compresses and, if a key is provided, encrypts the core file (and deletes the uncompressed
 * core), then moves the entire core dump processing directory to the per-container directory
 * under {@code doneCoredumpsPath}.
 */
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) {
    ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
    // ".zst" when only compressed, ".zst.enc" when also encrypted.
    String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
    ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);

    try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
         OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
        zcis.transferTo(fos);
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
    // Delete the (large) raw core only after the compressed copy is complete.
    new FileDeleter(coreFile).converge(context);

    Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
    new MakeDirectory(newCoredumpDirectory).createParents().converge(context);
    new FileMover(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()))
            .converge(context);
}
/**
 * Returns the raw (not yet compressed/encrypted) core dump file inside the given processing
 * directory.
 *
 * @throws IllegalStateException if no such file exists
 */
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath processingDirectory) {
    var isRawCoreFile = nameStartsWith(COREDUMP_FILENAME_PREFIX)
            .and(nameEndsWith(COMPRESSED_EXTENSION).negate())
            .and(nameEndsWith(ENCRYPTED_EXTENSION).negate());

    Optional<Path> coreFile = FileFinder.files(processingDirectory)
            .match(isRawCoreFile)
            .maxDepth(1)
            .stream()
            .findFirst()
            .map(FileFinder.FileAttributes::path);

    return (ContainerPath) coreFile.orElseThrow(() -> new IllegalStateException(
            "No coredump file found in processing directory " + processingDirectory));
}
/** Samples the "coredumps.enqueued" and "coredumps.processed" gauges for this node. */
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
    Dimensions dimensions = generateDimensions(context);

    // Unprocessed = everything in the crash directory except hidden files, hs_err logs,
    // already compressed/encrypted artifacts and metadata files.
    int enqueued = FileFinder.files(containerCrashPath)
            .match(nameStartsWith(".").negate())
            .match(nameMatches(HS_ERR_PATTERN).negate())
            .match(nameEndsWith(COMPRESSED_EXTENSION).negate())
            .match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
            .match(nameStartsWith("metadata").negate())
            .list()
            .size();
    metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED)
           .sample(enqueued);

    // Processed = one directory per handled core dump under the per-container done directory.
    Path doneDirForContainer = doneCoredumpsPath.resolve(context.containerName().asString());
    int processed = FileFinder.directories(doneDirForContainer)
            .maxDepth(1)
            .list()
            .size();
    metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED)
           .sample(processed);
}
/**
 * Builds the pre-tagged metric dimensions for this node: host/flavor/state/zone, plus owner,
 * cluster membership, parent host, orchestrator state and system when available.
 */
private Dimensions generateDimensions(NodeAgentContext context) {
    NodeSpec node = context.node();
    Dimensions.Builder builder = new Dimensions.Builder()
            .add("host", node.hostname())
            .add("flavor", node.flavor())
            .add("state", node.state().toString())
            .add("zone", context.zone().getId().value());

    node.owner().ifPresent(owner -> builder
            .add("tenantName", owner.tenant().value())
            .add("applicationName", owner.application().value())
            .add("instanceName", owner.instance().value())
            .add("app", String.join(".", owner.application().value(), owner.instance().value()))
            .add("applicationId", owner.toFullString()));

    node.membership().ifPresent(membership -> builder
            .add("clustertype", membership.type().value())
            .add("clusterid", membership.clusterId()));

    node.parentHostname().ifPresent(parent -> builder.add("parentHostname", parent));
    builder.add("orchestratorState", node.orchestratorStatus().asString());
    builder.add("system", context.zone().getSystemName().value());
    return builder.build();
}
/** A file is considered ready for processing once it has been unmodified for at least 60 seconds. */
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
    var cutoff = clock.instant().minusSeconds(60);
    return fileAttributes.lastModifiedTime().isBefore(cutoff);
}
/**
 * Flag-controlled reporting path: gathers metadata for the core dump in the given processing
 * directory, reports it via {@code cores}, then compresses/encrypts the core file and moves
 * the directory to the done area.
 *
 * Fix: the original invoked {@code dockerImage.ifPresent(metadata::setDockerImage)} twice;
 * the redundant duplicate call has been removed (behavior is unchanged — the second call
 * just re-set the same value).
 */
void processAndReportSingleCoreDump2(NodeAgentContext context, ContainerPath coreDumpDirectory,
                                     Optional<DockerImage> dockerImage) {
    CoreDumpMetadata metadata = gatherMetadata(context, coreDumpDirectory);
    dockerImage.ifPresent(metadata::setDockerImage);
    dockerImage.flatMap(DockerImage::tag).ifPresent(metadata::setVespaVersion);
    // The supplier may return null (no key) — then the core is compressed but not encrypted.
    Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
    sharedCoreKey.map(key -> key.sealedSharedKey().toTokenString()).ifPresent(metadata::setDecryptionToken);

    String coreDumpId = coreDumpDirectory.getFileName().toString();
    cores.report(context.hostname(), coreDumpId, metadata);
    context.log(logger, "Core dump reported: " + coreDumpId);
    finishProcessing(context, coreDumpDirectory, sharedCoreKey);
}
/**
 * Returns metadata for the given core dump. If metadata2.json is already present in the core
 * dump directory its content is reused; otherwise metadata is collected with
 * {@link CoreCollector}, augmented with microcode/kernel/path details, and persisted to
 * metadata2.json so retries do not have to re-collect.
 *
 * Fix: replaced the {@code request.map(...).get()} construct inside an {@code isPresent()}
 * check (an Optional anti-pattern) with a direct, equivalent form.
 */
private CoreDumpMetadata gatherMetadata(NodeAgentContext context, ContainerPath coreDumpDirectory) {
    ContainerPath metadataPath = coreDumpDirectory.resolve(METADATA2_FILE_NAME);
    Optional<ReportCoreDumpRequest> request = ReportCoreDumpRequest.load(metadataPath);
    if (request.isPresent()) {
        // Metadata was gathered on a previous attempt — reuse it.
        CoreDumpMetadata metadata = new CoreDumpMetadata();
        request.get().populateMetadata(metadata, FileSystems.getDefault());
        return metadata;
    }

    ContainerPath coreDumpFile = findCoredumpFileInProcessingDirectory(coreDumpDirectory);
    CoreDumpMetadata metadata = coreCollector.collect2(context, coreDumpFile);
    metadata.setCpuMicrocodeVersion(getMicrocodeVersion())
            .setKernelVersion(System.getProperty("os.version"))
            .setCoreDumpPath(doneCoredumpsPath.resolve(context.containerName().asString())
                                              .resolve(coreDumpDirectory.getFileName().toString())
                                              .resolve(coreDumpFile.getFileName().toString()));

    // Persist the collected metadata so a retry (e.g. after restart) skips collection.
    ReportCoreDumpRequest requestInstance = new ReportCoreDumpRequest();
    requestInstance.fillFrom(metadata);
    requestInstance.save(metadataPath);
    context.log(logger, "Wrote " + metadataPath.pathOnHost());
    return metadata;
}
/**
 * Reads the CPU microcode version from /proc/cpuinfo.
 *
 * @return the trimmed microcode value, or "UNKNOWN" when no microcode line is present
 * @throws ConvergenceException if the microcode line is not on the expected "key : value" form
 */
private String getMicrocodeVersion() {
    String output = uncheck(() -> Files.readAllLines(Paths.get("/proc/cpuinfo")).stream()
            .filter(line -> line.startsWith("microcode"))
            .findFirst()
            .orElse("microcode : UNKNOWN"));

    // Fix: split on the first ':' only, so a value that itself contains ':' no longer
    // triggers a spurious "not as expected" failure (split(":") could yield > 2 parts).
    String[] results = output.split(":", 2);
    if (results.length != 2) {
        throw ConvergenceException.ofError("Result from detect microcode command not as expected: " + output);
    }
    return results[1].trim();
}
} | class CoredumpHandler {
// Matches JVM crash report files, e.g. "hs_err_pid1234.log".
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
// Subdirectory of the crash directory where the core dump being handled is staged.
private static final String PROCESSING_DIRECTORY_NAME = "processing";
// Metadata file used by the legacy reporting path (processAndReportSingleCoredump).
private static final String METADATA_FILE_NAME = "metadata.json";
// Metadata file used by the flag-controlled reporting path (processAndReportSingleCoreDump2).
private static final String METADATA2_FILE_NAME = "metadata2.json";
// Extension of the Zstandard-compressed core file.
private static final String COMPRESSED_EXTENSION = ".zst";
// Appended when the compressed core is also encrypted.
private static final String ENCRYPTED_EXTENSION = ".enc";
// Prefix given to the core file when it is moved into its processing directory.
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final Cores cores;
private final CoredumpReporter coredumpReporter;
// Path inside the container where cores are dumped.
// NOTE(review): field name has a typo — "Patch" should be "Path" (kept to avoid a code change here).
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
// Injected so the "still being written" age check is testable.
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
// May supply null, in which case cores are compressed but not encrypted.
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
private final BooleanFlag reportCoresViaCfgFlag;
/**
 * @param crashPathInContainer path inside the container where core dump are dumped
 * @param doneCoredumpsPath path on host where processed core dumps are stored
 */
public CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
                       String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
                       FlagSource flagSource) {
    this(coreCollector, cores, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
         metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/,
         flagSource);
}
// Package-private constructor allowing injection of clock, id supplier and key supplier
// (presumably for tests — confirm against callers).
CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
                String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
                Clock clock, Supplier<String> coredumpIdSupplier,
                Supplier<SecretSharedKey> secretSharedKeySupplier, FlagSource flagSource) {
    this.coreCollector = coreCollector;
    this.cores = cores;
    this.coredumpReporter = coredumpReporter;
    this.crashPatchInContainer = crashPathInContainer;
    this.doneCoredumpsPath = doneCoredumpsPath;
    this.metrics = metrics;
    this.clock = clock;
    this.coredumpIdSupplier = coredumpIdSupplier;
    this.secretSharedKeySupplier = secretSharedKeySupplier;
    this.reportCoresViaCfgFlag = Flags.REPORT_CORES_VIA_CFG.bindTo(flagSource);
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier,
Optional<DockerImage> dockerImage, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(context, containerCrashPath, containerProcessingPath)
.ifPresent(path -> {
if (reportCoresViaCfgFlag.with(FetchVector.Dimension.NODE_TYPE, context.nodeType().name()).value()) {
processAndReportSingleCoreDump2(context, path, dockerImage);
} else {
processAndReportSingleCoredump(context, path, nodeAttributesSupplier);
}
});
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(NodeAgentContext context, ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(context, containerCrashPath, containerProcessingPath));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!metadataPath.exists()) {
ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
metadata.putAll(nodeAttributesSupplier.get());
metadata.put("coredump_path", doneCoredumpsPath
.resolve(context.containerName().asString())
.resolve(coredumpDirectory.getFileName().toString())
.resolve(coredumpFile.getFileName().toString()).toString());
decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
if (decryptionToken.isPresent()) {
String metadataFields = metadataWithPatchedTokenValue(metadataPath, decryptionToken.get());
metadataPath.deleteIfExists();
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
}
private String metadataWithPatchedTokenValue(UnixPath metadataPath, String decryptionToken) throws JsonProcessingException {
var jsonRoot = objectMapper.readTree(metadataPath.readUtf8File());
if (jsonRoot.path("fields").isObject()) {
((ObjectNode)jsonRoot.get("fields")).put("decryption_token", decryptionToken);
}
return objectMapper.writeValueAsString(jsonRoot);
}
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
return sharedCoreKey
.map(key -> (OutputStream)new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(key)))
.orElse(wrappedStream);
}
/**
* Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
* the entire core dump processing directory to {@link
*/
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
new FileDeleter(coreFile).converge(context);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
new MakeDirectory(newCoredumpDirectory).createParents().converge(context);
new FileMover(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()))
.converge(context);
}
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
return (ContainerPath) FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
.and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value());
node.owner().ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
);
node.membership().ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.type().value())
.add("clusterid", membership.clusterId())
);
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
dimensionsBuilder.add("system", context.zone().getSystemName().value());
return dimensionsBuilder.build();
}
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
return clock.instant().minusSeconds(60).isAfter(fileAttributes.lastModifiedTime());
}
void processAndReportSingleCoreDump2(NodeAgentContext context, ContainerPath coreDumpDirectory,
Optional<DockerImage> dockerImage) {
CoreDumpMetadata metadata = gatherMetadata(context, coreDumpDirectory);
dockerImage.ifPresent(metadata::setDockerImage);
dockerImage.flatMap(DockerImage::tag).ifPresent(metadata::setVespaVersion);
dockerImage.ifPresent(metadata::setDockerImage);
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
sharedCoreKey.map(key -> key.sealedSharedKey().toTokenString()).ifPresent(metadata::setDecryptionToken);
String coreDumpId = coreDumpDirectory.getFileName().toString();
cores.report(context.hostname(), coreDumpId, metadata);
context.log(logger, "Core dump reported: " + coreDumpId);
finishProcessing(context, coreDumpDirectory, sharedCoreKey);
}
private CoreDumpMetadata gatherMetadata(NodeAgentContext context, ContainerPath coreDumpDirectory) {
ContainerPath metadataPath = coreDumpDirectory.resolve(METADATA2_FILE_NAME);
Optional<ReportCoreDumpRequest> request = ReportCoreDumpRequest.load(metadataPath);
if (request.isPresent()) {
return request.map(requestInstance -> {
var metadata = new CoreDumpMetadata();
requestInstance.populateMetadata(metadata, FileSystems.getDefault());
return metadata;
})
.get();
}
ContainerPath coreDumpFile = findCoredumpFileInProcessingDirectory(coreDumpDirectory);
CoreDumpMetadata metadata = coreCollector.collect2(context, coreDumpFile);
metadata.setCpuMicrocodeVersion(getMicrocodeVersion())
.setKernelVersion(System.getProperty("os.version"))
.setCoreDumpPath(doneCoredumpsPath.resolve(context.containerName().asString())
.resolve(coreDumpDirectory.getFileName().toString())
.resolve(coreDumpFile.getFileName().toString()));
ReportCoreDumpRequest requestInstance = new ReportCoreDumpRequest();
requestInstance.fillFrom(metadata);
requestInstance.save(metadataPath);
context.log(logger, "Wrote " + metadataPath.pathOnHost());
return metadata;
}
private String getMicrocodeVersion() {
String output = uncheck(() -> Files.readAllLines(Paths.get("/proc/cpuinfo")).stream()
.filter(line -> line.startsWith("microcode"))
.findFirst()
.orElse("microcode : UNKNOWN"));
String[] results = output.split(":");
if (results.length != 2) {
throw ConvergenceException.ofError("Result from detect microcode command not as expected: " + output);
}
return results[1].trim();
}
} |
Nice to know, but it didn't improve the readability. | Optional<ContainerPath> enqueueCoredump(NodeAgentContext context, ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
Predicate<String> isCoreDump = filename -> !HS_ERR_PATTERN.matcher(filename).matches();
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(attributes -> {
if (isReadyForProcessing(attributes)) {
return true;
} else {
if (isCoreDump.test(attributes.filename()))
context.log(logger, attributes.path() + " is still being written");
return false;
}
})
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> isCoreDump.test(toProcess.get(i).getFileName().toString()))
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = containerProcessingPath.resolve(coredumpIdSupplier.get());
new MakeDirectory(enqueuedDir).createParents().converge(context);
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
new FileMover(path, enqueuedDir.resolve(prefix + path.getFileName())).converge(context);
});
return Optional.of(enqueuedDir);
} | Predicate<String> isCoreDump = filename -> !HS_ERR_PATTERN.matcher(filename).matches(); | Optional<ContainerPath> enqueueCoredump(NodeAgentContext context, ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
Predicate<String> isCoreDump = filename -> !HS_ERR_PATTERN.matcher(filename).matches();
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(attributes -> {
if (isReadyForProcessing(attributes)) {
return true;
} else {
if (isCoreDump.test(attributes.filename()))
context.log(logger, attributes.path() + " is still being written");
return false;
}
})
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> isCoreDump.test(toProcess.get(i).getFileName().toString()))
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = containerProcessingPath.resolve(coredumpIdSupplier.get());
new MakeDirectory(enqueuedDir).createParents().converge(context);
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
new FileMover(path, enqueuedDir.resolve(prefix + path.getFileName())).converge(context);
});
return Optional.of(enqueuedDir);
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String METADATA2_FILE_NAME = "metadata2.json";
private static final String COMPRESSED_EXTENSION = ".zst";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final Cores cores;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
private final BooleanFlag reportCoresViaCfgFlag;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
*/
public CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
FlagSource flagSource) {
this(coreCollector, cores, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/,
flagSource);
}
CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier, FlagSource flagSource) {
this.coreCollector = coreCollector;
this.cores = cores;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
this.reportCoresViaCfgFlag = Flags.REPORT_CORES_VIA_CFG.bindTo(flagSource);
}
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier,
Optional<DockerImage> dockerImage, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
getCoredumpToProcess(context, containerCrashPath, containerProcessingPath)
.ifPresent(path -> {
if (reportCoresViaCfgFlag.with(FetchVector.Dimension.NODE_TYPE, context.nodeType().name()).value()) {
processAndReportSingleCoreDump2(context, path, dockerImage);
} else {
processAndReportSingleCoredump(context, path, nodeAttributesSupplier);
}
});
}
/** @return path to directory inside processing directory that contains a core dump file to process */
Optional<ContainerPath> getCoredumpToProcess(NodeAgentContext context, ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
return FileFinder.directories(containerProcessingPath).stream()
.map(FileFinder.FileAttributes::path)
.findAny()
.map(ContainerPath.class::cast)
.or(() -> enqueueCoredump(context, containerCrashPath, containerProcessingPath));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!metadataPath.exists()) {
ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
metadata.putAll(nodeAttributesSupplier.get());
metadata.put("coredump_path", doneCoredumpsPath
.resolve(context.containerName().asString())
.resolve(coredumpDirectory.getFileName().toString())
.resolve(coredumpFile.getFileName().toString()).toString());
decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
if (decryptionToken.isPresent()) {
String metadataFields = metadataWithPatchedTokenValue(metadataPath, decryptionToken.get());
metadataPath.deleteIfExists();
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
}
/** Returns the serialized contents of the given metadata file, with "fields.decryption_token" set to the given token. */
private String metadataWithPatchedTokenValue(UnixPath metadataPath, String decryptionToken) throws JsonProcessingException {
    var root = objectMapper.readTree(metadataPath.readUtf8File());
    var fields = root.path("fields");
    if (fields.isObject()) {
        ((ObjectNode) fields).put("decryption_token", decryptionToken);
    }
    return objectMapper.writeValueAsString(root);
}
/**
 * Returns the given stream wrapped in an AES-GCM encrypting stream when a shared key is
 * provided, otherwise returns the stream unchanged.
 */
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
    if (sharedCoreKey.isEmpty()) {
        return wrappedStream;
    }
    return new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(sharedCoreKey.get()));
}
/**
 * Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
 * the entire core dump processing directory to {@code doneCoredumpsPath} under the container's name.
 */
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
// ".zst" always; ".enc" appended as well when the compressed core is also encrypted
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
// Stream the raw core through zstd compression and, optionally, encryption
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
// The uncompressed core is no longer needed once the compressed copy exists
new FileDeleter(coreFile).converge(context);
// Move the whole processing directory (compressed core + metadata) to the per-container "done" directory
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
new MakeDirectory(newCoredumpDirectory).createParents().converge(context);
new FileMover(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()))
.converge(context);
}
/** Returns the single unprocessed core file (neither compressed nor encrypted) in the given processing directory. */
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath processingDirectory) {
    var isUnprocessedCore = nameStartsWith(COREDUMP_FILENAME_PREFIX)
            .and(nameEndsWith(COMPRESSED_EXTENSION).negate())
            .and(nameEndsWith(ENCRYPTED_EXTENSION).negate());
    return FileFinder.files(processingDirectory)
                     .match(isUnprocessedCore)
                     .maxDepth(1)
                     .stream()
                     .map(FileFinder.FileAttributes::path)
                     .map(ContainerPath.class::cast)
                     .findFirst()
                     .orElseThrow(() -> new IllegalStateException(
                             "No coredump file found in processing directory " + processingDirectory));
}
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
// Enqueued = files in the crash directory that are actual cores still awaiting processing:
// exclude hidden files, JVM hs_err logs, already compressed/encrypted cores and metadata files.
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
// Processed = one directory per completed core dump under the per-container "done" path
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
// Builds the metric dimensions for this node: host/flavor/state/zone always,
// plus owner-, membership- and parent-derived dimensions when present.
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value());
node.owner().ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
// "app" is application.instance; "applicationId" is the full tenant.application.instance form
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
);
node.membership().ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.type().value())
.add("clusterid", membership.clusterId())
);
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
dimensionsBuilder.add("system", context.zone().getSystemName().value());
return dimensionsBuilder.build();
}
/** A core file is considered fully written once it has not been modified for at least a minute. */
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
    var oneMinuteAgo = clock.instant().minusSeconds(60);
    return fileAttributes.lastModifiedTime().isBefore(oneMinuteAgo);
}
/**
 * Gathers metadata for the core dump in the given directory, reports it via the cores
 * service, and moves the processed dump to the "done" directory.
 */
void processAndReportSingleCoreDump2(NodeAgentContext context, ContainerPath coreDumpDirectory,
                                     Optional<DockerImage> dockerImage) {
    CoreDumpMetadata metadata = gatherMetadata(context, coreDumpDirectory);
    dockerImage.ifPresent(metadata::setDockerImage); // was accidentally invoked twice; once is sufficient
    dockerImage.flatMap(DockerImage::tag).ifPresent(metadata::setVespaVersion);
    // Token lets the receiver decrypt the core; only set when a shared key is configured
    Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
    sharedCoreKey.map(key -> key.sealedSharedKey().toTokenString()).ifPresent(metadata::setDecryptionToken);
    String coreDumpId = coreDumpDirectory.getFileName().toString();
    cores.report(context.hostname(), coreDumpId, metadata);
    context.log(logger, "Core dump reported: " + coreDumpId);
    finishProcessing(context, coreDumpDirectory, sharedCoreKey);
}
/**
 * Returns metadata for the core dump in the given processing directory. If metadata2.json
 * already exists it is loaded from there (so retries skip the expensive collection step);
 * otherwise metadata is collected, persisted to metadata2.json, and returned.
 */
private CoreDumpMetadata gatherMetadata(NodeAgentContext context, ContainerPath coreDumpDirectory) {
    ContainerPath metadataPath = coreDumpDirectory.resolve(METADATA2_FILE_NAME);
    Optional<ReportCoreDumpRequest> request = ReportCoreDumpRequest.load(metadataPath);
    if (request.isPresent()) {
        // Simpler than the previous request.map(...).get() — the presence check already guards get()
        CoreDumpMetadata metadata = new CoreDumpMetadata();
        request.get().populateMetadata(metadata, FileSystems.getDefault());
        return metadata;
    }
    ContainerPath coreDumpFile = findCoredumpFileInProcessingDirectory(coreDumpDirectory);
    CoreDumpMetadata metadata = coreCollector.collect2(context, coreDumpFile);
    // coreDumpPath is the location the dump will have after finishProcessing() moves it
    metadata.setCpuMicrocodeVersion(getMicrocodeVersion())
            .setKernelVersion(System.getProperty("os.version"))
            .setCoreDumpPath(doneCoredumpsPath.resolve(context.containerName().asString())
                                              .resolve(coreDumpDirectory.getFileName().toString())
                                              .resolve(coreDumpFile.getFileName().toString()));
    // Persist so a later retry short-circuits through the isPresent() branch above
    ReportCoreDumpRequest requestInstance = new ReportCoreDumpRequest();
    requestInstance.fillFrom(metadata);
    requestInstance.save(metadataPath);
    context.log(logger, "Wrote " + metadataPath.pathOnHost());
    return metadata;
}
/** Reads the CPU microcode version from /proc/cpuinfo; throws if the line cannot be split into "key : value". */
private String getMicrocodeVersion() {
    String line = uncheck(() -> Files.readAllLines(Paths.get("/proc/cpuinfo")).stream()
                                     .filter(l -> l.startsWith("microcode"))
                                     .findFirst()
                                     .orElse("microcode : UNKNOWN"));
    String[] parts = line.split(":");
    if (parts.length != 2) {
        throw ConvergenceException.ofError("Result from detect microcode command not as expected: " + line);
    }
    return parts[1].trim();
}
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String METADATA2_FILE_NAME = "metadata2.json";
private static final String COMPRESSED_EXTENSION = ".zst";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final Cores cores;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final Supplier<SecretSharedKey> secretSharedKeySupplier;
private final BooleanFlag reportCoresViaCfgFlag;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
* @param doneCoredumpsPath path on host where processed core dumps are stored
*/
public CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
FlagSource flagSource) {
this(coreCollector, cores, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), () -> null /*TODO*/,
flagSource);
}
CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
Supplier<SecretSharedKey> secretSharedKeySupplier, FlagSource flagSource) {
this.coreCollector = coreCollector;
this.cores = cores;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
this.reportCoresViaCfgFlag = Flags.REPORT_CORES_VIA_CFG.bindTo(flagSource);
}
/**
 * Updates core dump metrics and, if a core dump is available, processes and reports it.
 * At most one core dump is processed per invocation.
 */
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier,
Optional<DockerImage> dockerImage, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
// Pending = files modified within the last minute (see isReadyForProcessing)
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
if (!pendingCores.isEmpty())
// List the file names when there are few, otherwise just the count, to bound the message
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
// Feature flag selects the reporting path: cores service (new) vs coredump reporter (old)
getCoredumpToProcess(context, containerCrashPath, containerProcessingPath)
.ifPresent(path -> {
if (reportCoresViaCfgFlag.with(FetchVector.Dimension.NODE_TYPE, context.nodeType().name()).value()) {
processAndReportSingleCoreDump2(context, path, dockerImage);
} else {
processAndReportSingleCoredump(context, path, nodeAttributesSupplier);
}
});
}
/** @return path to directory inside processing directory that contains a core dump file to process */
/** Returns a directory under processing/ containing a core dump to process, enqueueing a new one if none exists. */
Optional<ContainerPath> getCoredumpToProcess(NodeAgentContext context, ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
    Optional<ContainerPath> alreadyEnqueued = FileFinder.directories(containerProcessingPath).stream()
            .map(FileFinder.FileAttributes::path)
            .map(ContainerPath.class::cast)
            .findAny();
    return alreadyEnqueued.or(() -> enqueueCoredump(context, containerCrashPath, containerProcessingPath));
}
/**
* Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
* Limit to only processing one coredump at the time, starting with the oldest.
*
* Assumption: hs_err files are much smaller than core files and are written (last modified time)
* before the core file.
*
* @return path to directory inside processing directory which contains the enqueued core dump file
*/
// Gathers (or reuses cached) metadata for the core dump in the given directory, reports it,
// then compresses/encrypts and moves the dump. Any failure is wrapped in a RuntimeException
// carrying the directory for context.
void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
try {
// Key may legitimately be absent (supplier returns null) — then the core is not encrypted
Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
String coredumpId = coredumpDirectory.getFileName().toString();
coredumpReporter.reportCoredump(coredumpId, metadata);
finishProcessing(context, coredumpDirectory, sharedCoreKey);
context.log(logger, "Successfully reported coredump " + coredumpId);
} catch (Exception e) {
throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
}
}
/**
* @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
* {@link CoreCollector} and stores it to metadata.json
*/
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!metadataPath.exists()) {
ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
metadata.putAll(nodeAttributesSupplier.get());
metadata.put("coredump_path", doneCoredumpsPath
.resolve(context.containerName().asString())
.resolve(coredumpDirectory.getFileName().toString())
.resolve(coredumpFile.getFileName().toString()).toString());
decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
if (decryptionToken.isPresent()) {
String metadataFields = metadataWithPatchedTokenValue(metadataPath, decryptionToken.get());
metadataPath.deleteIfExists();
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
}
private String metadataWithPatchedTokenValue(UnixPath metadataPath, String decryptionToken) throws JsonProcessingException {
var jsonRoot = objectMapper.readTree(metadataPath.readUtf8File());
if (jsonRoot.path("fields").isObject()) {
((ObjectNode)jsonRoot.get("fields")).put("decryption_token", decryptionToken);
}
return objectMapper.writeValueAsString(jsonRoot);
}
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
return sharedCoreKey
.map(key -> (OutputStream)new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(key)))
.orElse(wrappedStream);
}
/**
 * Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
 * the entire core dump processing directory to {@code doneCoredumpsPath} under the container's name.
 */
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
new FileDeleter(coreFile).converge(context);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
new MakeDirectory(newCoredumpDirectory).createParents().converge(context);
new FileMover(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()))
.converge(context);
}
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
return (ContainerPath) FileFinder.files(coredumpProccessingDirectory)
.match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
.and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
.maxDepth(1)
.stream()
.map(FileFinder.FileAttributes::path)
.findFirst()
.orElseThrow(() -> new IllegalStateException(
"No coredump file found in processing directory " + coredumpProccessingDirectory));
}
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
private Dimensions generateDimensions(NodeAgentContext context) {
NodeSpec node = context.node();
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", node.hostname())
.add("flavor", node.flavor())
.add("state", node.state().toString())
.add("zone", context.zone().getId().value());
node.owner().ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant().value())
.add("applicationName", owner.application().value())
.add("instanceName", owner.instance().value())
.add("app", String.join(".", owner.application().value(), owner.instance().value()))
.add("applicationId", owner.toFullString())
);
node.membership().ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.type().value())
.add("clusterid", membership.clusterId())
);
node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
dimensionsBuilder.add("system", context.zone().getSystemName().value());
return dimensionsBuilder.build();
}
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
return clock.instant().minusSeconds(60).isAfter(fileAttributes.lastModifiedTime());
}
/**
 * Gathers metadata for the core dump in the given directory, reports it via the cores
 * service, and moves the processed dump to the "done" directory.
 */
void processAndReportSingleCoreDump2(NodeAgentContext context, ContainerPath coreDumpDirectory,
                                     Optional<DockerImage> dockerImage) {
    CoreDumpMetadata metadata = gatherMetadata(context, coreDumpDirectory);
    dockerImage.ifPresent(metadata::setDockerImage); // was accidentally invoked twice; once is sufficient
    dockerImage.flatMap(DockerImage::tag).ifPresent(metadata::setVespaVersion);
    // Token lets the receiver decrypt the core; only set when a shared key is configured
    Optional<SecretSharedKey> sharedCoreKey = Optional.ofNullable(secretSharedKeySupplier.get());
    sharedCoreKey.map(key -> key.sealedSharedKey().toTokenString()).ifPresent(metadata::setDecryptionToken);
    String coreDumpId = coreDumpDirectory.getFileName().toString();
    cores.report(context.hostname(), coreDumpId, metadata);
    context.log(logger, "Core dump reported: " + coreDumpId);
    finishProcessing(context, coreDumpDirectory, sharedCoreKey);
}
private CoreDumpMetadata gatherMetadata(NodeAgentContext context, ContainerPath coreDumpDirectory) {
ContainerPath metadataPath = coreDumpDirectory.resolve(METADATA2_FILE_NAME);
Optional<ReportCoreDumpRequest> request = ReportCoreDumpRequest.load(metadataPath);
if (request.isPresent()) {
return request.map(requestInstance -> {
var metadata = new CoreDumpMetadata();
requestInstance.populateMetadata(metadata, FileSystems.getDefault());
return metadata;
})
.get();
}
ContainerPath coreDumpFile = findCoredumpFileInProcessingDirectory(coreDumpDirectory);
CoreDumpMetadata metadata = coreCollector.collect2(context, coreDumpFile);
metadata.setCpuMicrocodeVersion(getMicrocodeVersion())
.setKernelVersion(System.getProperty("os.version"))
.setCoreDumpPath(doneCoredumpsPath.resolve(context.containerName().asString())
.resolve(coreDumpDirectory.getFileName().toString())
.resolve(coreDumpFile.getFileName().toString()));
ReportCoreDumpRequest requestInstance = new ReportCoreDumpRequest();
requestInstance.fillFrom(metadata);
requestInstance.save(metadataPath);
context.log(logger, "Wrote " + metadataPath.pathOnHost());
return metadata;
}
private String getMicrocodeVersion() {
String output = uncheck(() -> Files.readAllLines(Paths.get("/proc/cpuinfo")).stream()
.filter(line -> line.startsWith("microcode"))
.findFirst()
.orElse("microcode : UNKNOWN"));
String[] results = output.split(":");
if (results.length != 2) {
throw ConvergenceException.ofError("Result from detect microcode command not as expected: " + output);
}
return results[1].trim();
}
} |
Use `Arrays.copyOf()`. `Object.clone()` is a sad part of Java's history and should never be used... | public static KeyId ofBytes(byte[] keyIdBytes) {
Objects.requireNonNull(keyIdBytes);
return new KeyId(keyIdBytes.clone());
} | return new KeyId(keyIdBytes.clone()); | public static KeyId ofBytes(byte[] keyIdBytes) {
Objects.requireNonNull(keyIdBytes);
return new KeyId(Arrays.copyOf(keyIdBytes, keyIdBytes.length));
} | class KeyId {
public static final int MAX_KEY_ID_UTF8_LENGTH = 255;
private final byte[] keyIdBytes;
// Private: callers go through the static factories, which are responsible for passing in
// an array the constructor may take ownership of (the array is stored without copying here).
private KeyId(byte[] keyIdBytes) {
// Enforce the on-wire size limit before the (comparatively) costly UTF-8 validation
if (keyIdBytes.length > MAX_KEY_ID_UTF8_LENGTH) {
throw new IllegalArgumentException("Key ID is too large to be encoded (max is %d, got %d)"
.formatted(MAX_KEY_ID_UTF8_LENGTH, keyIdBytes.length));
}
verifyByteStringRoundtripsAsValidUtf8(keyIdBytes);
this.keyIdBytes = keyIdBytes;
}
/**
* Construct a KeyId containing the given sequence of bytes.
*
* @param keyIdBytes array of valid UTF-8 bytes. May be zero-length, but not null.
* Note: to avoid accidental mutations, the key bytes are deep-copied.
* @return a new KeyId instance
*/
/**
* Construct a KeyId containing the UTF-8 byte representation of the given string.
*
* @param keyId a string whose UTF-8 byte representation will be the key ID. May be
* zero-length but not null.
* @return a new KeyId instance
*/
public static KeyId ofString(String keyId) {
Objects.requireNonNull(keyId);
return new KeyId(toUtf8Bytes(keyId));
}
/**
* @return the raw backing byte array. <strong>Must therefore not be mutated.</strong>
*/
public byte[] asBytes() { return keyIdBytes; }
public String asString() { return fromUtf8Bytes(keyIdBytes); }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
KeyId keyId = (KeyId) o;
return Arrays.equals(keyIdBytes, keyId.keyIdBytes);
}
@Override
public int hashCode() {
return Arrays.hashCode(keyIdBytes);
}
@Override
public String toString() {
return "KeyId(%s)".formatted(asString());
}
/**
 * Throws IllegalArgumentException unless the given byte string decodes and re-encodes
 * to exactly the same UTF-8 byte sequence (i.e. it is valid, normalized UTF-8).
 */
private static void verifyByteStringRoundtripsAsValidUtf8(byte[] byteStr) {
    byte[] roundTripped = toUtf8Bytes(fromUtf8Bytes(byteStr));
    if (!Arrays.equals(byteStr, roundTripped)) {
        throw new IllegalArgumentException("Key ID is not valid normalized UTF-8");
    }
}
} | class KeyId {
public static final int MAX_KEY_ID_UTF8_LENGTH = 255;
private final byte[] keyIdBytes;
private KeyId(byte[] keyIdBytes) {
if (keyIdBytes.length > MAX_KEY_ID_UTF8_LENGTH) {
throw new IllegalArgumentException("Key ID is too large to be encoded (max is %d, got %d)"
.formatted(MAX_KEY_ID_UTF8_LENGTH, keyIdBytes.length));
}
verifyByteStringRoundtripsAsValidUtf8(keyIdBytes);
this.keyIdBytes = keyIdBytes;
}
/**
* Construct a KeyId containing the given sequence of bytes.
*
* @param keyIdBytes array of valid UTF-8 bytes. May be zero-length, but not null.
* Note: to avoid accidental mutations, the key bytes are deep-copied.
* @return a new KeyId instance
*/
/**
* Construct a KeyId containing the UTF-8 byte representation of the given string.
*
* @param keyId a string whose UTF-8 byte representation will be the key ID. May be
* zero-length but not null.
* @return a new KeyId instance
*/
public static KeyId ofString(String keyId) {
Objects.requireNonNull(keyId);
return new KeyId(toUtf8Bytes(keyId));
}
/**
* @return the raw backing byte array. <strong>Must therefore not be mutated.</strong>
*/
public byte[] asBytes() { return keyIdBytes; }
public String asString() { return fromUtf8Bytes(keyIdBytes); }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
KeyId keyId = (KeyId) o;
return Arrays.equals(keyIdBytes, keyId.keyIdBytes);
}
@Override
public int hashCode() {
return Arrays.hashCode(keyIdBytes);
}
@Override
public String toString() {
return "KeyId(%s)".formatted(asString());
}
private static void verifyByteStringRoundtripsAsValidUtf8(byte[] byteStr) {
String asStr = fromUtf8Bytes(byteStr);
byte[] asBytes = toUtf8Bytes(asStr);
if (!Arrays.equals(byteStr, asBytes)) {
throw new IllegalArgumentException("Key ID is not valid normalized UTF-8");
}
}
} |
See previous comment | void key_id_bytes_are_deep_copied_when_constructed_from_raw_byte_array() {
byte[] keyBytes = new byte[]{'f','o','o'};
byte[] expected = keyBytes.clone();
var id = KeyId.ofBytes(keyBytes);
keyBytes[0] = 'b';
assertArrayEquals(expected, id.asBytes());
} | byte[] expected = keyBytes.clone(); | void key_id_bytes_are_deep_copied_when_constructed_from_raw_byte_array() {
byte[] keyBytes = new byte[]{'f','o','o'};
byte[] expected = Arrays.copyOf(keyBytes, keyBytes.length);
var id = KeyId.ofBytes(keyBytes);
keyBytes[0] = 'b';
assertArrayEquals(expected, id.asBytes());
} | class KeyIdTest {
@Test
void equality_predicated_on_key_id_byte_string() {
var id0s = KeyId.ofString("");
var id1s = KeyId.ofString("1");
var id2s = KeyId.ofString("12");
assertEquals(id0s, id0s);
assertEquals(id1s, id1s);
assertEquals(id2s, id2s);
assertNotEquals(id0s, id1s);
assertNotEquals(id1s, id0s);
assertNotEquals(id1s, id2s);
assertNotEquals(id0s, id2s);
var id0b = KeyId.ofBytes(new byte[0]);
var id1b = KeyId.ofBytes(new byte[]{ '1' });
var id2b = KeyId.ofBytes(new byte[]{ '1', '2' });
assertEquals(id0s, id0b);
assertEquals(id1s, id1b);
assertEquals(id2s, id2b);
}
@Test
void accessors_return_expected_values() {
byte[] fooBytes = new byte[]{'f','o','o'};
byte[] barBytes = new byte[]{'b','a','r'};
var id1 = KeyId.ofString("foo");
assertEquals("foo", id1.asString());
assertArrayEquals(fooBytes, id1.asBytes());
var id2 = KeyId.ofBytes(barBytes);
assertEquals("bar", id2.asString());
assertArrayEquals(barBytes, id2.asBytes());
}
// Removed duplicated @Test annotation: @Test is not @Repeatable, so repeating it is a compile error.
@Test
void can_construct_largest_possible_key_id() {
    byte[] okIdBytes = new byte[KeyId.MAX_KEY_ID_UTF8_LENGTH];
    Arrays.fill(okIdBytes, (byte)'A');
    var okId = KeyId.ofBytes(okIdBytes);
    assertArrayEquals(okIdBytes, okId.asBytes());
}
@Test
void too_big_key_id_throws() {
byte[] tooBigIdBytes = new byte[KeyId.MAX_KEY_ID_UTF8_LENGTH + 1];
Arrays.fill(tooBigIdBytes, (byte)'A');
assertThrows(IllegalArgumentException.class, () -> KeyId.ofBytes(tooBigIdBytes));
}
@Test
void malformed_utf8_key_id_is_rejected_on_construction() {
byte[] malformedIdBytes = new byte[]{ (byte)0xC0 };
assertThrows(IllegalArgumentException.class, () -> KeyId.ofBytes(malformedIdBytes));
}
} | class KeyIdTest {
@Test
void equality_predicated_on_key_id_byte_string() {
var id0s = KeyId.ofString("");
var id1s = KeyId.ofString("1");
var id2s = KeyId.ofString("12");
assertEquals(id0s, id0s);
assertEquals(id1s, id1s);
assertEquals(id2s, id2s);
assertNotEquals(id0s, id1s);
assertNotEquals(id1s, id0s);
assertNotEquals(id1s, id2s);
assertNotEquals(id0s, id2s);
var id0b = KeyId.ofBytes(new byte[0]);
var id1b = KeyId.ofBytes(new byte[]{ '1' });
var id2b = KeyId.ofBytes(new byte[]{ '1', '2' });
assertEquals(id0s, id0b);
assertEquals(id1s, id1b);
assertEquals(id2s, id2b);
}
@Test
void accessors_return_expected_values() {
byte[] fooBytes = new byte[]{'f','o','o'};
byte[] barBytes = new byte[]{'b','a','r'};
var id1 = KeyId.ofString("foo");
assertEquals("foo", id1.asString());
assertArrayEquals(fooBytes, id1.asBytes());
var id2 = KeyId.ofBytes(barBytes);
assertEquals("bar", id2.asString());
assertArrayEquals(barBytes, id2.asBytes());
}
// Removed duplicated @Test annotation: @Test is not @Repeatable, so repeating it is a compile error.
@Test
void can_construct_largest_possible_key_id() {
    byte[] okIdBytes = new byte[KeyId.MAX_KEY_ID_UTF8_LENGTH];
    Arrays.fill(okIdBytes, (byte)'A');
    var okId = KeyId.ofBytes(okIdBytes);
    assertArrayEquals(okIdBytes, okId.asBytes());
}
@Test
void too_big_key_id_throws() {
byte[] tooBigIdBytes = new byte[KeyId.MAX_KEY_ID_UTF8_LENGTH + 1];
Arrays.fill(tooBigIdBytes, (byte)'A');
assertThrows(IllegalArgumentException.class, () -> KeyId.ofBytes(tooBigIdBytes));
}
@Test
void malformed_utf8_key_id_is_rejected_on_construction() {
byte[] malformedIdBytes = new byte[]{ (byte)0xC0 };
assertThrows(IllegalArgumentException.class, () -> KeyId.ofBytes(malformedIdBytes));
}
} |
Array copying fixed to be more cheerful, less sad | public static KeyId ofBytes(byte[] keyIdBytes) {
Objects.requireNonNull(keyIdBytes);
return new KeyId(keyIdBytes.clone());
} | return new KeyId(keyIdBytes.clone()); | public static KeyId ofBytes(byte[] keyIdBytes) {
Objects.requireNonNull(keyIdBytes);
return new KeyId(Arrays.copyOf(keyIdBytes, keyIdBytes.length));
} | class KeyId {
public static final int MAX_KEY_ID_UTF8_LENGTH = 255;
private final byte[] keyIdBytes;
private KeyId(byte[] keyIdBytes) {
if (keyIdBytes.length > MAX_KEY_ID_UTF8_LENGTH) {
throw new IllegalArgumentException("Key ID is too large to be encoded (max is %d, got %d)"
.formatted(MAX_KEY_ID_UTF8_LENGTH, keyIdBytes.length));
}
verifyByteStringRoundtripsAsValidUtf8(keyIdBytes);
this.keyIdBytes = keyIdBytes;
}
/**
* Construct a KeyId containing the given sequence of bytes.
*
* @param keyIdBytes array of valid UTF-8 bytes. May be zero-length, but not null.
* Note: to avoid accidental mutations, the key bytes are deep-copied.
* @return a new KeyId instance
*/
/**
* Construct a KeyId containing the UTF-8 byte representation of the given string.
*
* @param keyId a string whose UTF-8 byte representation will be the key ID. May be
* zero-length but not null.
* @return a new KeyId instance
*/
public static KeyId ofString(String keyId) {
Objects.requireNonNull(keyId);
return new KeyId(toUtf8Bytes(keyId));
}
/**
* @return the raw backing byte array. <strong>Must therefore not be mutated.</strong>
*/
public byte[] asBytes() { return keyIdBytes; }
public String asString() { return fromUtf8Bytes(keyIdBytes); }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
KeyId keyId = (KeyId) o;
return Arrays.equals(keyIdBytes, keyId.keyIdBytes);
}
@Override
public int hashCode() {
return Arrays.hashCode(keyIdBytes);
}
@Override
public String toString() {
return "KeyId(%s)".formatted(asString());
}
private static void verifyByteStringRoundtripsAsValidUtf8(byte[] byteStr) {
String asStr = fromUtf8Bytes(byteStr);
byte[] asBytes = toUtf8Bytes(asStr);
if (!Arrays.equals(byteStr, asBytes)) {
throw new IllegalArgumentException("Key ID is not valid normalized UTF-8");
}
}
} | class KeyId {
public static final int MAX_KEY_ID_UTF8_LENGTH = 255;
private final byte[] keyIdBytes;
private KeyId(byte[] keyIdBytes) {
if (keyIdBytes.length > MAX_KEY_ID_UTF8_LENGTH) {
throw new IllegalArgumentException("Key ID is too large to be encoded (max is %d, got %d)"
.formatted(MAX_KEY_ID_UTF8_LENGTH, keyIdBytes.length));
}
verifyByteStringRoundtripsAsValidUtf8(keyIdBytes);
this.keyIdBytes = keyIdBytes;
}
/**
* Construct a KeyId containing the given sequence of bytes.
*
* @param keyIdBytes array of valid UTF-8 bytes. May be zero-length, but not null.
* Note: to avoid accidental mutations, the key bytes are deep-copied.
* @return a new KeyId instance
*/
/**
* Construct a KeyId containing the UTF-8 byte representation of the given string.
*
* @param keyId a string whose UTF-8 byte representation will be the key ID. May be
* zero-length but not null.
* @return a new KeyId instance
*/
public static KeyId ofString(String keyId) {
Objects.requireNonNull(keyId);
return new KeyId(toUtf8Bytes(keyId));
}
/**
* @return the raw backing byte array. <strong>Must therefore not be mutated.</strong>
*/
public byte[] asBytes() { return keyIdBytes; }
public String asString() { return fromUtf8Bytes(keyIdBytes); }
/** Two KeyIds are equal iff their backing byte sequences are equal. */
@Override
public boolean equals(Object o) {
    if (o == this) return true;
    if (o == null || o.getClass() != getClass()) return false;
    return Arrays.equals(keyIdBytes, ((KeyId) o).keyIdBytes);
}
/** Hash derived from the key ID bytes; consistent with {@link #equals(Object)}. */
@Override
public int hashCode() {
    return Arrays.hashCode(keyIdBytes);
}
/** Human-readable form: {@code KeyId(<id string>)}. */
@Override
public String toString() {
    return "KeyId(" + asString() + ")";
}
/**
 * Throws unless decoding the bytes as UTF-8 and re-encoding the resulting string yields
 * the exact same byte sequence, i.e. the input is valid, normalized UTF-8.
 */
private static void verifyByteStringRoundtripsAsValidUtf8(byte[] byteStr) {
    byte[] roundTripped = toUtf8Bytes(fromUtf8Bytes(byteStr));
    if (!Arrays.equals(roundTripped, byteStr)) {
        throw new IllegalArgumentException("Key ID is not valid normalized UTF-8");
    }
}
} |
This was `operator` originally (see comment above), changed to DPM in #23345 | protected double maintain() {
// Resume provisioning of hosts still in state 'provisioned', together with the children
// they are expected to run. Returns the success factor over the hosts handled.
NodeList allNodes = nodeRepository().nodes().list();
// Candidate child nodes grouped by the hostname of their parent host.
Map<String, Set<Node>> nodesByProvisionedParentHostname =
        allNodes.nodeType(NodeType.tenant, NodeType.config, NodeType.controller)
                .asList()
                .stream()
                .filter(node -> node.parentHostname().isPresent())
                .collect(Collectors.groupingBy(node -> node.parentHostname().get(), Collectors.toSet()));
NodeList hosts = allNodes.state(Node.State.provisioned).nodeType(NodeType.host, NodeType.confighost, NodeType.controllerhost);
int failures = 0;
for (Node host : hosts) {
    Set<Node> children = nodesByProvisionedParentHostname.getOrDefault(host.hostname(), Set.of());
    try (var lock = nodeRepository().nodes().lockUnallocated()) {
        List<Node> updatedNodes = hostProvisioner.provision(host, children);
        verifyDns(updatedNodes);
        nodeRepository().nodes().write(updatedNodes, lock);
    } catch (IllegalArgumentException | IllegalStateException e) {
        // Transient: log and let the next maintenance run retry.
        log.log(Level.INFO, "Could not provision " + host.hostname() + " with " + children.size() + " children, will retry in " +
                interval() + ": " + Exceptions.toMessageString(e));
    } catch (FatalProvisioningException e) {
        failures++;
        log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
                " children, failing out the host recursively", e);
        // Attribute the failure to this maintainer, not to a human operator (was Agent.operator).
        nodeRepository().nodes().failOrMarkRecursively(
                host.hostname(), Agent.HostResumeProvisioner, "Failed by HostResumeProvisioner due to provisioning failure");
    } catch (RuntimeException e) {
        if (e.getCause() instanceof NamingException)
            // Name-lookup failure (presumably the DNS check) — treated as transient, retried later.
            log.log(Level.INFO, "Could not provision " + host.hostname() + ", will retry in " + interval() + ": " + Exceptions.toMessageString(e));
        else {
            failures++;
            log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
        }
    }
}
return asSuccessFactor(hosts.size(), failures);
} | host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure"); | protected double maintain() {
// Resume provisioning of hosts still in state 'provisioned' together with their expected children.
NodeList allNodes = nodeRepository().nodes().list();
// Candidate child nodes grouped by the hostname of their parent host.
Map<String, Set<Node>> nodesByProvisionedParentHostname =
        allNodes.nodeType(NodeType.tenant, NodeType.config, NodeType.controller)
                .asList()
                .stream()
                .filter(node -> node.parentHostname().isPresent())
                .collect(Collectors.groupingBy(node -> node.parentHostname().get(), Collectors.toSet()));
NodeList hosts = allNodes.state(Node.State.provisioned).nodeType(NodeType.host, NodeType.confighost, NodeType.controllerhost);
int failures = 0;
for (Node host : hosts) {
    Set<Node> children = nodesByProvisionedParentHostname.getOrDefault(host.hostname(), Set.of());
    try (var lock = nodeRepository().nodes().lockUnallocated()) {
        List<Node> updatedNodes = hostProvisioner.provision(host, children);
        verifyDns(updatedNodes);
        nodeRepository().nodes().write(updatedNodes, lock);
    } catch (IllegalArgumentException | IllegalStateException e) {
        // Transient: log and let the next maintenance run retry.
        log.log(Level.INFO, "Could not provision " + host.hostname() + " with " + children.size() + " children, will retry in " +
                interval() + ": " + Exceptions.toMessageString(e));
    } catch (FatalProvisioningException e) {
        failures++;
        log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
                " children, failing out the host recursively", e);
        // Failure attributed to this maintainer's own agent.
        nodeRepository().nodes().failOrMarkRecursively(
                host.hostname(), Agent.HostResumeProvisioner, "Failed by HostResumeProvisioner due to provisioning failure");
    } catch (RuntimeException e) {
        if (e.getCause() instanceof NamingException)
            // Name-lookup failure (presumably the DNS check) — treated as transient. TODO confirm source.
            log.log(Level.INFO, "Could not provision " + host.hostname() + ", will retry in " + interval() + ": " + Exceptions.toMessageString(e));
        else {
            failures++;
            log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
        }
    }
}
return asSuccessFactor(hosts.size(), failures);
} | class HostResumeProvisioner extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(HostResumeProvisioner.class.getName());
// Provisioner used to resume provisioning of hosts still in state 'provisioned'.
private final HostProvisioner hostProvisioner;
HostResumeProvisioner(NodeRepository nodeRepository, Duration interval, Metric metric, HostProvisioner hostProvisioner) {
    super(nodeRepository, interval, metric);
    this.hostProvisioner = hostProvisioner;
}
// NOTE(review): this @Override appears detached — the method it annotates (maintain()) is elided
// in this excerpt; confirm it still directly precedes maintain() in the real source.
@Override
/** Verify DNS configuration of given nodes */
private void verifyDns(List<Node> nodes) {
    for (var node : nodes) {
        for (var ipAddress : node.ipConfig().primary()) {
            IP.verifyDns(node.hostname(), ipAddress, nodeRepository().nameResolver());
        }
    }
}
} | class HostResumeProvisioner extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(HostResumeProvisioner.class.getName());
// Provisioner used to resume provisioning of hosts still in state 'provisioned'.
private final HostProvisioner hostProvisioner;
HostResumeProvisioner(NodeRepository nodeRepository, Duration interval, Metric metric, HostProvisioner hostProvisioner) {
    super(nodeRepository, interval, metric);
    this.hostProvisioner = hostProvisioner;
}
// NOTE(review): this @Override appears detached — the method it annotates (maintain()) is elided
// in this excerpt; confirm it still directly precedes maintain() in the real source.
@Override
/** Verify DNS configuration of given nodes */
private void verifyDns(List<Node> nodes) {
    for (var node : nodes) {
        for (var ipAddress : node.ipConfig().primary()) {
            IP.verifyDns(node.hostname(), ipAddress, nodeRepository().nameResolver());
        }
    }
}
} |
hm, could quick redeployment be triggered for a HostResumeProvisioner agent too? | protected double maintain() {
// Resume provisioning of hosts still in state 'provisioned', together with the children
// they are expected to run. Returns the success factor over the hosts handled.
NodeList allNodes = nodeRepository().nodes().list();
// Candidate child nodes grouped by the hostname of their parent host.
Map<String, Set<Node>> nodesByProvisionedParentHostname =
        allNodes.nodeType(NodeType.tenant, NodeType.config, NodeType.controller)
                .asList()
                .stream()
                .filter(node -> node.parentHostname().isPresent())
                .collect(Collectors.groupingBy(node -> node.parentHostname().get(), Collectors.toSet()));
NodeList hosts = allNodes.state(Node.State.provisioned).nodeType(NodeType.host, NodeType.confighost, NodeType.controllerhost);
int failures = 0;
for (Node host : hosts) {
    Set<Node> children = nodesByProvisionedParentHostname.getOrDefault(host.hostname(), Set.of());
    try (var lock = nodeRepository().nodes().lockUnallocated()) {
        List<Node> updatedNodes = hostProvisioner.provision(host, children);
        verifyDns(updatedNodes);
        nodeRepository().nodes().write(updatedNodes, lock);
    } catch (IllegalArgumentException | IllegalStateException e) {
        // Transient: log and let the next maintenance run retry.
        log.log(Level.INFO, "Could not provision " + host.hostname() + " with " + children.size() + " children, will retry in " +
                interval() + ": " + Exceptions.toMessageString(e));
    } catch (FatalProvisioningException e) {
        failures++;
        log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
                " children, failing out the host recursively", e);
        // Attribute the failure to this maintainer, not to a human operator (was Agent.operator).
        nodeRepository().nodes().failOrMarkRecursively(
                host.hostname(), Agent.HostResumeProvisioner, "Failed by HostResumeProvisioner due to provisioning failure");
    } catch (RuntimeException e) {
        if (e.getCause() instanceof NamingException)
            // Name-lookup failure (presumably the DNS check) — treated as transient, retried later.
            log.log(Level.INFO, "Could not provision " + host.hostname() + ", will retry in " + interval() + ": " + Exceptions.toMessageString(e));
        else {
            failures++;
            log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
        }
    }
}
return asSuccessFactor(hosts.size(), failures);
} | host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure"); | protected double maintain() {
// Resume provisioning of hosts still in state 'provisioned' together with their expected children.
NodeList allNodes = nodeRepository().nodes().list();
// Candidate child nodes grouped by the hostname of their parent host.
Map<String, Set<Node>> nodesByProvisionedParentHostname =
        allNodes.nodeType(NodeType.tenant, NodeType.config, NodeType.controller)
                .asList()
                .stream()
                .filter(node -> node.parentHostname().isPresent())
                .collect(Collectors.groupingBy(node -> node.parentHostname().get(), Collectors.toSet()));
NodeList hosts = allNodes.state(Node.State.provisioned).nodeType(NodeType.host, NodeType.confighost, NodeType.controllerhost);
int failures = 0;
for (Node host : hosts) {
    Set<Node> children = nodesByProvisionedParentHostname.getOrDefault(host.hostname(), Set.of());
    try (var lock = nodeRepository().nodes().lockUnallocated()) {
        List<Node> updatedNodes = hostProvisioner.provision(host, children);
        verifyDns(updatedNodes);
        nodeRepository().nodes().write(updatedNodes, lock);
    } catch (IllegalArgumentException | IllegalStateException e) {
        // Transient: log and let the next maintenance run retry.
        log.log(Level.INFO, "Could not provision " + host.hostname() + " with " + children.size() + " children, will retry in " +
                interval() + ": " + Exceptions.toMessageString(e));
    } catch (FatalProvisioningException e) {
        failures++;
        log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
                " children, failing out the host recursively", e);
        // Failure attributed to this maintainer's own agent.
        nodeRepository().nodes().failOrMarkRecursively(
                host.hostname(), Agent.HostResumeProvisioner, "Failed by HostResumeProvisioner due to provisioning failure");
    } catch (RuntimeException e) {
        if (e.getCause() instanceof NamingException)
            // Name-lookup failure (presumably the DNS check) — treated as transient. TODO confirm source.
            log.log(Level.INFO, "Could not provision " + host.hostname() + ", will retry in " + interval() + ": " + Exceptions.toMessageString(e));
        else {
            failures++;
            log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
        }
    }
}
return asSuccessFactor(hosts.size(), failures);
} | class HostResumeProvisioner extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(HostResumeProvisioner.class.getName());
// Provisioner used to resume provisioning of hosts still in state 'provisioned'.
private final HostProvisioner hostProvisioner;
HostResumeProvisioner(NodeRepository nodeRepository, Duration interval, Metric metric, HostProvisioner hostProvisioner) {
    super(nodeRepository, interval, metric);
    this.hostProvisioner = hostProvisioner;
}
// NOTE(review): this @Override appears detached — the method it annotates (maintain()) is elided
// in this excerpt; confirm it still directly precedes maintain() in the real source.
@Override
/** Verify DNS configuration of given nodes */
private void verifyDns(List<Node> nodes) {
    for (var node : nodes) {
        for (var ipAddress : node.ipConfig().primary()) {
            IP.verifyDns(node.hostname(), ipAddress, nodeRepository().nameResolver());
        }
    }
}
} | class HostResumeProvisioner extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(HostResumeProvisioner.class.getName());
// Provisioner used to resume provisioning of hosts still in state 'provisioned'.
private final HostProvisioner hostProvisioner;
HostResumeProvisioner(NodeRepository nodeRepository, Duration interval, Metric metric, HostProvisioner hostProvisioner) {
    super(nodeRepository, interval, metric);
    this.hostProvisioner = hostProvisioner;
}
// NOTE(review): this @Override appears detached — the method it annotates (maintain()) is elided
// in this excerpt; confirm it still directly precedes maintain() in the real source.
@Override
/** Verify DNS configuration of given nodes */
private void verifyDns(List<Node> nodes) {
    for (var node : nodes) {
        for (var ipAddress : node.ipConfig().primary()) {
            IP.verifyDns(node.hostname(), ipAddress, nodeRepository().nameResolver());
        }
    }
}
} |
Done | protected double maintain() {
// Resume provisioning of hosts still in state 'provisioned', together with the children
// they are expected to run. Returns the success factor over the hosts handled.
NodeList allNodes = nodeRepository().nodes().list();
// Candidate child nodes grouped by the hostname of their parent host.
Map<String, Set<Node>> nodesByProvisionedParentHostname =
        allNodes.nodeType(NodeType.tenant, NodeType.config, NodeType.controller)
                .asList()
                .stream()
                .filter(node -> node.parentHostname().isPresent())
                .collect(Collectors.groupingBy(node -> node.parentHostname().get(), Collectors.toSet()));
NodeList hosts = allNodes.state(Node.State.provisioned).nodeType(NodeType.host, NodeType.confighost, NodeType.controllerhost);
int failures = 0;
for (Node host : hosts) {
    Set<Node> children = nodesByProvisionedParentHostname.getOrDefault(host.hostname(), Set.of());
    try (var lock = nodeRepository().nodes().lockUnallocated()) {
        List<Node> updatedNodes = hostProvisioner.provision(host, children);
        verifyDns(updatedNodes);
        nodeRepository().nodes().write(updatedNodes, lock);
    } catch (IllegalArgumentException | IllegalStateException e) {
        // Transient: log and let the next maintenance run retry.
        log.log(Level.INFO, "Could not provision " + host.hostname() + " with " + children.size() + " children, will retry in " +
                interval() + ": " + Exceptions.toMessageString(e));
    } catch (FatalProvisioningException e) {
        failures++;
        log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
                " children, failing out the host recursively", e);
        // Attribute the failure to this maintainer, not to a human operator (was Agent.operator).
        nodeRepository().nodes().failOrMarkRecursively(
                host.hostname(), Agent.HostResumeProvisioner, "Failed by HostResumeProvisioner due to provisioning failure");
    } catch (RuntimeException e) {
        if (e.getCause() instanceof NamingException)
            // Name-lookup failure (presumably the DNS check) — treated as transient, retried later.
            log.log(Level.INFO, "Could not provision " + host.hostname() + ", will retry in " + interval() + ": " + Exceptions.toMessageString(e));
        else {
            failures++;
            log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
        }
    }
}
return asSuccessFactor(hosts.size(), failures);
} | host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure"); | protected double maintain() {
// Resume provisioning of hosts still in state 'provisioned' together with their expected children.
NodeList allNodes = nodeRepository().nodes().list();
// Candidate child nodes grouped by the hostname of their parent host.
Map<String, Set<Node>> nodesByProvisionedParentHostname =
        allNodes.nodeType(NodeType.tenant, NodeType.config, NodeType.controller)
                .asList()
                .stream()
                .filter(node -> node.parentHostname().isPresent())
                .collect(Collectors.groupingBy(node -> node.parentHostname().get(), Collectors.toSet()));
NodeList hosts = allNodes.state(Node.State.provisioned).nodeType(NodeType.host, NodeType.confighost, NodeType.controllerhost);
int failures = 0;
for (Node host : hosts) {
    Set<Node> children = nodesByProvisionedParentHostname.getOrDefault(host.hostname(), Set.of());
    try (var lock = nodeRepository().nodes().lockUnallocated()) {
        List<Node> updatedNodes = hostProvisioner.provision(host, children);
        verifyDns(updatedNodes);
        nodeRepository().nodes().write(updatedNodes, lock);
    } catch (IllegalArgumentException | IllegalStateException e) {
        // Transient: log and let the next maintenance run retry.
        log.log(Level.INFO, "Could not provision " + host.hostname() + " with " + children.size() + " children, will retry in " +
                interval() + ": " + Exceptions.toMessageString(e));
    } catch (FatalProvisioningException e) {
        failures++;
        log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
                " children, failing out the host recursively", e);
        // Failure attributed to this maintainer's own agent.
        nodeRepository().nodes().failOrMarkRecursively(
                host.hostname(), Agent.HostResumeProvisioner, "Failed by HostResumeProvisioner due to provisioning failure");
    } catch (RuntimeException e) {
        if (e.getCause() instanceof NamingException)
            // Name-lookup failure (presumably the DNS check) — treated as transient. TODO confirm source.
            log.log(Level.INFO, "Could not provision " + host.hostname() + ", will retry in " + interval() + ": " + Exceptions.toMessageString(e));
        else {
            failures++;
            log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
        }
    }
}
return asSuccessFactor(hosts.size(), failures);
} | class HostResumeProvisioner extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(HostResumeProvisioner.class.getName());
// Provisioner used to resume provisioning of hosts still in state 'provisioned'.
private final HostProvisioner hostProvisioner;
HostResumeProvisioner(NodeRepository nodeRepository, Duration interval, Metric metric, HostProvisioner hostProvisioner) {
    super(nodeRepository, interval, metric);
    this.hostProvisioner = hostProvisioner;
}
// NOTE(review): this @Override appears detached — the method it annotates (maintain()) is elided
// in this excerpt; confirm it still directly precedes maintain() in the real source.
@Override
/** Verify DNS configuration of given nodes */
private void verifyDns(List<Node> nodes) {
    for (var node : nodes) {
        for (var ipAddress : node.ipConfig().primary()) {
            IP.verifyDns(node.hostname(), ipAddress, nodeRepository().nameResolver());
        }
    }
}
} | class HostResumeProvisioner extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(HostResumeProvisioner.class.getName());
// Provisioner used to resume provisioning of hosts still in state 'provisioned'.
private final HostProvisioner hostProvisioner;
HostResumeProvisioner(NodeRepository nodeRepository, Duration interval, Metric metric, HostProvisioner hostProvisioner) {
    super(nodeRepository, interval, metric);
    this.hostProvisioner = hostProvisioner;
}
// NOTE(review): this @Override appears detached — the method it annotates (maintain()) is elided
// in this excerpt; confirm it still directly precedes maintain() in the real source.
@Override
/** Verify DNS configuration of given nodes */
private void verifyDns(List<Node> nodes) {
    for (var node : nodes) {
        for (var ipAddress : node.ipConfig().primary()) {
            IP.verifyDns(node.hostname(), ipAddress, nodeRepository().nameResolver());
        }
    }
}
} |
Where will the key ID come from when this has rolled out? | private String corePublicKeyFlagValue(NodeAgentContext context) {
// Resolve the configured core-encryption public key ID for this node's type (flag-controlled).
return coreEncryptionPublicKeyIdFlag.with(FetchVector.Dimension.NODE_TYPE, context.nodeType().name()).value();
} | return coreEncryptionPublicKeyIdFlag.with(FetchVector.Dimension.NODE_TYPE, context.nodeType().name()).value(); | private String corePublicKeyFlagValue(NodeAgentContext context) {
// Resolve the configured core-encryption public key ID for this node's type (flag-controlled).
return coreEncryptionPublicKeyIdFlag.with(FetchVector.Dimension.NODE_TYPE, context.nodeType().name()).value();
} | class CoredumpHandler {
// Matches JVM fatal-error report files (hs_err_pid<pid>.log) that may accompany a crash.
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String METADATA2_FILE_NAME = "metadata2.json";
private static final String COMPRESSED_EXTENSION = ".zst";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final Cores cores;
private final CoredumpReporter coredumpReporter;
// NOTE(review): name looks like a typo for "crashPathInContainer" (constructor params use that spelling).
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
// Injectable clock so "still being written" checks are testable.
private final Clock clock;
// Supplies unique coredump ids (random UUIDs in the production constructors).
private final Supplier<String> coredumpIdSupplier;
private final SecretSharedKeySupplier secretSharedKeySupplier;
private final BooleanFlag reportCoresViaCfgFlag;
private final StringFlag coreEncryptionPublicKeyIdFlag;
/**
 * @param crashPathInContainer path inside the container where core dumps are dumped
 * @param doneCoredumpsPath    path on host where processed core dumps are stored
 */
public CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
                       String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
                       FlagSource flagSource) {
    // No secret-shared-key supplier: core dumps will not be encrypted. (TODO marker in original.)
    this(coreCollector, cores, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
         metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), (ctx) -> Optional.empty() /*TODO*/,
         flagSource);
}
/**
 * As above, but with an explicit supplier of shared keys used to encrypt core dumps.
 *
 * @param crashPathInContainer path inside the container where core dumps are dumped
 * @param doneCoredumpsPath    path on host where processed core dumps are stored
 */
public CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
                       String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
                       SecretSharedKeySupplier secretSharedKeySupplier, FlagSource flagSource) {
    this(coreCollector, cores, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
         metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), secretSharedKeySupplier,
         flagSource);
}
/** Canonical constructor (package-private for tests): takes an explicit clock and id supplier. */
CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
                String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
                Clock clock, Supplier<String> coredumpIdSupplier,
                SecretSharedKeySupplier secretSharedKeySupplier, FlagSource flagSource) {
    this.coreCollector = coreCollector;
    this.cores = cores;
    this.coredumpReporter = coredumpReporter;
    this.crashPatchInContainer = crashPathInContainer;
    this.doneCoredumpsPath = doneCoredumpsPath;
    this.metrics = metrics;
    this.clock = clock;
    this.coredumpIdSupplier = coredumpIdSupplier;
    this.secretSharedKeySupplier = secretSharedKeySupplier;
    this.reportCoresViaCfgFlag = Flags.REPORT_CORES_VIA_CFG.bindTo(flagSource);
    this.coreEncryptionPublicKeyIdFlag = Flags.CORE_ENCRYPTION_PUBLIC_KEY_ID.bindTo(flagSource);
}
/**
 * Converges core dump handling for the node: updates metrics, optionally refuses to proceed while
 * a core is still being written, then processes and reports at most one enqueued core dump,
 * choosing the reporting path (config server vs. legacy reporter) by feature flag.
 */
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier,
                     Optional<DockerImage> dockerImage, boolean throwIfCoreBeingWritten) {
    ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
    ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
    updateMetrics(context, containerCrashPath);
    if (throwIfCoreBeingWritten) {
        // Any file modified within the last minute is considered still being written.
        List<String> pendingCores = FileFinder.files(containerCrashPath)
                .match(fileAttributes -> !isReadyForProcessing(fileAttributes))
                .maxDepth(1).stream()
                .map(FileFinder.FileAttributes::filename)
                .toList();
        if (!pendingCores.isEmpty())
            // List names only when few, to keep the message bounded.
            throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
                    pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
    }
    getCoredumpToProcess(context, containerCrashPath, containerProcessingPath)
            .ifPresent(path -> {
                if (reportCoresViaCfgFlag.with(FetchVector.Dimension.NODE_TYPE, context.nodeType().name()).value()) {
                    processAndReportSingleCoreDump2(context, path, dockerImage);
                } else {
                    processAndReportSingleCoredump(context, path, nodeAttributesSupplier);
                }
            });
}
/**
 * @return path to a directory inside the processing directory that contains a core dump file to
 *         process — an already-enqueued one if present, otherwise a freshly enqueued one
 */
Optional<ContainerPath> getCoredumpToProcess(NodeAgentContext context, ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
    Optional<ContainerPath> alreadyEnqueued = FileFinder.directories(containerProcessingPath).stream()
            .map(FileFinder.FileAttributes::path)
            .map(ContainerPath.class::cast)
            .findAny();
    return alreadyEnqueued.or(() -> enqueueCoredump(context, containerCrashPath, containerProcessingPath));
}
/**
 * Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
 * Limit to only processing one coredump at the time, starting with the oldest.
 *
 * Assumption: hs_err files are much smaller than core files and are written (last modified time)
 * before the core file.
 *
 * @return path to directory inside processing directory which contains the enqueued core dump file
 */
Optional<ContainerPath> enqueueCoredump(NodeAgentContext context, ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
    // Anything not matching the hs_err naming pattern is treated as a core dump.
    Predicate<String> isCoreDump = filename -> !HS_ERR_PATTERN.matcher(filename).matches();
    // Candidate files, oldest first; files still being written are skipped (and logged if core dumps).
    List<Path> toProcess = FileFinder.files(containerCrashPath)
            .match(attributes -> {
                if (isReadyForProcessing(attributes)) {
                    return true;
                } else {
                    if (isCoreDump.test(attributes.filename()))
                        context.log(logger, attributes.path() + " is still being written");
                    return false;
                }
            })
            .maxDepth(1)
            .stream()
            .sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
            .map(FileFinder.FileAttributes::path)
            .toList();
    // Index of the oldest core dump; everything before it (older hs_err files) belongs with it.
    int coredumpIndex = IntStream.range(0, toProcess.size())
            .filter(i -> isCoreDump.test(toProcess.get(i).getFileName().toString()))
            .findFirst()
            .orElse(-1);
    if (coredumpIndex == -1) return Optional.empty();
    ContainerPath enqueuedDir = containerProcessingPath.resolve(coredumpIdSupplier.get());
    new MakeDirectory(enqueuedDir).createParents().converge(context);
    // Move the hs_err files as-is and the core dump itself with the well-known prefix.
    IntStream.range(0, coredumpIndex + 1)
            .forEach(i -> {
                Path path = toProcess.get(i);
                String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
                new FileMover(path, enqueuedDir.resolve(prefix + path.getFileName())).converge(context);
            });
    return Optional.of(enqueuedDir);
}
/**
 * Legacy reporting path: gathers metadata (adding a decryption token when an encryption key is
 * configured), reports the core dump via {@link CoredumpReporter}, then compresses/encrypts and
 * archives the processing directory. Any failure is rethrown as a RuntimeException.
 */
void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
    try {
        // Encryption is enabled iff a non-empty public key ID flag resolves to a usable key.
        Optional<SecretSharedKey> sharedCoreKey = Optional.of(corePublicKeyFlagValue(context))
                .filter(k -> !k.isEmpty())
                .map(KeyId::ofString)
                .flatMap(secretSharedKeySupplier::create);
        Optional<String> decryptionToken = sharedCoreKey.map(k -> k.sealedSharedKey().toTokenString());
        String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
        String coredumpId = coredumpDirectory.getFileName().toString();
        coredumpReporter.reportCoredump(coredumpId, metadata);
        finishProcessing(context, coredumpDirectory, sharedCoreKey);
        context.log(logger, "Successfully reported coredump " + coredumpId);
    } catch (Exception e) {
        throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
    }
}
/**
 * @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
 * {@link CoreCollector} and stores it to metadata.json. When a decryption token is supplied it is
 * written into (or patched into an existing) metadata.json.
 */
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
    UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
    if (!metadataPath.exists()) {
        // First time: collect metadata, record the eventual archive path, and persist it.
        ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
        Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
        metadata.putAll(nodeAttributesSupplier.get());
        metadata.put("coredump_path", doneCoredumpsPath
                .resolve(context.containerName().asString())
                .resolve(coredumpDirectory.getFileName().toString())
                .resolve(coredumpFile.getFileName().toString()).toString());
        decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
        String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
        metadataPath.writeUtf8File(metadataFields);
        return metadataFields;
    } else {
        if (decryptionToken.isPresent()) {
            // Rewrite the stored metadata with the (possibly new) decryption token.
            String metadataFields = metadataWithPatchedTokenValue(metadataPath, decryptionToken.get());
            metadataPath.deleteIfExists();
            metadataPath.writeUtf8File(metadataFields);
            return metadataFields;
        } else {
            return metadataPath.readUtf8File();
        }
    }
}
/** Returns the stored metadata JSON with the "decryption_token" field (under "fields") set to the given token. */
private String metadataWithPatchedTokenValue(UnixPath metadataPath, String decryptionToken) throws JsonProcessingException {
    var jsonRoot = objectMapper.readTree(metadataPath.readUtf8File());
    var fields = jsonRoot.path("fields");
    if (fields.isObject()) {
        ((ObjectNode) fields).put("decryption_token", decryptionToken);
    }
    return objectMapper.writeValueAsString(jsonRoot);
}
/** Wraps the stream in an AES-GCM encrypting stream when a shared key is present; otherwise returns it unchanged. */
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
    if (sharedCoreKey.isEmpty()) return wrappedStream;
    return new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(sharedCoreKey.get()));
}
/**
 * Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
 * the entire core dump processing directory to {@link #doneCoredumpsPath} (under the container's name).
 */
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) {
    ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
    // .zst, plus .enc when the stream is additionally encrypted.
    String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
    ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
    try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
         OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
        zcis.transferTo(fos);
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
    new FileDeleter(coreFile).converge(context);
    Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
    new MakeDirectory(newCoredumpDirectory).createParents().converge(context);
    new FileMover(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()))
            .converge(context);
}
/**
 * Returns the single raw core dump file (neither compressed nor encrypted) directly inside the
 * given processing directory.
 *
 * @param coredumpProcessingDirectory directory previously enqueued for processing
 * @throws IllegalStateException if no such file exists
 */
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProcessingDirectory) {
    return (ContainerPath) FileFinder.files(coredumpProcessingDirectory)
            .match(nameStartsWith(COREDUMP_FILENAME_PREFIX).and(nameEndsWith(COMPRESSED_EXTENSION).negate())
                                                           .and(nameEndsWith(ENCRYPTED_EXTENSION).negate()))
            .maxDepth(1)
            .stream()
            .map(FileFinder.FileAttributes::path)
            .findFirst()
            .orElseThrow(() -> new IllegalStateException(
                    "No coredump file found in processing directory " + coredumpProcessingDirectory));
}
/** Samples gauges for the number of core dumps waiting to be processed and already processed. */
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
    Dimensions dimensions = generateDimensions(context);
    // Unprocessed = files in the crash directory that are not hidden, not hs_err reports,
    // not already compressed/encrypted output, and not metadata files.
    int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
            .match(nameStartsWith(".").negate())
            .match(nameMatches(HS_ERR_PATTERN).negate())
            .match(nameEndsWith(COMPRESSED_EXTENSION).negate())
            .match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
            .match(nameStartsWith("metadata").negate())
            .list().size();
    metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
    // Processed = archived per-coredump directories under the done path for this container.
    Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
    int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
            .maxDepth(1)
            .list().size();
    metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
/** Builds the metric dimensions for this node: host/flavor/zone always, owner and cluster when present. */
private Dimensions generateDimensions(NodeAgentContext context) {
    NodeSpec node = context.node();
    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
            .add("host", node.hostname())
            .add("flavor", node.flavor())
            .add("state", node.state().toString())
            .add("zone", context.zone().getId().value());
    // Application dimensions only exist for allocated nodes.
    node.owner().ifPresent(owner ->
            dimensionsBuilder
                    .add("tenantName", owner.tenant().value())
                    .add("applicationName", owner.application().value())
                    .add("instanceName", owner.instance().value())
                    .add("app", String.join(".", owner.application().value(), owner.instance().value()))
                    .add("applicationId", owner.toFullString())
    );
    node.membership().ifPresent(membership ->
            dimensionsBuilder
                    .add("clustertype", membership.type().value())
                    .add("clusterid", membership.clusterId())
    );
    node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
    dimensionsBuilder.add("orchestratorState", node.orchestratorStatus().asString());
    dimensionsBuilder.add("system", context.zone().getSystemName().value());
    return dimensionsBuilder.build();
}
/** A file is ready for processing once it has not been modified for at least one minute. */
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
    var newestAllowedModification = clock.instant().minusSeconds(60);
    return newestAllowedModification.isAfter(fileAttributes.lastModifiedTime());
}
/**
 * Config-server reporting path: gathers metadata (docker image/Vespa version when known, and a
 * decryption token when an encryption key is configured), reports via {@link Cores}, then
 * compresses/encrypts and archives the processing directory.
 */
void processAndReportSingleCoreDump2(NodeAgentContext context, ContainerPath coreDumpDirectory,
                                     Optional<DockerImage> dockerImage) {
    CoreDumpMetadata metadata = gatherMetadata(context, coreDumpDirectory);
    // Fixed: the original set the docker image twice (duplicated ifPresent call).
    dockerImage.ifPresent(metadata::setDockerImage);
    dockerImage.flatMap(DockerImage::tag).ifPresent(metadata::setVespaVersion);
    // Encryption is enabled iff a non-empty public key ID flag resolves to a usable key.
    Optional<SecretSharedKey> sharedCoreKey = Optional.of(corePublicKeyFlagValue(context))
            .filter(k -> !k.isEmpty())
            .map(KeyId::ofString)
            .flatMap(secretSharedKeySupplier::create);
    sharedCoreKey.map(key -> key.sealedSharedKey().toTokenString()).ifPresent(metadata::setDecryptionToken);
    String coreDumpId = coreDumpDirectory.getFileName().toString();
    cores.report(context.hostname(), coreDumpId, metadata);
    context.log(logger, "Core dump reported: " + coreDumpId);
    finishProcessing(context, coreDumpDirectory, sharedCoreKey);
}
/**
 * Returns core dump metadata: loaded from metadata2.json when it exists, otherwise collected
 * from the core file (plus microcode/kernel/archive-path info) and persisted to metadata2.json.
 */
private CoreDumpMetadata gatherMetadata(NodeAgentContext context, ContainerPath coreDumpDirectory) {
    ContainerPath metadataPath = coreDumpDirectory.resolve(METADATA2_FILE_NAME);
    Optional<ReportCoreDumpRequest> request = ReportCoreDumpRequest.load(metadataPath);
    if (request.isPresent()) {
        var metadata = new CoreDumpMetadata();
        request.get().populateMetadata(metadata, FileSystems.getDefault());
        return metadata;
    }
    ContainerPath coreDumpFile = findCoredumpFileInProcessingDirectory(coreDumpDirectory);
    CoreDumpMetadata metadata = coreCollector.collect2(context, coreDumpFile);
    metadata.setCpuMicrocodeVersion(getMicrocodeVersion())
            .setKernelVersion(System.getProperty("os.version"))
            .setCoreDumpPath(doneCoredumpsPath.resolve(context.containerName().asString())
                                              .resolve(coreDumpDirectory.getFileName().toString())
                                              .resolve(coreDumpFile.getFileName().toString()));
    // Persist for idempotency, so a retry does not re-collect.
    ReportCoreDumpRequest persisted = new ReportCoreDumpRequest();
    persisted.fillFrom(metadata);
    persisted.save(metadataPath);
    context.log(logger, "Wrote " + metadataPath.pathOnHost());
    return metadata;
}
/**
 * Reads the CPU microcode version from /proc/cpuinfo ("microcode : 0x..." line).
 *
 * @throws ConvergenceException if the microcode line does not have the expected "key : value" form
 */
private String getMicrocodeVersion() {
    String output = uncheck(() -> Files.readAllLines(Paths.get("/proc/cpuinfo")).stream()
            .filter(line -> line.startsWith("microcode"))
            .findFirst()
            .orElse("microcode : UNKNOWN"));
    // Split on the first ':' only, so a ':' inside the value cannot trip the length check.
    String[] results = output.split(":", 2);
    if (results.length != 2) {
        throw ConvergenceException.ofError("Result from detect microcode command not as expected: " + output);
    }
    return results[1].trim();
}
} | class CoredumpHandler {
private static final Pattern HS_ERR_PATTERN = Pattern.compile("hs_err_pid[0-9]+\\.log");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
private static final String METADATA_FILE_NAME = "metadata.json";
private static final String METADATA2_FILE_NAME = "metadata2.json";
private static final String COMPRESSED_EXTENSION = ".zst";
private static final String ENCRYPTED_EXTENSION = ".enc";
public static final String COREDUMP_FILENAME_PREFIX = "dump_";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
private final CoreCollector coreCollector;
private final Cores cores;
private final CoredumpReporter coredumpReporter;
private final String crashPatchInContainer;
private final Path doneCoredumpsPath;
private final Metrics metrics;
private final Clock clock;
private final Supplier<String> coredumpIdSupplier;
private final SecretSharedKeySupplier secretSharedKeySupplier;
private final BooleanFlag reportCoresViaCfgFlag;
private final StringFlag coreEncryptionPublicKeyIdFlag;
/**
 * Creates a handler without core dump encryption support: the secret shared key supplier always
 * returns empty, so dumps are compressed but never encrypted.
 *
 * @param crashPathInContainer path inside the container where core dumps are dumped
 * @param doneCoredumpsPath    path on host where processed core dumps are stored
 */
public CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
FlagSource flagSource) {
this(coreCollector, cores, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), (ctx) -> Optional.empty() /*TODO*/,
flagSource);
}
/**
 * Creates a handler with the given secret shared key supplier, enabling encryption of core dumps
 * when a public key id is configured. Uses the system UTC clock and random UUIDs as dump ids.
 */
public CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
SecretSharedKeySupplier secretSharedKeySupplier, FlagSource flagSource) {
this(coreCollector, cores, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString(), secretSharedKeySupplier,
flagSource);
}
/**
 * Package-private constructor that additionally allows injecting the clock and the core dump id
 * supplier (presumably for tests — TODO confirm). All public constructors delegate here.
 */
CoredumpHandler(CoreCollector coreCollector, Cores cores, CoredumpReporter coredumpReporter,
String crashPathInContainer, Path doneCoredumpsPath, Metrics metrics,
Clock clock, Supplier<String> coredumpIdSupplier,
SecretSharedKeySupplier secretSharedKeySupplier, FlagSource flagSource) {
this.coreCollector = coreCollector;
this.cores = cores;
this.coredumpReporter = coredumpReporter;
this.crashPatchInContainer = crashPathInContainer;
this.doneCoredumpsPath = doneCoredumpsPath;
this.metrics = metrics;
this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
this.secretSharedKeySupplier = secretSharedKeySupplier;
// Both flags are bound once here so later reads only need the fetch-vector dimensions.
this.reportCoresViaCfgFlag = Flags.REPORT_CORES_VIA_CFG.bindTo(flagSource);
this.coreEncryptionPublicKeyIdFlag = Flags.CORE_ENCRYPTION_PUBLIC_KEY_ID.bindTo(flagSource);
}
/**
 * Converges core dump handling for this node: publishes queue-size metrics, optionally fails fast
 * if any core file is still being written, then processes at most one core dump per invocation.
 *
 * @param throwIfCoreBeingWritten when true, throw instead of proceeding while any core file in the
 *                                crash directory was modified less than 60 s ago
 */
public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier,
Optional<DockerImage> dockerImage, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPath);
if (throwIfCoreBeingWritten) {
List<String> pendingCores = FileFinder.files(containerCrashPath)
.match(fileAttributes -> !isReadyForProcessing(fileAttributes))
.maxDepth(1).stream()
.map(FileFinder.FileAttributes::filename)
.toList();
// List the names only when few, otherwise just the count, to keep the message bounded.
if (!pendingCores.isEmpty())
throw ConvergenceException.ofError(String.format("Cannot process %s coredumps: Still being written",
pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
}
// Process at most one dump per converge; the flag selects the new (cfg) or legacy reporting path.
getCoredumpToProcess(context, containerCrashPath, containerProcessingPath)
.ifPresent(path -> {
if (reportCoresViaCfgFlag.with(FetchVector.Dimension.NODE_TYPE, context.nodeType().name()).value()) {
processAndReportSingleCoreDump2(context, path, dockerImage);
} else {
processAndReportSingleCoredump(context, path, nodeAttributesSupplier);
}
});
}
/**
 * Returns the directory under processing/ that holds the core dump to work on,
 * enqueueing a new one from the crash directory if none is in flight.
 */
Optional<ContainerPath> getCoredumpToProcess(NodeAgentContext context, ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
    Optional<ContainerPath> inFlight = FileFinder.directories(containerProcessingPath).stream()
            .findAny()
            .map(attributes -> (ContainerPath) attributes.path());
    return inFlight.or(() -> enqueueCoredump(context, containerCrashPath, containerProcessingPath));
}
/**
 * Moves a coredump and related hs_err file(s) to a new directory under the processing/ directory.
 * Limit to only processing one coredump at the time, starting with the oldest.
 *
 * Assumption: hs_err files are much smaller than core files and are written (last modified time)
 * before the core file.
 *
 * @return path to directory inside processing directory which contains the enqueued core dump file,
 *         or empty if no ready core dump file was found
 */
Optional<ContainerPath> enqueueCoredump(NodeAgentContext context, ContainerPath containerCrashPath, ContainerPath containerProcessingPath) {
// Anything that is not an hs_err_pid<N>.log file is treated as a core dump.
Predicate<String> isCoreDump = filename -> !HS_ERR_PATTERN.matcher(filename).matches();
// Ready files, oldest first; files still being written are skipped (and logged if core dumps).
List<Path> toProcess = FileFinder.files(containerCrashPath)
.match(attributes -> {
if (isReadyForProcessing(attributes)) {
return true;
} else {
if (isCoreDump.test(attributes.filename()))
context.log(logger, attributes.path() + " is still being written");
return false;
}
})
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
.map(FileFinder.FileAttributes::path)
.toList();
// Index of the oldest core dump; everything before it (by mtime) is assumed to be its hs_err files.
int coredumpIndex = IntStream.range(0, toProcess.size())
.filter(i -> isCoreDump.test(toProcess.get(i).getFileName().toString()))
.findFirst()
.orElse(-1);
if (coredumpIndex == -1) return Optional.empty();
ContainerPath enqueuedDir = containerProcessingPath.resolve(coredumpIdSupplier.get());
new MakeDirectory(enqueuedDir).createParents().converge(context);
// Move the hs_err files and the core dump itself; only the core dump gets the "dump_" prefix.
IntStream.range(0, coredumpIndex + 1)
.forEach(i -> {
Path path = toProcess.get(i);
String prefix = i == coredumpIndex ? COREDUMP_FILENAME_PREFIX : "";
new FileMover(path, enqueuedDir.resolve(prefix + path.getFileName())).converge(context);
});
return Optional.of(enqueuedDir);
}
/**
 * Legacy reporting path: builds JSON metadata, reports the dump via {@code coredumpReporter},
 * then compresses/encrypts and archives the processing directory.
 * Any failure is wrapped in a RuntimeException naming the directory.
 */
void processAndReportSingleCoredump(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier) {
    try {
        String publicKeyId = corePublicKeyFlagValue(context);
        Optional<SecretSharedKey> encryptionKey = publicKeyId.isEmpty()
                ? Optional.empty()
                : secretSharedKeySupplier.create(KeyId.ofString(publicKeyId));
        Optional<String> decryptionToken = encryptionKey.map(key -> key.sealedSharedKey().toTokenString());
        String metadata = getMetadata(context, coredumpDirectory, nodeAttributesSupplier, decryptionToken);
        String coredumpId = coredumpDirectory.getFileName().toString();
        coredumpReporter.reportCoredump(coredumpId, metadata);
        finishProcessing(context, coredumpDirectory, encryptionKey);
        context.log(logger, "Successfully reported coredump " + coredumpId);
    } catch (Exception e) {
        throw new RuntimeException("Failed to process coredump " + coredumpDirectory, e);
    }
}
/**
 * @return coredump metadata from metadata.json if present, otherwise attempts to get metadata using
 * {@link CoreCollector} and stores it to metadata.json. When a decryption token is supplied and the
 * file already exists, the token inside the stored metadata is patched and re-written first.
 */
String getMetadata(NodeAgentContext context, ContainerPath coredumpDirectory, Supplier<Map<String, Object>> nodeAttributesSupplier, Optional<String> decryptionToken) throws IOException {
UnixPath metadataPath = new UnixPath(coredumpDirectory.resolve(METADATA_FILE_NAME));
if (!metadataPath.exists()) {
// First attempt: collect metadata, merge in node attributes, and persist for later retries.
ContainerPath coredumpFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFile));
metadata.putAll(nodeAttributesSupplier.get());
metadata.put("coredump_path", doneCoredumpsPath
.resolve(context.containerName().asString())
.resolve(coredumpDirectory.getFileName().toString())
.resolve(coredumpFile.getFileName().toString()).toString());
decryptionToken.ifPresent(token -> metadata.put("decryption_token", token));
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
if (decryptionToken.isPresent()) {
// Re-encryption may have produced a new token; refresh it in the stored metadata.
String metadataFields = metadataWithPatchedTokenValue(metadataPath, decryptionToken.get());
metadataPath.deleteIfExists();
metadataPath.writeUtf8File(metadataFields);
return metadataFields;
} else {
return metadataPath.readUtf8File();
}
}
}
/** Returns the stored metadata JSON with "fields.decryption_token" overwritten by the given token. */
private String metadataWithPatchedTokenValue(UnixPath metadataPath, String decryptionToken) throws JsonProcessingException {
    var root = objectMapper.readTree(metadataPath.readUtf8File());
    var fields = root.path("fields");
    if (fields.isObject()) {
        ((ObjectNode) fields).put("decryption_token", decryptionToken);
    }
    return objectMapper.writeValueAsString(root);
}
/**
 * Wraps the stream in an AES-GCM encrypting CipherOutputStream when a shared key is present;
 * otherwise returns the stream unchanged.
 */
static OutputStream maybeWrapWithEncryption(OutputStream wrappedStream, Optional<SecretSharedKey> sharedCoreKey) {
    if (sharedCoreKey.isEmpty()) {
        return wrappedStream;
    }
    return new CipherOutputStream(wrappedStream, SharedKeyGenerator.makeAesGcmEncryptionCipher(sharedCoreKey.get()));
}
/**
 * Compresses and, if a key is provided, encrypts core file (and deletes the uncompressed core), then moves
 * the entire core dump processing directory to the processed-coredumps directory on the host
 * ({@code doneCoredumpsPath}/&lt;container-name&gt;/).
 */
private void finishProcessing(NodeAgentContext context, ContainerPath coredumpDirectory, Optional<SecretSharedKey> sharedCoreKey) {
ContainerPath coreFile = findCoredumpFileInProcessingDirectory(coredumpDirectory);
// ".zst" always; ".zst.enc" when the core is also encrypted.
String extension = COMPRESSED_EXTENSION + (sharedCoreKey.isPresent() ? ENCRYPTED_EXTENSION : "");
ContainerPath compressedCoreFile = coreFile.resolveSibling(coreFile.getFileName() + extension);
try (ZstdCompressingInputStream zcis = new ZstdCompressingInputStream(Files.newInputStream(coreFile));
OutputStream fos = maybeWrapWithEncryption(Files.newOutputStream(compressedCoreFile), sharedCoreKey)) {
zcis.transferTo(fos);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
// The raw core is no longer needed once the compressed copy exists.
new FileDeleter(coreFile).converge(context);
Path newCoredumpDirectory = doneCoredumpsPath.resolve(context.containerName().asString());
new MakeDirectory(newCoredumpDirectory).createParents().converge(context);
new FileMover(coredumpDirectory.pathOnHost(), newCoredumpDirectory.resolve(coredumpDirectory.getFileName().toString()))
.converge(context);
}
/** Returns the (single) raw core dump file — "dump_" prefixed, not yet compressed or encrypted — in the given processing directory. */
ContainerPath findCoredumpFileInProcessingDirectory(ContainerPath coredumpProccessingDirectory) {
    var isRawCoredump = nameStartsWith(COREDUMP_FILENAME_PREFIX)
            .and(nameEndsWith(COMPRESSED_EXTENSION).negate())
            .and(nameEndsWith(ENCRYPTED_EXTENSION).negate());
    return FileFinder.files(coredumpProccessingDirectory)
                     .match(isRawCoredump)
                     .maxDepth(1)
                     .stream()
                     .map(FileFinder.FileAttributes::path)
                     .map(ContainerPath.class::cast)
                     .findFirst()
                     .orElseThrow(() -> new IllegalStateException(
                             "No coredump file found in processing directory " + coredumpProccessingDirectory));
}
/**
 * Publishes two gauges: the number of core dumps waiting in the crash directory
 * ("coredumps.enqueued") and the number already archived for this container
 * ("coredumps.processed"). Hidden files, hs_err logs, compressed/encrypted artifacts and
 * metadata files are excluded from the enqueued count.
 */
void updateMetrics(NodeAgentContext context, ContainerPath containerCrashPath) {
Dimensions dimensions = generateDimensions(context);
int numberOfUnprocessedCoredumps = FileFinder.files(containerCrashPath)
.match(nameStartsWith(".").negate())
.match(nameMatches(HS_ERR_PATTERN).negate())
.match(nameEndsWith(COMPRESSED_EXTENSION).negate())
.match(nameEndsWith(ENCRYPTED_EXTENSION).negate())
.match(nameStartsWith("metadata").negate())
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.enqueued", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfUnprocessedCoredumps);
Path processedCoredumpsPath = doneCoredumpsPath.resolve(context.containerName().asString());
int numberOfProcessedCoredumps = FileFinder.directories(processedCoredumpsPath)
.maxDepth(1)
.list().size();
metrics.declareGauge(Metrics.APPLICATION_NODE, "coredumps.processed", dimensions, Metrics.DimensionType.PRETAGGED).sample(numberOfProcessedCoredumps);
}
/** Builds the metric dimensions for this node: host info, application owner, cluster membership, zone and system. */
private Dimensions generateDimensions(NodeAgentContext context) {
    NodeSpec node = context.node();
    Dimensions.Builder builder = new Dimensions.Builder()
            .add("host", node.hostname())
            .add("flavor", node.flavor())
            .add("state", node.state().toString())
            .add("zone", context.zone().getId().value());
    node.owner().ifPresent(owner -> {
        builder.add("tenantName", owner.tenant().value())
               .add("applicationName", owner.application().value())
               .add("instanceName", owner.instance().value())
               .add("app", String.join(".", owner.application().value(), owner.instance().value()))
               .add("applicationId", owner.toFullString());
    });
    node.membership().ifPresent(membership -> {
        builder.add("clustertype", membership.type().value())
               .add("clusterid", membership.clusterId());
    });
    node.parentHostname().ifPresent(parent -> builder.add("parentHostname", parent));
    builder.add("orchestratorState", node.orchestratorStatus().asString());
    builder.add("system", context.zone().getSystemName().value());
    return builder.build();
}
/** A core file is considered fully written once it has not been modified for at least 60 seconds. */
private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
return clock.instant().minusSeconds(60).isAfter(fileAttributes.lastModifiedTime());
}
/**
 * Gathers metadata for a single core dump, reports it to the config server via {@code cores},
 * then compresses (and optionally encrypts) the dump and archives its processing directory.
 *
 * @param context           node agent context used for paths, logging and flag lookups
 * @param coreDumpDirectory processing directory containing the core dump file
 * @param dockerImage       docker image the node was running, if known
 */
void processAndReportSingleCoreDump2(NodeAgentContext context, ContainerPath coreDumpDirectory,
                                     Optional<DockerImage> dockerImage) {
    CoreDumpMetadata metadata = gatherMetadata(context, coreDumpDirectory);
    dockerImage.ifPresent(metadata::setDockerImage);  // was duplicated; set exactly once
    dockerImage.flatMap(DockerImage::tag).ifPresent(metadata::setVespaVersion);
    // Encrypt only when a public key id is configured for this node (empty flag value => no encryption).
    Optional<SecretSharedKey> sharedCoreKey = Optional.of(corePublicKeyFlagValue(context))
            .filter(k -> !k.isEmpty())
            .map(KeyId::ofString)
            .flatMap(secretSharedKeySupplier::create);
    sharedCoreKey.map(key -> key.sealedSharedKey().toTokenString()).ifPresent(metadata::setDecryptionToken);
    String coreDumpId = coreDumpDirectory.getFileName().toString();
    cores.report(context.hostname(), coreDumpId, metadata);
    context.log(logger, "Core dump reported: " + coreDumpId);
    finishProcessing(context, coreDumpDirectory, sharedCoreKey);
}
/**
 * Returns metadata for the core dump in the given processing directory. If metadata2.json already
 * exists (from an earlier, interrupted attempt) it is loaded and reused; otherwise the metadata is
 * collected via {@code coreCollector} and persisted to metadata2.json so retries do not re-collect.
 */
private CoreDumpMetadata gatherMetadata(NodeAgentContext context, ContainerPath coreDumpDirectory) {
    ContainerPath metadataPath = coreDumpDirectory.resolve(METADATA2_FILE_NAME);
    Optional<ReportCoreDumpRequest> request = ReportCoreDumpRequest.load(metadataPath);
    if (request.isPresent()) {
        // Metadata was already gathered in a previous attempt — reuse instead of re-collecting.
        var metadata = new CoreDumpMetadata();
        request.get().populateMetadata(metadata, FileSystems.getDefault());
        return metadata;
    }
    ContainerPath coreDumpFile = findCoredumpFileInProcessingDirectory(coreDumpDirectory);
    CoreDumpMetadata metadata = coreCollector.collect2(context, coreDumpFile);
    metadata.setCpuMicrocodeVersion(getMicrocodeVersion())
            .setKernelVersion(System.getProperty("os.version"))
            .setCoreDumpPath(doneCoredumpsPath.resolve(context.containerName().asString())
                                              .resolve(coreDumpDirectory.getFileName().toString())
                                              .resolve(coreDumpFile.getFileName().toString()));
    // Persist so a later retry (or external tooling) can read the same metadata.
    ReportCoreDumpRequest requestInstance = new ReportCoreDumpRequest();
    requestInstance.fillFrom(metadata);
    requestInstance.save(metadataPath);
    context.log(logger, "Wrote " + metadataPath.pathOnHost());
    return metadata;
}
/**
 * Reads the CPU microcode version from /proc/cpuinfo ("microcode : 0x..." line).
 *
 * @throws ConvergenceException if the microcode line does not have the expected "key : value" form
 */
private String getMicrocodeVersion() {
    String output = uncheck(() -> Files.readAllLines(Paths.get("/proc/cpuinfo")).stream()
            .filter(line -> line.startsWith("microcode"))
            .findFirst()
            .orElse("microcode : UNKNOWN"));
    // Split on the first ':' only, so a ':' inside the value cannot trip the length check.
    String[] results = output.split(":", 2);
    if (results.length != 2) {
        throw ConvergenceException.ofError("Result from detect microcode command not as expected: " + output);
    }
    return results[1].trim();
}
} |
Before you ask: 1. When a new singleton is registered, potentially as a result of a reload of user components (only), it will be registered with this same singleton manager, for which there should only ever be one lock. 2. When a new singleton manager is created in the same container, as a result of curator or zookeeper config changing, the new curator will use a different client, and see a different set of ephemeral nodes. | private void cleanOrphans() {
List<String> orphans = null;
try {
for (String orphan : orphans = curator.framework().getZookeeperClient().getZooKeeper().getEphemerals(path.getAbsolute()))
curator.delete(path.append(orphan));
}
catch (Exception e) {
logger.log(WARNING, "Failed cleaning orphans: " + orphans, e);
}
} | curator.delete(path.append(orphan)); | private void cleanOrphans() {
List<String> orphans = null;
try {
for (String orphan : orphans = curator.framework().getZookeeperClient().getZooKeeper().getEphemerals(path.getAbsolute()))
curator.delete(path.append(orphan));
}
catch (Exception e) {
logger.log(WARNING, "Failed cleaning orphans: " + orphans, e);
}
} | class Task {
enum Type { register, unregister }
final Type type;
final SingletonWorker singleton;
final CompletableFuture<?> future = new CompletableFuture<>();
private Task(Type type, SingletonWorker singleton) {
this.type = type;
this.singleton = singleton;
}
static Task register(SingletonWorker singleton) { return new Task(Type.register, singleton); }
static Task unregister(SingletonWorker singleton) { return new Task(Type.unregister, singleton); }
} | class Task {
enum Type { register, unregister }
final Type type;
final SingletonWorker singleton;
final CompletableFuture<?> future = new CompletableFuture<>();
private Task(Type type, SingletonWorker singleton) {
this.type = type;
this.singleton = singleton;
}
static Task register(SingletonWorker singleton) { return new Task(Type.register, singleton); }
static Task unregister(SingletonWorker singleton) { return new Task(Type.unregister, singleton); }
} |
Leave a comment on getEphemerals()? It only fetches the ephemerals owned by this session, which is not obvious but important. | private void cleanOrphans() {
List<String> orphans = null;
try {
for (String orphan : orphans = curator.framework().getZookeeperClient().getZooKeeper().getEphemerals(path.getAbsolute()))
curator.delete(path.append(orphan));
}
catch (Exception e) {
logger.log(WARNING, "Failed cleaning orphans: " + orphans, e);
}
} | curator.delete(path.append(orphan)); | private void cleanOrphans() {
List<String> orphans = null;
try {
for (String orphan : orphans = curator.framework().getZookeeperClient().getZooKeeper().getEphemerals(path.getAbsolute()))
curator.delete(path.append(orphan));
}
catch (Exception e) {
logger.log(WARNING, "Failed cleaning orphans: " + orphans, e);
}
} | class Task {
enum Type { register, unregister }
final Type type;
final SingletonWorker singleton;
final CompletableFuture<?> future = new CompletableFuture<>();
private Task(Type type, SingletonWorker singleton) {
this.type = type;
this.singleton = singleton;
}
static Task register(SingletonWorker singleton) { return new Task(Type.register, singleton); }
static Task unregister(SingletonWorker singleton) { return new Task(Type.unregister, singleton); }
} | class Task {
enum Type { register, unregister }
final Type type;
final SingletonWorker singleton;
final CompletableFuture<?> future = new CompletableFuture<>();
private Task(Type type, SingletonWorker singleton) {
this.type = type;
this.singleton = singleton;
}
static Task register(SingletonWorker singleton) { return new Task(Type.register, singleton); }
static Task unregister(SingletonWorker singleton) { return new Task(Type.unregister, singleton); }
} |
```suggestion // Only the ephemerals owned by this client session are listed here, and this client should only ever attempt this lock from this thread, i.e., 0 or 1 nodes. for (String orphan : orphans = curator.framework().getZookeeperClient().getZooKeeper().getEphemerals(path.getAbsolute())) ``` | private void cleanOrphans() {
List<String> orphans = null;
try {
for (String orphan : orphans = curator.framework().getZookeeperClient().getZooKeeper().getEphemerals(path.getAbsolute()))
curator.delete(path.append(orphan));
}
catch (Exception e) {
logger.log(WARNING, "Failed cleaning orphans: " + orphans, e);
}
} | for (String orphan : orphans = curator.framework().getZookeeperClient().getZooKeeper().getEphemerals(path.getAbsolute())) | private void cleanOrphans() {
List<String> orphans = null;
try {
for (String orphan : orphans = curator.framework().getZookeeperClient().getZooKeeper().getEphemerals(path.getAbsolute()))
curator.delete(path.append(orphan));
}
catch (Exception e) {
logger.log(WARNING, "Failed cleaning orphans: " + orphans, e);
}
} | class Task {
enum Type { register, unregister }
final Type type;
final SingletonWorker singleton;
final CompletableFuture<?> future = new CompletableFuture<>();
private Task(Type type, SingletonWorker singleton) {
this.type = type;
this.singleton = singleton;
}
static Task register(SingletonWorker singleton) { return new Task(Type.register, singleton); }
static Task unregister(SingletonWorker singleton) { return new Task(Type.unregister, singleton); }
} | class Task {
enum Type { register, unregister }
final Type type;
final SingletonWorker singleton;
final CompletableFuture<?> future = new CompletableFuture<>();
private Task(Type type, SingletonWorker singleton) {
this.type = type;
this.singleton = singleton;
}
static Task register(SingletonWorker singleton) { return new Task(Type.register, singleton); }
static Task unregister(SingletonWorker singleton) { return new Task(Type.unregister, singleton); }
} |
Would it be appropriate to log something here? | public void close() {
executor.shutdown();
try {
if ( ! executor.awaitTermination(10, TimeUnit.SECONDS)) {
executor.shutdownNow();
}
} catch (InterruptedException e) { }
} | } catch (InterruptedException e) { } | public void close() {
executor.shutdown();
try {
if ( ! executor.awaitTermination(10, TimeUnit.SECONDS)) {
executor.shutdownNow();
}
} catch (InterruptedException e) { }
} | class CloudSubscriberFactory implements SubscriberFactory {
private final ConfigSource configSource;
private final Map<CloudSubscriber, Integer> activeSubscribers = new WeakHashMap<>();
private final ExecutorService executor;
private Optional<Long> testGeneration = Optional.empty();
public CloudSubscriberFactory(ConfigSource configSource) {
this.configSource = configSource;
executor = new ThreadPoolExecutor(1, Integer.MAX_VALUE,
1, TimeUnit.SECONDS, new SynchronousQueue<>(),
new DaemonThreadFactory("cloud-subscriber-factory"));
}
/**
 * Creates a subscriber for the given config keys. If {@code reloadActiveSubscribers} has already
 * run, the new subscriber is immediately brought up to that generation.
 */
@Override  // duplicate @Override removed — @Override is not a repeatable annotation, so two copies do not compile
public Subscriber getSubscriber(Set<? extends ConfigKey<?>> configKeys, String name) {
    Set<ConfigKey<ConfigInstance>> subscriptionKeys = new HashSet<>();
    for (ConfigKey<?> key : configKeys) {
        @SuppressWarnings("unchecked")
        ConfigKey<ConfigInstance> invariant = (ConfigKey<ConfigInstance>) key;
        subscriptionKeys.add(invariant);
    }
    CloudSubscriber subscriber = new CloudSubscriber(executor, name, configSource, subscriptionKeys);
    testGeneration.ifPresent(subscriber.getSubscriber()::reload);
    activeSubscribers.put(subscriber, 0);  // WeakHashMap used as a weak set; the value is unused
    return subscriber;
}
/**
 * Reloads all live subscribers to the given generation, and remembers it so subscribers created
 * later start at the same generation.
 */
@Override
public void reloadActiveSubscribers(long generation) {
testGeneration = Optional.of(generation);
// Snapshot the weak key set before iterating, so entries collected mid-iteration cannot interfere.
List<CloudSubscriber> subscribers = new ArrayList<>(activeSubscribers.keySet());
subscribers.forEach(s -> s.getSubscriber().reload(generation));
}
/** Guice provider producing a factory backed by the default config source set. */
public static class Provider implements com.google.inject.Provider<SubscriberFactory> {
@Override
public SubscriberFactory get() {
return new CloudSubscriberFactory(ConfigSourceSet.createDefault());
}
}
} | class CloudSubscriberFactory implements SubscriberFactory {
private final ConfigSource configSource;
private final Map<CloudSubscriber, Integer> activeSubscribers = new WeakHashMap<>();
private final ExecutorService executor;
private Optional<Long> testGeneration = Optional.empty();
public CloudSubscriberFactory(ConfigSource configSource) {
this.configSource = configSource;
executor = new ThreadPoolExecutor(1, Integer.MAX_VALUE,
1, TimeUnit.SECONDS, new SynchronousQueue<>(),
new DaemonThreadFactory("cloud-subscriber-factory"));
}
/**
 * Creates a subscriber for the given config keys. If {@code reloadActiveSubscribers} has already
 * run, the new subscriber is immediately brought up to that generation.
 */
@Override  // duplicate @Override removed — @Override is not a repeatable annotation, so two copies do not compile
public Subscriber getSubscriber(Set<? extends ConfigKey<?>> configKeys, String name) {
    Set<ConfigKey<ConfigInstance>> subscriptionKeys = new HashSet<>();
    for (ConfigKey<?> key : configKeys) {
        @SuppressWarnings("unchecked")
        ConfigKey<ConfigInstance> invariant = (ConfigKey<ConfigInstance>) key;
        subscriptionKeys.add(invariant);
    }
    CloudSubscriber subscriber = new CloudSubscriber(executor, name, configSource, subscriptionKeys);
    testGeneration.ifPresent(subscriber.getSubscriber()::reload);
    activeSubscribers.put(subscriber, 0);  // WeakHashMap used as a weak set; the value is unused
    return subscriber;
}
@Override
public void reloadActiveSubscribers(long generation) {
testGeneration = Optional.of(generation);
List<CloudSubscriber> subscribers = new ArrayList<>(activeSubscribers.keySet());
subscribers.forEach(s -> s.getSubscriber().reload(generation));
}
public static class Provider implements com.google.inject.Provider<SubscriberFactory> {
@Override
public SubscriberFactory get() {
return new CloudSubscriberFactory(ConfigSourceSet.createDefault());
}
}
} |
```suggestion Math.max(1, (int)Math.ceil(resources.vcpu()*MAX_FLUSH_THREAD_RATIO)))); ``` | private void tuneFlushConcurrentThreads(ProtonConfig.Flush.Builder builder) {
if (usableMemoryGb() < MIN_MEMORY_PER_FLUSH_THREAD_GB) {
builder.maxconcurrent(1);
}
builder.maxconcurrent(Math.min(builder.build().maxconcurrent(),
Math.max(1, (int)Math.ceil(resources.vcpu()*MAX_FLUSH_TREAD_RATIO))));
} | Math.max(1, (int)Math.ceil(resources.vcpu()*MAX_FLUSH_TREAD_RATIO)))); | private void tuneFlushConcurrentThreads(ProtonConfig.Flush.Builder builder) {
if (usableMemoryGb() < MIN_MEMORY_PER_FLUSH_THREAD_GB) {
builder.maxconcurrent(1);
}
builder.maxconcurrent(Math.min(builder.build().maxconcurrent(),
Math.max(1, (int)Math.ceil(resources.vcpu()*MAX_FLUSH_THREAD_RATIO))));
} | class NodeResourcesTuning implements ProtonConfig.Producer {
private final static double SUMMARY_FILE_SIZE_AS_FRACTION_OF_MEMORY = 0.02;
private final static double SUMMARY_CACHE_SIZE_AS_FRACTION_OF_MEMORY = 0.04;
private final static double MEMORY_GAIN_AS_FRACTION_OF_MEMORY = 0.08;
private final static double MIN_MEMORY_PER_FLUSH_THREAD_GB = 16.0;
private final static double MAX_FLUSH_TREAD_RATIO = 1.0/8;
private final static double TLS_SIZE_FRACTION = 0.02;
final static long MB = 1024 * 1024;
public final static long GB = MB * 1024;
private final static long MEMORY_COST_PER_DOCUMENT_STORE_ONLY = 46L;
private final NodeResources resources;
private final int threadsPerSearch;
private final double fractionOfMemoryReserved;
public static final double reservedMemoryGb = 0.5;
public NodeResourcesTuning(NodeResources resources,
int threadsPerSearch,
double fractionOfMemoryReserved) {
this.resources = resources;
this.threadsPerSearch = threadsPerSearch;
this.fractionOfMemoryReserved = fractionOfMemoryReserved;
}
@Override
public void getConfig(ProtonConfig.Builder builder) {
setHwInfo(builder);
tuneDiskWriteSpeed(builder);
tuneRequestThreads(builder);
tuneDocumentStoreMaxFileSize(builder.summary.log);
tuneFlushStrategyMemoryLimits(builder.flush.memory);
tuneFlushStrategyTlsSize(builder.flush.memory);
tuneFlushConcurrentThreads(builder.flush);
tuneSummaryReadIo(builder.summary.read);
tuneSummaryCache(builder.summary.cache);
tuneSearchReadIo(builder.search.mmap);
for (ProtonConfig.Documentdb.Builder dbb : builder.documentdb) {
getConfig(dbb);
}
}
private void getConfig(ProtonConfig.Documentdb.Builder builder) {
ProtonConfig.Documentdb dbCfg = builder.build();
if (dbCfg.mode() != ProtonConfig.Documentdb.Mode.Enum.INDEX) {
long numDocs = (long)usableMemoryGb() * GB / MEMORY_COST_PER_DOCUMENT_STORE_ONLY;
builder.allocation.initialnumdocs(numDocs);
}
}
private void tuneSummaryCache(ProtonConfig.Summary.Cache.Builder builder) {
long memoryLimitBytes = (long) ((usableMemoryGb() * SUMMARY_CACHE_SIZE_AS_FRACTION_OF_MEMORY) * GB);
builder.maxbytes(memoryLimitBytes);
}
private void setHwInfo(ProtonConfig.Builder builder) {
builder.hwinfo.disk.shared(true);
builder.hwinfo.cpu.cores((int)resources.vcpu());
builder.hwinfo.memory.size((long)(usableMemoryGb() * GB));
builder.hwinfo.disk.size((long)(resources.diskGb() * GB));
}
private void tuneDiskWriteSpeed(ProtonConfig.Builder builder) {
if (resources.diskSpeed() != NodeResources.DiskSpeed.fast) {
builder.hwinfo.disk.writespeed(40);
}
}
private void tuneDocumentStoreMaxFileSize(ProtonConfig.Summary.Log.Builder builder) {
long fileSizeBytes = (long) Math.max(256*MB, usableMemoryGb()*GB*SUMMARY_FILE_SIZE_AS_FRACTION_OF_MEMORY);
builder.maxfilesize(fileSizeBytes);
}
private void tuneFlushStrategyMemoryLimits(ProtonConfig.Flush.Memory.Builder builder) {
long memoryLimitBytes = (long) ((usableMemoryGb() * MEMORY_GAIN_AS_FRACTION_OF_MEMORY) * GB);
builder.maxmemory(memoryLimitBytes);
builder.each.maxmemory(memoryLimitBytes);
}
/** Sets the transaction log size to TLS_SIZE_FRACTION of disk, clamped to the range [2 GB, 100 GB]. */
private void tuneFlushStrategyTlsSize(ProtonConfig.Flush.Memory.Builder builder) {
    long proposedBytes = (long) (resources.diskGb() * TLS_SIZE_FRACTION * GB);
    long clampedBytes = min(max(proposedBytes, 2 * GB), 100 * GB);
    builder.maxtlssize(clampedBytes);
}
private void tuneSummaryReadIo(ProtonConfig.Summary.Read.Builder builder) {
if (resources.diskSpeed() == NodeResources.DiskSpeed.fast) {
builder.io(ProtonConfig.Summary.Read.Io.DIRECTIO);
}
}
private void tuneSearchReadIo(ProtonConfig.Search.Mmap.Builder builder) {
if (resources.diskSpeed() == NodeResources.DiskSpeed.fast) {
builder.advise(ProtonConfig.Search.Mmap.Advise.RANDOM);
}
}
/** Sizes searcher and summary thread pools from the node's vcpu count, rounded up to whole cores. */
private void tuneRequestThreads(ProtonConfig.Builder builder) {
    int cores = (int) Math.ceil(resources.vcpu());
    builder.numsearcherthreads(cores * threadsPerSearch);
    builder.numsummarythreads(cores);
    builder.numthreadspersearch(threadsPerSearch);
}
/**
 * Returns the memory we can expect will be available for the content node processes:
 * total memory minus the fixed reserved amount, scaled down by the reserved fraction.
 */
private double usableMemoryGb() {
double usableMemoryGb = resources.memoryGb() - reservedMemoryGb;
return usableMemoryGb * (1 - fractionOfMemoryReserved);
}
} | class NodeResourcesTuning implements ProtonConfig.Producer {
private final static double SUMMARY_FILE_SIZE_AS_FRACTION_OF_MEMORY = 0.02;
private final static double SUMMARY_CACHE_SIZE_AS_FRACTION_OF_MEMORY = 0.04;
private final static double MEMORY_GAIN_AS_FRACTION_OF_MEMORY = 0.08;
private final static double MIN_MEMORY_PER_FLUSH_THREAD_GB = 16.0;
private final static double MAX_FLUSH_THREAD_RATIO = 1.0/8;
private final static double TLS_SIZE_FRACTION = 0.02;
final static long MB = 1024 * 1024;
public final static long GB = MB * 1024;
private final static long MEMORY_COST_PER_DOCUMENT_STORE_ONLY = 46L;
private final NodeResources resources;
private final int threadsPerSearch;
private final double fractionOfMemoryReserved;
public static final double reservedMemoryGb = 0.5;
public NodeResourcesTuning(NodeResources resources,
int threadsPerSearch,
double fractionOfMemoryReserved) {
this.resources = resources;
this.threadsPerSearch = threadsPerSearch;
this.fractionOfMemoryReserved = fractionOfMemoryReserved;
}
@Override
public void getConfig(ProtonConfig.Builder builder) {
setHwInfo(builder);
tuneDiskWriteSpeed(builder);
tuneRequestThreads(builder);
tuneDocumentStoreMaxFileSize(builder.summary.log);
tuneFlushStrategyMemoryLimits(builder.flush.memory);
tuneFlushStrategyTlsSize(builder.flush.memory);
tuneFlushConcurrentThreads(builder.flush);
tuneSummaryReadIo(builder.summary.read);
tuneSummaryCache(builder.summary.cache);
tuneSearchReadIo(builder.search.mmap);
for (ProtonConfig.Documentdb.Builder dbb : builder.documentdb) {
getConfig(dbb);
}
}
private void getConfig(ProtonConfig.Documentdb.Builder builder) {
ProtonConfig.Documentdb dbCfg = builder.build();
if (dbCfg.mode() != ProtonConfig.Documentdb.Mode.Enum.INDEX) {
long numDocs = (long)usableMemoryGb() * GB / MEMORY_COST_PER_DOCUMENT_STORE_ONLY;
builder.allocation.initialnumdocs(numDocs);
}
}
private void tuneSummaryCache(ProtonConfig.Summary.Cache.Builder builder) {
long memoryLimitBytes = (long) ((usableMemoryGb() * SUMMARY_CACHE_SIZE_AS_FRACTION_OF_MEMORY) * GB);
builder.maxbytes(memoryLimitBytes);
}
private void setHwInfo(ProtonConfig.Builder builder) {
builder.hwinfo.disk.shared(true);
builder.hwinfo.cpu.cores((int)resources.vcpu());
builder.hwinfo.memory.size((long)(usableMemoryGb() * GB));
builder.hwinfo.disk.size((long)(resources.diskGb() * GB));
}
private void tuneDiskWriteSpeed(ProtonConfig.Builder builder) {
if (resources.diskSpeed() != NodeResources.DiskSpeed.fast) {
builder.hwinfo.disk.writespeed(40);
}
}
private void tuneDocumentStoreMaxFileSize(ProtonConfig.Summary.Log.Builder builder) {
long fileSizeBytes = (long) Math.max(256*MB, usableMemoryGb()*GB*SUMMARY_FILE_SIZE_AS_FRACTION_OF_MEMORY);
builder.maxfilesize(fileSizeBytes);
}
private void tuneFlushStrategyMemoryLimits(ProtonConfig.Flush.Memory.Builder builder) {
long memoryLimitBytes = (long) ((usableMemoryGb() * MEMORY_GAIN_AS_FRACTION_OF_MEMORY) * GB);
builder.maxmemory(memoryLimitBytes);
builder.each.maxmemory(memoryLimitBytes);
}
private void tuneFlushStrategyTlsSize(ProtonConfig.Flush.Memory.Builder builder) {
long tlsSizeBytes = (long) ((resources.diskGb() * TLS_SIZE_FRACTION) * GB);
tlsSizeBytes = max(2*GB, min(tlsSizeBytes, 100 * GB));
builder.maxtlssize(tlsSizeBytes);
}
private void tuneSummaryReadIo(ProtonConfig.Summary.Read.Builder builder) {
if (resources.diskSpeed() == NodeResources.DiskSpeed.fast) {
builder.io(ProtonConfig.Summary.Read.Io.DIRECTIO);
}
}
private void tuneSearchReadIo(ProtonConfig.Search.Mmap.Builder builder) {
if (resources.diskSpeed() == NodeResources.DiskSpeed.fast) {
builder.advise(ProtonConfig.Search.Mmap.Advise.RANDOM);
}
}
private void tuneRequestThreads(ProtonConfig.Builder builder) {
int numCores = (int)Math.ceil(resources.vcpu());
builder.numsearcherthreads(numCores*threadsPerSearch);
builder.numsummarythreads(numCores);
builder.numthreadspersearch(threadsPerSearch);
}
/** Returns the memory we can expect will be available for the content node processes */
private double usableMemoryGb() {
double usableMemoryGb = resources.memoryGb() - reservedMemoryGb;
return usableMemoryGb * (1 - fractionOfMemoryReserved);
}
} |
Yes, I will investigate a bit more if I can unwarp on ExecutionException or log. | public void close() {
executor.shutdown();
try {
if ( ! executor.awaitTermination(10, TimeUnit.SECONDS)) {
executor.shutdownNow();
}
} catch (InterruptedException e) { }
} | } catch (InterruptedException e) { } | public void close() {
executor.shutdown();
try {
if ( ! executor.awaitTermination(10, TimeUnit.SECONDS)) {
executor.shutdownNow();
}
} catch (InterruptedException e) { }
} | class CloudSubscriberFactory implements SubscriberFactory {
private final ConfigSource configSource;
private final Map<CloudSubscriber, Integer> activeSubscribers = new WeakHashMap<>();
private final ExecutorService executor;
private Optional<Long> testGeneration = Optional.empty();
public CloudSubscriberFactory(ConfigSource configSource) {
this.configSource = configSource;
executor = new ThreadPoolExecutor(1, Integer.MAX_VALUE,
1, TimeUnit.SECONDS, new SynchronousQueue<>(),
new DaemonThreadFactory("cloud-subscriber-factory"));
}
@Override
@Override
public Subscriber getSubscriber(Set<? extends ConfigKey<?>> configKeys, String name) {
Set<ConfigKey<ConfigInstance>> subscriptionKeys = new HashSet<>();
for(ConfigKey<?> key: configKeys) {
@SuppressWarnings("unchecked")
ConfigKey<ConfigInstance> invariant = (ConfigKey<ConfigInstance>) key;
subscriptionKeys.add(invariant);
}
CloudSubscriber subscriber = new CloudSubscriber(executor, name, configSource, subscriptionKeys);
testGeneration.ifPresent(subscriber.getSubscriber()::reload);
activeSubscribers.put(subscriber, 0);
return subscriber;
}
@Override
public void reloadActiveSubscribers(long generation) {
testGeneration = Optional.of(generation);
List<CloudSubscriber> subscribers = new ArrayList<>(activeSubscribers.keySet());
subscribers.forEach(s -> s.getSubscriber().reload(generation));
}
public static class Provider implements com.google.inject.Provider<SubscriberFactory> {
@Override
public SubscriberFactory get() {
return new CloudSubscriberFactory(ConfigSourceSet.createDefault());
}
}
} | class CloudSubscriberFactory implements SubscriberFactory {
private final ConfigSource configSource;
private final Map<CloudSubscriber, Integer> activeSubscribers = new WeakHashMap<>();
private final ExecutorService executor;
private Optional<Long> testGeneration = Optional.empty();
public CloudSubscriberFactory(ConfigSource configSource) {
this.configSource = configSource;
executor = new ThreadPoolExecutor(1, Integer.MAX_VALUE,
1, TimeUnit.SECONDS, new SynchronousQueue<>(),
new DaemonThreadFactory("cloud-subscriber-factory"));
}
@Override
@Override
public Subscriber getSubscriber(Set<? extends ConfigKey<?>> configKeys, String name) {
Set<ConfigKey<ConfigInstance>> subscriptionKeys = new HashSet<>();
for(ConfigKey<?> key: configKeys) {
@SuppressWarnings("unchecked")
ConfigKey<ConfigInstance> invariant = (ConfigKey<ConfigInstance>) key;
subscriptionKeys.add(invariant);
}
CloudSubscriber subscriber = new CloudSubscriber(executor, name, configSource, subscriptionKeys);
testGeneration.ifPresent(subscriber.getSubscriber()::reload);
activeSubscribers.put(subscriber, 0);
return subscriber;
}
@Override
public void reloadActiveSubscribers(long generation) {
testGeneration = Optional.of(generation);
List<CloudSubscriber> subscribers = new ArrayList<>(activeSubscribers.keySet());
subscribers.forEach(s -> s.getSubscriber().reload(generation));
}
public static class Provider implements com.google.inject.Provider<SubscriberFactory> {
@Override
public SubscriberFactory get() {
return new CloudSubscriberFactory(ConfigSourceSet.createDefault());
}
}
} |
@mpolden Please explain and I'll remove the comment 😄 | void testCloudAccountWithDefaultOverride() {
var context = tester.newDeploymentContext();
var prodZone1 = productionUsEast3.zone();
var prodZone2 = productionUsWest1.zone();
var cloudAccount = "012345678912";
var application = new ApplicationPackageBuilder()
.cloudAccount(cloudAccount)
.region(prodZone1.region())
.region(prodZone2.region(), "default")
.build();
tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class);
tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount),
systemTest.zone(),
stagingTest.zone(),
prodZone1);
context.submit(application).deploy();
assertEquals(cloudAccount, tester.controllerTester().configServer().cloudAccount(context.deploymentIdIn(prodZone1)).get().value());
assertEquals(Optional.empty(), tester.controllerTester().configServer().cloudAccount(context.deploymentIdIn(prodZone2)));
} | void testCloudAccountWithDefaultOverride() {
var context = tester.newDeploymentContext();
var prodZone1 = productionUsEast3.zone();
var prodZone2 = productionUsWest1.zone();
var cloudAccount = "012345678912";
var application = new ApplicationPackageBuilder()
.cloudAccount(cloudAccount)
.region(prodZone1.region())
.region(prodZone2.region(), "default")
.build();
tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class);
tester.controllerTester().zoneRegistry().configureCloudAccount(CloudAccount.from(cloudAccount),
systemTest.zone(),
stagingTest.zone(),
prodZone1);
context.submit(application).deploy();
assertEquals(cloudAccount, tester.controllerTester().configServer().cloudAccount(context.deploymentIdIn(prodZone1)).get().value());
assertEquals(Optional.empty(), tester.controllerTester().configServer().cloudAccount(context.deploymentIdIn(prodZone2)));
} | class ControllerTest {
private final DeploymentTester tester = new DeploymentTester();
@Test
void testDeployment() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.explicitEnvironment(Environment.dev, Environment.perf)
.region("us-west-1")
.region("us-east-3")
.build();
Version version1 = tester.configServer().initialVersion();
var context = tester.newDeploymentContext();
context.submit(applicationPackage);
assertEquals(ApplicationVersion.from(RevisionId.forProduction(1), DeploymentContext.defaultSourceRevision, "a@b", new Version("6.1"), Instant.ofEpochSecond(1)),
context.application().revisions().get(context.instance().change().revision().get()),
"Application version is known from completion of initial job");
context.runJob(systemTest);
context.runJob(stagingTest);
RevisionId applicationVersion = context.instance().change().revision().get();
assertTrue(applicationVersion.isProduction(), "Application version has been set during deployment");
tester.triggerJobs();
tester.clock().advance(Duration.ofSeconds(1));
context.timeOutUpgrade(productionUsWest1);
assertEquals(4, context.instanceJobs().size());
tester.triggerJobs();
tester.controllerTester().createNewController();
assertNotNull(tester.controller().tenants().get(TenantName.from("tenant1")));
assertNotNull(tester.controller().applications().requireInstance(context.instanceId()));
context.submit(applicationPackage);
context.runJob(systemTest);
context.runJob(stagingTest);
context.triggerJobs().jobAborted(productionUsWest1);
context.runJob(productionUsWest1);
tester.triggerJobs();
context.runJob(productionUsEast3);
assertEquals(4, context.instanceJobs().size());
applicationPackage = new ApplicationPackageBuilder()
.instances("hellO")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception due to illegal deployment spec.");
}
catch (IllegalArgumentException e) {
assertEquals("Invalid id 'hellO'. Tenant, application and instance names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.", e.getMessage());
}
applicationPackage = new ApplicationPackageBuilder()
.region("deep-space-9")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception due to illegal deployment spec.");
}
catch (IllegalArgumentException e) {
assertEquals("Zone prod.deep-space-9 in deployment spec was not found in this system!", e.getMessage());
}
applicationPackage = new ApplicationPackageBuilder()
.region("us-east-3")
.build();
try {
assertTrue(context.instance().deployments().containsKey(ZoneId.from("prod", "us-west-1")));
context.submit(applicationPackage);
fail("Expected exception due to illegal production deployment removal");
}
catch (IllegalArgumentException e) {
assertEquals("deployment-removal: application instance 'tenant.application.default' is deployed in us-west-1, " +
"but this instance and region combination is removed from deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval),
e.getMessage());
}
assertNotNull(context.instance().deployments().get(productionUsWest1.zone()),
"Zone was not removed");
applicationPackage = new ApplicationPackageBuilder()
.allow(ValidationId.deploymentRemoval)
.upgradePolicy("default")
.region("us-east-3")
.build();
context.submit(applicationPackage);
assertNull(context.instance().deployments().get(productionUsWest1.zone()),
"Zone was removed");
assertNull(context.instanceJobs().get(productionUsWest1), "Deployment job was removed");
assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.instanceId())
.get(tester.clock().instant()));
tester.clock().advance(Duration.ofSeconds(1));
context.submit(ApplicationPackage.deploymentRemoval());
tester.clock().advance(Duration.ofSeconds(1));
context.submit(ApplicationPackage.deploymentRemoval());
tester.applications().deleteApplication(context.application().id(),
tester.controllerTester().credentialsFor(context.instanceId().tenant()));
assertArrayEquals(new byte[0],
tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.instanceId())
.get(tester.clock().instant()));
assertNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.deploymentIdIn(productionUsWest1.zone())));
}
@Test
void testGlobalRotationStatus() {
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.region(zone1.region())
.region(zone2.region())
.endpoint("default", "default", zone1.region().value(), zone2.region().value())
.build();
context.submit(applicationPackage).deploy();
var deployment1 = context.deploymentIdIn(zone1);
DeploymentRoutingContext routingContext = tester.controller().routing().of(deployment1);
RoutingStatus status1 = routingContext.routingStatus();
assertEquals(RoutingStatus.Value.in, status1.value());
routingContext.setRoutingStatus(RoutingStatus.Value.out, RoutingStatus.Agent.operator);
RoutingStatus status2 = routingContext.routingStatus();
assertEquals(RoutingStatus.Value.out, status2.value());
RoutingStatus status3 = tester.controller().routing().of(context.deploymentIdIn(zone2)).routingStatus();
assertEquals(RoutingStatus.Value.in, status3.value());
}
@Test
void testDnsUpdatesForGlobalEndpoint() {
var betaContext = tester.newDeploymentContext("tenant1", "app1", "beta");
var defaultContext = tester.newDeploymentContext("tenant1", "app1", "default");
ZoneId usWest = ZoneId.from("prod.us-west-1");
ZoneId usCentral = ZoneId.from("prod.us-central-1");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,default")
.endpoint("default", "foo")
.region(usWest.region())
.region(usCentral.region())
.build();
tester.controllerTester().zoneRegistry().setRoutingMethod(List.of(ZoneApiMock.from(usWest), ZoneApiMock.from(usCentral)),
RoutingMethod.sharedLayer4);
betaContext.submit(applicationPackage).deploy();
{
Collection<Deployment> betaDeployments = betaContext.instance().deployments().values();
assertFalse(betaDeployments.isEmpty());
Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
"global",
List.of("beta.app1.tenant1.global.vespa.oath.cloud",
"rotation-id-01"),
OptionalInt.empty(),
RoutingMethod.sharedLayer4));
for (Deployment deployment : betaDeployments) {
assertEquals(containerEndpoints,
tester.configServer().containerEndpoints()
.get(betaContext.deploymentIdIn(deployment.zone())));
}
betaContext.flushDnsUpdates();
}
{
Collection<Deployment> defaultDeployments = defaultContext.instance().deployments().values();
assertFalse(defaultDeployments.isEmpty());
Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
"global",
List.of("app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02"),
OptionalInt.empty(),
RoutingMethod.sharedLayer4));
for (Deployment deployment : defaultDeployments) {
assertEquals(containerEndpoints,
tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone())));
}
defaultContext.flushDnsUpdates();
}
Map<String, String> rotationCnames = Map.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.",
"app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02.");
rotationCnames.forEach((cname, data) -> {
var record = tester.controllerTester().findCname(cname);
assertTrue(record.isPresent());
assertEquals(cname, record.get().name().asString());
assertEquals(data, record.get().data().asString());
});
Map<ApplicationId, Set<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), Set.of("beta.app1.tenant1.global.vespa.oath.cloud"),
defaultContext.instanceId(), Set.of("app1.tenant1.global.vespa.oath.cloud"));
globalDnsNamesByInstance.forEach((instance, dnsNames) -> {
Set<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance)
.scope(Endpoint.Scope.global)
.asList().stream()
.map(Endpoint::dnsName)
.collect(Collectors.toSet());
assertEquals(dnsNames, actualDnsNames, "Global DNS names for " + instance);
});
}
@Test
void testDnsUpdatesForGlobalEndpointLegacySyntax() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.globalServiceId("foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
Collection<Deployment> deployments = context.instance().deployments().values();
assertFalse(deployments.isEmpty());
for (Deployment deployment : deployments) {
assertEquals(Set.of("rotation-id-01",
"app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
"Rotation names are passed to config server in " + deployment.zone());
}
context.flushDnsUpdates();
assertEquals(1, tester.controllerTester().nameService().records().size());
Optional<Record> record = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
assertTrue(record.isPresent());
assertEquals("app1.tenant1.global.vespa.oath.cloud", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
List<String> globalDnsNames = tester.controller().routing().readDeclaredEndpointsOf(context.instanceId())
.scope(Endpoint.Scope.global)
.sortedBy(comparing(Endpoint::dnsName))
.mapToList(Endpoint::dnsName);
assertEquals(List.of("app1.tenant1.global.vespa.oath.cloud"),
globalDnsNames);
}
@Test
void testDnsUpdatesForMultipleGlobalEndpoints() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("foobar", "qrs", "us-west-1", "us-central-1")
.endpoint("default", "qrs", "us-west-1", "us-central-1")
.endpoint("all", "qrs")
.endpoint("west", "qrs", "us-west-1")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
Collection<Deployment> deployments = context.instance().deployments().values();
assertFalse(deployments.isEmpty());
var notWest = Set.of(
"rotation-id-01", "foobar.app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02", "app1.tenant1.global.vespa.oath.cloud",
"rotation-id-03", "all.app1.tenant1.global.vespa.oath.cloud"
);
var west = Sets.union(notWest, Set.of("rotation-id-04", "west.app1.tenant1.global.vespa.oath.cloud"));
for (Deployment deployment : deployments) {
assertEquals(ZoneId.from("prod.us-west-1").equals(deployment.zone()) ? west : notWest,
tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
"Rotation names are passed to config server in " + deployment.zone());
}
context.flushDnsUpdates();
assertEquals(4, tester.controllerTester().nameService().records().size());
var record1 = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
assertTrue(record1.isPresent());
assertEquals("app1.tenant1.global.vespa.oath.cloud", record1.get().name().asString());
assertEquals("rotation-fqdn-02.", record1.get().data().asString());
var record2 = tester.controllerTester().findCname("foobar.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record2.isPresent());
assertEquals("foobar.app1.tenant1.global.vespa.oath.cloud", record2.get().name().asString());
assertEquals("rotation-fqdn-01.", record2.get().data().asString());
var record3 = tester.controllerTester().findCname("all.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record3.isPresent());
assertEquals("all.app1.tenant1.global.vespa.oath.cloud", record3.get().name().asString());
assertEquals("rotation-fqdn-03.", record3.get().data().asString());
var record4 = tester.controllerTester().findCname("west.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record4.isPresent());
assertEquals("west.app1.tenant1.global.vespa.oath.cloud", record4.get().name().asString());
assertEquals("rotation-fqdn-04.", record4.get().data().asString());
}
@Test
void testDnsUpdatesForGlobalEndpointChanges() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
var west = ZoneId.from("prod", "us-west-1");
var central = ZoneId.from("prod", "us-central-1");
var east = ZoneId.from("prod", "us-east-3");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage).deploy();
for (var zone : List.of(west, central)) {
assertEquals(
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
,
"Zone " + zone + " is a member of global endpoint");
}
ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage2).deploy();
for (var zone : List.of(west, central)) {
assertEquals(
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
,
"Zone " + zone + " is a member of global endpoint");
}
assertEquals(
Set.of("rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(east))
,
"Zone " + east + " is a member of global endpoint");
ApplicationPackage applicationPackage3 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value(), east.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage3).deploy();
for (var zone : List.of(west, central, east)) {
assertEquals(
zone.equals(east)
? Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud")
: Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
,
"Zone " + zone + " is a member of global endpoint");
}
ApplicationPackage applicationPackage4 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage4);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
"[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
"but does not include all of these in deployment.xml. Deploying given deployment.xml " +
"will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1] " +
"and add [endpoint 'default' (cluster qrs) -> us-central-1, us-west-1]. " +
ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
}
ApplicationPackage applicationPackage5 = new ApplicationPackageBuilder()
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage5);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
"[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
"but does not include all of these in deployment.xml. Deploying given deployment.xml " +
"will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1]. " +
ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
}
ApplicationPackage applicationPackage6 = new ApplicationPackageBuilder()
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage6);
}
@Test
void testUnassignRotations() {
var context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "qrs", "us-west-1", "us-central-1")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
.region("us-west-1")
.region("us-central-1")
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage2).deploy();
assertEquals(List.of(), context.instance().rotations());
assertEquals(
Set.of(),
tester.configServer().containerEndpoints().get(context.deploymentIdIn(ZoneId.from("prod", "us-west-1")))
);
}
@Test
void testDnsUpdatesWithChangeInRotationAssignment() {
String dnsName1 = "app1.tenant1.global.vespa.oath.cloud";
{
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals(1, tester.controllerTester().nameService().records().size());
{
Optional<Record> record = tester.controllerTester().findCname(dnsName1);
assertTrue(record.isPresent());
assertEquals(dnsName1, record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
applicationPackage = new ApplicationPackageBuilder()
.allow(ValidationId.deploymentRemoval)
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage);
tester.applications().deleteApplication(context.application().id(),
tester.controllerTester().credentialsFor(context.application().id().tenant()));
try (RotationLock lock = tester.controller().routing().rotations().lock()) {
assertTrue(tester.controller().routing().rotations().availableRotations(lock)
.containsKey(new RotationId("rotation-id-01")),
"Rotation is unassigned");
}
context.flushDnsUpdates();
Optional<Record> record = tester.controllerTester().findCname(dnsName1);
assertTrue(record.isEmpty(), dnsName1 + " is removed");
}
String dnsName2 = "app2.tenant2.global.vespa.oath.cloud";
{
var context = tester.newDeploymentContext("tenant2", "app2", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals(1, tester.controllerTester().nameService().records().size());
var record = tester.controllerTester().findCname(dnsName2);
assertTrue(record.isPresent());
assertEquals(dnsName2, record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
{
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals("rotation-id-02", context.instance().rotations().get(0).rotationId().asString());
assertEquals(2, tester.controllerTester().nameService().records().size());
var record1 = tester.controllerTester().findCname(dnsName1);
assertTrue(record1.isPresent());
assertEquals("rotation-fqdn-02.", record1.get().data().asString());
var record2 = tester.controllerTester().findCname(dnsName2);
assertTrue(record2.isPresent());
assertEquals("rotation-fqdn-01.", record2.get().data().asString());
}
}
@Test
void testDnsUpdatesForApplicationEndpoint() {
ApplicationId beta = ApplicationId.from("tenant1", "app1", "beta");
ApplicationId main = ApplicationId.from("tenant1", "app1", "main");
var context = tester.newDeploymentContext(beta);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,main")
.region("us-east-3")
.region("us-west-1")
.region("aws-us-east-1a")
.region("aws-us-east-1b")
.applicationEndpoint("a", "default",
Map.of("aws-us-east-1a", Map.of(beta.instance(), 2,
main.instance(), 8),
"aws-us-east-1b", Map.of(main.instance(), 1)))
.applicationEndpoint("b", "default", "aws-us-east-1a",
Map.of(beta.instance(), 1,
main.instance(), 1))
.applicationEndpoint("c", "default", "aws-us-east-1b",
Map.of(beta.instance(), 4))
.applicationEndpoint("d", "default", "us-west-1",
Map.of(main.instance(), 7,
beta.instance(), 3))
.applicationEndpoint("e", "default", "us-east-3",
Map.of(main.instance(), 3))
.build();
context.submit(applicationPackage).deploy();
ZoneId east3 = ZoneId.from("prod", "us-east-3");
ZoneId west1 = ZoneId.from("prod", "us-west-1");
ZoneId east1a = ZoneId.from("prod", "aws-us-east-1a");
ZoneId east1b = ZoneId.from("prod", "aws-us-east-1b");
Map<DeploymentId, Map<String, Integer>> deploymentEndpoints = Map.of(
new DeploymentId(beta, east3), Map.of(),
new DeploymentId(main, east3), Map.of("e.app1.tenant1.us-east-3-r.vespa.oath.cloud", 3),
new DeploymentId(beta, west1), Map.of("d.app1.tenant1.us-west-1-r.vespa.oath.cloud", 3),
new DeploymentId(main, west1), Map.of("d.app1.tenant1.us-west-1-r.vespa.oath.cloud", 7),
new DeploymentId(beta, east1a), Map.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 2,
"b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 1),
new DeploymentId(main, east1a), Map.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 8,
"b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 1),
new DeploymentId(beta, east1b), Map.of("c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud", 4),
new DeploymentId(main, east1b), Map.of("a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud", 1)
);
deploymentEndpoints.forEach((deployment, endpoints) -> {
Set<ContainerEndpoint> expected = endpoints.entrySet().stream()
.map(kv -> new ContainerEndpoint("default", "application",
List.of(kv.getKey()),
OptionalInt.of(kv.getValue()),
tester.controller().zoneRegistry().routingMethod(deployment.zoneId())))
.collect(Collectors.toSet());
assertEquals(expected,
tester.configServer().containerEndpoints().get(deployment),
"Endpoint names for " + deployment + " are passed to config server");
});
context.flushDnsUpdates();
Set<Record> records = tester.controllerTester().nameService().records();
assertEquals(new TreeSet<>(Set.of(new Record(Record.Type.CNAME,
RecordName.from("beta.app1.tenant1.aws-us-east-1a.vespa.oath.cloud"),
RecordData.from("lb-0--tenant1.app1.beta--prod.aws-us-east-1a.")),
new Record(Record.Type.CNAME,
RecordName.from("beta.app1.tenant1.aws-us-east-1b.vespa.oath.cloud"),
RecordData.from("lb-0--tenant1.app1.beta--prod.aws-us-east-1b.")),
new Record(Record.Type.CNAME,
RecordName.from("main.app1.tenant1.aws-us-east-1a.vespa.oath.cloud"),
RecordData.from("lb-0--tenant1.app1.main--prod.aws-us-east-1a.")),
new Record(Record.Type.CNAME,
RecordName.from("main.app1.tenant1.aws-us-east-1b.vespa.oath.cloud"),
RecordData.from("lb-0--tenant1.app1.main--prod.aws-us-east-1b.")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/2")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/8")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/1")),
new Record(Record.Type.ALIAS,
RecordName.from("b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
new Record(Record.Type.ALIAS,
RecordName.from("b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
new Record(Record.Type.ALIAS,
RecordName.from("c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/4")),
new Record(Record.Type.CNAME,
RecordName.from("d.app1.tenant1.us-west-1-r.vespa.oath.cloud"),
RecordData.from("vip.prod.us-west-1.")),
new Record(Record.Type.CNAME,
RecordName.from("e.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
RecordData.from("vip.prod.us-east-3.")))),
new TreeSet<>(records));
List<String> endpointDnsNames = tester.controller().routing().declaredEndpointsOf(context.application())
.scope(Endpoint.Scope.application)
.sortedBy(comparing(Endpoint::dnsName))
.mapToList(Endpoint::dnsName);
assertEquals(List.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud",
"a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud",
"b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud",
"c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud",
"d.app1.tenant1.us-west-1-r.vespa.oath.cloud",
"e.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
endpointDnsNames);
}
@Test
// Dev deployments bypass the deployment pipeline: no job status and no
// deployment spec are persisted, endpoints reflect the zone's routing
// method, and application-store meta data is written on deploy/deactivate.
void testDevDeployment() {
// An empty application package deployed straight to a dev zone.
ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]);
var context = tester.newDeploymentContext();
ZoneId zone = ZoneId.from("dev", "us-east-1");
tester.controllerTester().zoneRegistry()
.setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.sharedLayer4);
context.runJob(zone, applicationPackage);
assertTrue(tester.configServer().application(context.instanceId(), zone).get().activated(),
"Application deployed and activated");
// No pipeline involvement: nothing recorded for jobs or deployment spec.
assertTrue(context.instanceJobs().isEmpty(),
"No job status added");
assertEquals(DeploymentSpec.empty, context.application().deploymentSpec(), "DeploymentSpec is not stored");
// The dev deployment's endpoints use the routing method configured above.
Set<RoutingMethod> routingMethods = tester.controller().routing().readEndpointsOf(context.deploymentIdIn(zone))
.asList()
.stream()
.map(Endpoint::routingMethod)
.collect(Collectors.toSet());
assertEquals(routingMethods, Set.of(RoutingMethod.sharedLayer4));
// Meta data is stored for the deployment ...
assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(new DeploymentId(context.instanceId(), zone))
.get(tester.clock().instant()));
tester.clock().advance(Duration.ofSeconds(1));
tester.controller().applications().deactivate(context.instanceId(), zone);
// ... and replaced by an empty blob once the deployment is deactivated.
assertArrayEquals(new byte[0],
tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(new DeploymentId(context.instanceId(), zone))
.get(tester.clock().instant()));
}
@Test
// Major 8 is flagged incompatible with earlier majors. Dev deployments must
// choose a platform compatible with the package's compile version, and fail
// with a clear message when no such platform exists.
void testDevDeploymentWithIncompatibleVersions() {
Version version1 = new Version("7");
Version version2 = new Version("7.5");
Version version3 = new Version("8");
var context = tester.newDeploymentContext();
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
tester.controllerTester().upgradeSystem(version2);
// Keep 7.5 alive in the system by deploying another application on it.
tester.newDeploymentContext("keep", "v2", "alive").submit().deploy();
ZoneId zone = ZoneId.from("dev", "us-east-1");
// Compiled against 7: deploys on the newest compatible platform (7.5).
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).build());
assertEquals(version2, context.deployment(zone).version());
assertEquals(Optional.of(version1), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
// Requesting major 8 before any 8.x platform exists must fail.
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
fail("Should fail when specifying a major that does not yet exist");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms were found for major version 8 specified in deployment.xml", e.getMessage());
}
// Compiled against 8 while only 7.x platforms exist must also fail.
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
fail("Should fail when compiled against a version which is only compatible with not-yet-existent versions");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms are compatible with compile version 8", e.getMessage());
}
// Major 8 becomes available; a 7-compiled package pinned to major 8 is
// still rejected since 8 is incompatible with compile version 7.
tester.controllerTester().upgradeSystem(version3);
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
fail("Should fail when specifying a major which is incompatible with compile version");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms on major version 8 specified in deployment.xml are compatible with compile version 7", e.getMessage());
}
// Packages compiled against 8 deploy on 8, with or without an explicit major.
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).majorVersion(8).build());
assertEquals(version3, context.deployment(zone).version());
assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
assertEquals(version3, context.deployment(zone).version());
assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
}
@Test
void testSuspension() {
    // Deploy to two prod zones, then verify that suspending one deployment is
    // reflected by the config server without affecting the other deployment.
    var deploymentContext = tester.newDeploymentContext();
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .region("us-west-1")
            .region("us-east-3")
            .build();
    deploymentContext.submit(pkg).deploy();
    DeploymentId west = deploymentContext.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
    DeploymentId east = deploymentContext.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-east-3")));
    var configServer = tester.configServer();
    // Neither deployment starts out suspended.
    assertFalse(configServer.isSuspended(west));
    assertFalse(configServer.isSuspended(east));
    // Suspend only us-west-1; us-east-3 must remain unaffected.
    configServer.setSuspension(west, true);
    assertTrue(configServer.isSuspended(west));
    assertFalse(configServer.isSuspended(east));
}
@Test
void testDeletingApplicationThatHasAlreadyBeenDeleted() {
    // Deactivating a deployment that is already deactivated must be a no-op
    // rather than an error.
    var deploymentContext = tester.newDeploymentContext();
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .region("us-west-1")
            .build();
    ZoneId westZone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
    deploymentContext.submit(pkg).runJob(westZone, pkg);
    // Deactivate twice: the second call exercises the already-deleted path.
    for (int attempt = 0; attempt < 2; attempt++)
        tester.controller().applications().deactivate(deploymentContext.instanceId(), westZone);
}
@Test
void testDeployApplicationWithWarnings() {
    // Warnings produced at deployment time must surface in the deployment's metrics.
    var deploymentContext = tester.newDeploymentContext();
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .region("us-west-1")
            .build();
    ZoneId zone = ZoneId.from("prod", "us-west-1");
    int expectedWarnings = 3;
    // Pre-arrange the config server to report warnings for this deployment.
    tester.configServer().generateWarnings(deploymentContext.deploymentIdIn(zone), expectedWarnings);
    deploymentContext.submit(pkg).deploy();
    int reported = deploymentContext.deployment(zone)
            .metrics().warnings().get(DeploymentMetrics.Warning.all).intValue();
    assertEquals(expectedWarnings, reported);
}
@Test
// Endpoint certificates are provisioned on first deployment, cover all
// global/zone endpoint names, are reused on redeployment, and are also
// provisioned for dev deployments.
void testDeploySelectivelyProvisionsCertificate() {
// Helper reading the stored certificate metadata for an instance.
Function<Instance, Optional<EndpointCertificateMetadata>> certificate = (application) -> tester.controller().curator().readEndpointCertificateMetadata(application.id());
var context1 = tester.newDeploymentContext("tenant1", "app1", "default");
var prodZone = ZoneId.from("prod", "us-west-1");
var stagingZone = ZoneId.from("staging", "us-east-3");
var testZone = ZoneId.from("test", "us-east-1");
tester.controllerTester().zoneRegistry().exclusiveRoutingIn(ZoneApiMock.from(prodZone));
var applicationPackage = new ApplicationPackageBuilder().athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
.region(prodZone.region())
.build();
context1.submit(applicationPackage).deploy();
var cert = certificate.apply(context1.instance());
assertTrue(cert.isPresent(), "Provisions certificate in " + Environment.prod);
// The certificate must cover the random-ID name, global names, and both
// wildcard and non-wildcard names for every deployed zone.
assertEquals(Stream.concat(Stream.of("vznqtz7a5ygwjkbhhj7ymxvlrekgt4l6g.vespa.oath.cloud",
"app1.tenant1.global.vespa.oath.cloud",
"*.app1.tenant1.global.vespa.oath.cloud"),
Stream.of(prodZone, testZone, stagingZone)
.flatMap(zone -> Stream.of("", "*.")
.map(prefix -> prefix + "app1.tenant1." + zone.region().value() +
(zone.environment() == Environment.prod ? "" : "." + zone.environment().value()) +
".vespa.oath.cloud")))
.collect(Collectors.toUnmodifiableSet()),
Set.copyOf(tester.controllerTester().serviceRegistry().endpointCertificateMock().dnsNamesOf(context1.instanceId())));
// Redeploying reuses the same certificate.
context1.submit(applicationPackage).deploy();
assertEquals(cert, certificate.apply(context1.instance()));
// A dev deployment of another application also gets a certificate.
var context2 = tester.newDeploymentContext("tenant1", "app2", "default");
var devZone = ZoneId.from("dev", "us-east-1");
context2.runJob(devZone, applicationPackage);
assertTrue(tester.configServer().application(context2.instanceId(), devZone).get().activated(),
"Application deployed and activated");
assertTrue(certificate.apply(context2.instance()).isPresent(), "Provisions certificate also in zone with routing layer");
}
@Test
void testDeployWithGlobalEndpointsInMultipleClouds() {
    // A global endpoint may not span zones in different clouds; submission
    // must be rejected with a message naming the offending endpoint.
    tester.controllerTester().zoneRegistry().setZones(
            ZoneApiMock.fromId("test.us-west-1"),
            ZoneApiMock.fromId("staging.us-west-1"),
            ZoneApiMock.fromId("prod.us-west-1"),
            ZoneApiMock.newBuilder().with(CloudName.AWS).withId("prod.aws-us-east-1").build()
    );
    var context = tester.newDeploymentContext();

    // An endpoint covering all regions implicitly mixes AWS and the default cloud.
    // Using assertThrows for consistency with the other tests in this class.
    var applicationPackage = new ApplicationPackageBuilder()
            .region("aws-us-east-1")
            .region("us-west-1")
            .endpoint("default", "default")
            .build();
    assertEquals("Endpoint 'default' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]",
                 assertThrows(IllegalArgumentException.class,
                              () -> context.submit(applicationPackage))
                         .getMessage());

    // An explicit region list mixing clouds fails the same way, even when a
    // sibling endpoint in the same package is valid.
    var applicationPackage2 = new ApplicationPackageBuilder()
            .region("aws-us-east-1")
            .region("us-west-1")
            .endpoint("aws", "default", "aws-us-east-1")
            .endpoint("foo", "default", "aws-us-east-1", "us-west-1")
            .build();
    assertEquals("Endpoint 'foo' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]",
                 assertThrows(IllegalArgumentException.class,
                              () -> context.submit(applicationPackage2))
                         .getMessage());
}
@Test
void testDeployWithoutSourceRevision() {
    // A submission that carries no source revision metadata must still deploy normally.
    var deploymentContext = tester.newDeploymentContext();
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .upgradePolicy("default")
            .region("us-west-1")
            .build();
    deploymentContext.submit(pkg, Optional.empty()).deploy();
    assertEquals(1, deploymentContext.instance().deployments().size(), "Deployed application");
}
@Test
// When zones use different routing methods, DNS records are only created for
// the exclusively-routed zone: a weighted ALIAS per deployment, a latency
// ALIAS for the global endpoint, and a CNAME for the zone endpoint.
void testDeployWithGlobalEndpointsAndMultipleRoutingMethods() {
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
.endpoint("default", "default", zone1.region().value(), zone2.region().value())
.endpoint("east", "default", zone2.region().value())
.region(zone1.region())
.region(zone2.region())
.build();
// zone1 routes via the shared layer-4 routing layer, zone2 exclusively.
tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone1), RoutingMethod.sharedLayer4);
tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone2), RoutingMethod.exclusive);
context.submit(applicationPackage).deploy();
// Only records for the exclusive zone (us-east-3) are expected.
var expectedRecords = List.of(
new Record(Record.Type.ALIAS,
RecordName.from("application.tenant.us-east-3-w.vespa.oath.cloud"),
new WeightedAliasTarget(HostName.of("lb-0--tenant.application.default--prod.us-east-3"),
"dns-zone-1", ZoneId.from("prod.us-east-3"), 1).pack()),
new Record(Record.Type.ALIAS,
RecordName.from("east.application.tenant.global.vespa.oath.cloud"),
new LatencyAliasTarget(HostName.of("application.tenant.us-east-3-w.vespa.oath.cloud"),
"dns-zone-1", ZoneId.from("prod.us-east-3")).pack()),
new Record(Record.Type.CNAME,
RecordName.from("application.tenant.us-east-3.vespa.oath.cloud"),
RecordData.from("lb-0--tenant.application.default--prod.us-east-3.")));
assertEquals(expectedRecords, List.copyOf(tester.controllerTester().nameService().records()));
}
@Test
// With exclusive (direct) routing and no rotations configured, container
// endpoint names are passed to the config server per zone: global endpoints
// only in the zones they cover.
void testDeploymentDirectRouting() {
// Local tester without rotation support, shadowing the class field on purpose.
DeploymentTester tester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build(), main));
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var zone3 = ZoneId.from("prod", "eu-west-1");
tester.controllerTester().zoneRegistry()
.exclusiveRoutingIn(ZoneApiMock.from(zone1), ZoneApiMock.from(zone2), ZoneApiMock.from(zone3));
var applicationPackageBuilder = new ApplicationPackageBuilder()
.region(zone1.region())
.region(zone2.region())
.region(zone3.region())
.endpoint("default", "default")
.endpoint("foo", "qrs")
.endpoint("us", "default", zone1.region().value(), zone2.region().value())
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"));
context.submit(applicationPackageBuilder.build()).deploy();
// zone1/zone2 are part of the "us" endpoint; zone3 is not.
for (var zone : List.of(zone1, zone2)) {
assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
"foo.application.tenant.global.vespa.oath.cloud",
"us.application.tenant.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
"Expected container endpoints in " + zone);
}
assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
"foo.application.tenant.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone3)),
"Expected container endpoints in " + zone3);
}
@Test
void testChangeEndpointCluster() {
var context = tester.newDeploymentContext();
var west = ZoneId.from("prod", "us-west-1");
var east = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region(west.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage).deploy();
assertEquals(ClusterSpec.Id.from("foo"), tester.applications().requireInstance(context.instanceId())
.rotations().get(0).clusterId());
applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "bar")
.region(west.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage).deploy();
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant.application' has endpoints [endpoint " +
"'default' (cluster foo) -> us-east-3, us-west-1], but does not include all of these in " +
"deployment.xml. Deploying given deployment.xml will remove " +
"[endpoint 'default' (cluster foo) -> us-east-3, us-west-1] and add " +
"[endpoint 'default' (cluster bar) -> us-east-3, us-west-1]. To allow this add " +
"<allow until='yyyy-mm-dd'>global-endpoint-change</allow> to validation-overrides.xml, see " +
"https:
}
applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "bar")
.region(west.region().value())
.region(east.region().value())
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage).deploy();
assertEquals(ClusterSpec.Id.from("bar"), tester.applications().requireInstance(context.instanceId())
.rotations().get(0).clusterId());
}
@Test
// Applications with unreadable (corrupt) serialized data are skipped by
// readable(), while asList() fails; writing fresh data makes them readable again.
void testReadableApplications() {
var db = new MockCuratorDb(tester.controller().system());
// Local tester backed by the mock curator DB, shadowing the class field on purpose.
var tester = new DeploymentTester(new ControllerTester(db));
var app1 = tester.newDeploymentContext("t1", "a1", "default")
.submit()
.deploy();
var app2 = tester.newDeploymentContext("t2", "a2", "default")
.submit()
.deploy();
assertEquals(2, tester.applications().readable().size());
// Deliberately corrupt app2's stored bytes (0xDEAD) to make it unreadable.
db.curator().set(Path.fromString("/controller/v1/applications/" + app2.application().id().serialized()),
new byte[]{(byte) 0xDE, (byte) 0xAD});
assertEquals(1, tester.applications().readable().size());
// asList() must fail when any application cannot be deserialized.
try {
tester.applications().asList();
fail("Expected exception");
} catch (Exception ignored) {
}
// Redeploying app1 still works despite app2 being corrupt.
app1.submit().deploy();
}
@Test
void testClashingEndpointIdAndInstanceName() {
String deploymentXml = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" +
" <instance id=\"default\">\n" +
" <prod>\n" +
" <region active=\"true\">us-west-1</region>\n" +
" </prod>\n" +
" <endpoints>\n" +
" <endpoint id=\"dev\" container-id=\"qrs\"/>\n" +
" </endpoints>\n" +
" </instance>\n" +
" <instance id=\"dev\">\n" +
" <prod>\n" +
" <region active=\"true\">us-west-1</region>\n" +
" </prod>\n" +
" <endpoints>\n" +
" <endpoint id=\"default\" container-id=\"qrs\"/>\n" +
" </endpoints>\n" +
" </instance>\n" +
"</deployment>\n";
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
try {
tester.newDeploymentContext().submit(applicationPackage);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint with ID 'default' in instance 'dev' clashes with endpoint 'dev' in instance 'default'",
e.getMessage());
}
}
@Test
void testTestPackageWarnings() {
String deploymentXml = "<deployment version='1.0'>\n" +
" <prod>\n" +
" <region>us-west-1</region>\n" +
" </prod>\n" +
"</deployment>\n";
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
byte[] testPackage = ApplicationPackage.filesZip(Map.of("tests/staging-test/foo.json", new byte[0]));
var app = tester.newDeploymentContext();
tester.jobs().submit(app.application().id(), Submission.basic(applicationPackage, testPackage), 1);
assertEquals(List.of(new Notification(tester.clock().instant(),
Type.testPackage,
Level.warning,
NotificationSource.from(app.application().id()),
List.of("test package has staging tests, so it should also include staging setup",
"see https:
tester.controller().notificationsDb().listNotifications(NotificationSource.from(app.application().id()), true));
}
@Test
// Exercises compile-version selection as platform versions, confidence
// levels, incompatibility flags, and the application's own deployments
// change. The assertions are order-dependent: each phase mutates the
// system state the next phase depends on.
void testCompileVersion() {
DeploymentContext context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build();
TenantAndApplicationId application = TenantAndApplicationId.from(context.instanceId());
// Phase 1: only 7.1 exists with normal confidence.
Version version0 = Version.fromString("7.1");
tester.controllerTester().upgradeSystem(version0);
tester.upgrader().overrideConfidence(version0, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
context.submit(applicationPackage).deploy();
// Phase 2: 7.2 appears; compile version follows the oldest deployed platform.
Version version1 = Version.fromString("7.2");
tester.controllerTester().upgradeSystem(version1);
tester.upgrader().overrideConfidence(version1, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
tester.upgrader().maintain();
context.deployPlatform(version1);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
// Phase 3: major 8 appears with low confidence — not yet eligible.
DeploymentContext legacyApp = tester.newDeploymentContext("avoid", "gc", "default").submit().deploy();
TenantAndApplicationId newApp = TenantAndApplicationId.from("new", "app");
Version version2 = Version.fromString("8.0");
tester.controllerTester().upgradeSystem(version2);
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
// Phase 4: 8.0 at normal confidence — new applications get 8.0, existing
// applications stay on 7.2 while deployed there.
tester.upgrader().overrideConfidence(version2, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));
// Phase 5: major 8 marked incompatible with earlier majors.
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));
// Phase 6: application itself moves to a package compiled against 8.
tester.upgrader().overrideConfidence(version2, Confidence.normal);
tester.controllerTester().computeVersionStatus();
context.submit(new ApplicationPackageBuilder().region("us-west-1").compileVersion(version2).build()).deploy();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
// Once on 8, the application keeps 8 even at low confidence ...
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
// ... but broken confidence leaves no suitable released version on 8.
tester.upgrader().overrideConfidence(version2, Confidence.broken);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals("no suitable, released compile version exists",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.empty()))
.getMessage());
assertEquals("no suitable, released compile version exists for specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
// Phase 7: clearing the incompatibility flag restores 7.2 as fallback.
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of(), String.class);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
}
@Test
// Deployments to a tenant-owned cloud account require the account to be
// allow-listed for the tenant and configured for each zone; once configured,
// the account is propagated to the config server for every deployment.
void testCloudAccount() {
DeploymentContext context = tester.newDeploymentContext();
ZoneId devZone = devUsEast1.zone();
ZoneId prodZone = productionUsWest1.zone();
String cloudAccount = "012345678912";
var applicationPackage = new ApplicationPackageBuilder()
.cloudAccount(cloudAccount)
.region(prodZone.region())
.build();
// Not allow-listed for the tenant yet: deployment fails.
context.submit(applicationPackage).runJobExpectingFailure(systemTest, "Requested cloud account '012345678912' is not valid for tenant 'tenant'");
// Allow-listed, but no zone is configured in the account yet: still fails.
tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class);
context.runJobExpectingFailure(systemTest, "Zone test.us-east-1 is not configured in requested cloud account '012345678912'")
.abortJob(stagingTest);
// Configure the account in the relevant zones and deploy successfully.
tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount),
systemTest.zone(),
stagingTest.zone(),
prodZone);
context.submit(applicationPackage).deploy();
tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), devZone);
context.runJob(devZone, applicationPackage);
// Every deployment reports the cloud account to the config server.
for (var zoneId : List.of(systemTest.zone(), stagingTest.zone(), devZone, prodZone)) {
assertEquals(cloudAccount, tester.controllerTester().configServer()
.cloudAccount(context.deploymentIdIn(zoneId))
.get().value());
}
}
@Test
// Fix: the @Test annotation was duplicated here; @Test is not repeatable,
// so the duplicate was a compile error.
void testSubmitWithElementDeprecatedOnPreviousMajor() {
    // global-service-id was deprecated on major 7, so a package compiled
    // against 8.1 that still uses it must be rejected on submission.
    DeploymentContext context = tester.newDeploymentContext();
    var applicationPackage = new ApplicationPackageBuilder()
            .compileVersion(Version.fromString("8.1"))
            .region("us-west-1")
            .globalServiceId("qrs")
            .build();
    try {
        context.submit(applicationPackage).deploy();
        fail("Expected exception");
    } catch (IllegalArgumentException e) {
        assertTrue(e.getMessage().contains("Element 'prod' contains attribute 'global-service-id' deprecated since major version 7"));
    }
}
} | class ControllerTest {
// Shared fixture: a fresh DeploymentTester per test instance.
private final DeploymentTester tester = new DeploymentTester();
@Test
// End-to-end deployment lifecycle: initial submission and pipeline run,
// controller restart resilience, rejection of invalid deployment specs,
// guarded region removal, and application deletion with meta-data cleanup.
// Statement order matters throughout: each phase builds on prior state.
void testDeployment() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.explicitEnvironment(Environment.dev, Environment.perf)
.region("us-west-1")
.region("us-east-3")
.build();
// NOTE(review): version1 is never used in this method — candidate for removal.
Version version1 = tester.configServer().initialVersion();
var context = tester.newDeploymentContext();
context.submit(applicationPackage);
assertEquals(ApplicationVersion.from(RevisionId.forProduction(1), DeploymentContext.defaultSourceRevision, "a@b", new Version("6.1"), Instant.ofEpochSecond(1)),
context.application().revisions().get(context.instance().change().revision().get()),
"Application version is known from completion of initial job");
context.runJob(systemTest);
context.runJob(stagingTest);
RevisionId applicationVersion = context.instance().change().revision().get();
assertTrue(applicationVersion.isProduction(), "Application version has been set during deployment");
tester.triggerJobs();
tester.clock().advance(Duration.ofSeconds(1));
// First prod job times out; four job types have run in total.
context.timeOutUpgrade(productionUsWest1);
assertEquals(4, context.instanceJobs().size());
tester.triggerJobs();
// Simulate a controller restart: state must survive.
tester.controllerTester().createNewController();
assertNotNull(tester.controller().tenants().get(TenantName.from("tenant1")));
assertNotNull(tester.controller().applications().requireInstance(context.instanceId()));
// Resubmit and run the full pipeline to completion.
context.submit(applicationPackage);
context.runJob(systemTest);
context.runJob(stagingTest);
context.triggerJobs().jobAborted(productionUsWest1);
context.runJob(productionUsWest1);
tester.triggerJobs();
context.runJob(productionUsEast3);
assertEquals(4, context.instanceJobs().size());
// Invalid instance name is rejected.
applicationPackage = new ApplicationPackageBuilder()
.instances("hellO")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception due to illegal deployment spec.");
}
catch (IllegalArgumentException e) {
assertEquals("Invalid id 'hellO'. Tenant, application and instance names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.", e.getMessage());
}
// Unknown region is rejected.
applicationPackage = new ApplicationPackageBuilder()
.region("deep-space-9")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception due to illegal deployment spec.");
}
catch (IllegalArgumentException e) {
assertEquals("Zone prod.deep-space-9 in deployment spec was not found in this system!", e.getMessage());
}
// Removing a deployed region without a validation override is rejected.
applicationPackage = new ApplicationPackageBuilder()
.region("us-east-3")
.build();
try {
assertTrue(context.instance().deployments().containsKey(ZoneId.from("prod", "us-west-1")));
context.submit(applicationPackage);
fail("Expected exception due to illegal production deployment removal");
}
catch (IllegalArgumentException e) {
assertEquals("deployment-removal: application instance 'tenant.application.default' is deployed in us-west-1, " +
"but this instance and region combination is removed from deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval),
e.getMessage());
}
assertNotNull(context.instance().deployments().get(productionUsWest1.zone()),
"Zone was not removed");
// With the deployment-removal override, the region is removed.
applicationPackage = new ApplicationPackageBuilder()
.allow(ValidationId.deploymentRemoval)
.upgradePolicy("default")
.region("us-east-3")
.build();
context.submit(applicationPackage);
assertNull(context.instance().deployments().get(productionUsWest1.zone()),
"Zone was removed");
assertNull(context.instanceJobs().get(productionUsWest1), "Deployment job was removed");
// Application meta data exists until the application is deleted ...
assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.instanceId())
.get(tester.clock().instant()));
tester.clock().advance(Duration.ofSeconds(1));
context.submit(ApplicationPackage.deploymentRemoval());
tester.clock().advance(Duration.ofSeconds(1));
context.submit(ApplicationPackage.deploymentRemoval());
tester.applications().deleteApplication(context.application().id(),
tester.controllerTester().credentialsFor(context.instanceId().tenant()));
// ... after which it is replaced by an empty blob, and the removed
// deployment's meta data is gone entirely.
assertArrayEquals(new byte[0],
tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.instanceId())
.get(tester.clock().instant()));
assertNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.deploymentIdIn(productionUsWest1.zone())));
}
@Test
// Routing status (in/out of rotation) is tracked per deployment: taking one
// deployment out of rotation must not affect the other.
void testGlobalRotationStatus() {
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.region(zone1.region())
.region(zone2.region())
.endpoint("default", "default", zone1.region().value(), zone2.region().value())
.build();
context.submit(applicationPackage).deploy();
// Initially in rotation.
var deployment1 = context.deploymentIdIn(zone1);
DeploymentRoutingContext routingContext = tester.controller().routing().of(deployment1);
RoutingStatus status1 = routingContext.routingStatus();
assertEquals(RoutingStatus.Value.in, status1.value());
// Operator takes zone1 out of rotation.
routingContext.setRoutingStatus(RoutingStatus.Value.out, RoutingStatus.Agent.operator);
RoutingStatus status2 = routingContext.routingStatus();
assertEquals(RoutingStatus.Value.out, status2.value());
// zone2 is unaffected.
RoutingStatus status3 = tester.controller().routing().of(context.deploymentIdIn(zone2)).routingStatus();
assertEquals(RoutingStatus.Value.in, status3.value());
}
@Test
// Each instance of a multi-instance application gets its own rotation and
// global endpoint name; container endpoints are passed to the config server
// per deployment, and CNAMEs pointing at the rotation FQDNs are published.
void testDnsUpdatesForGlobalEndpoint() {
var betaContext = tester.newDeploymentContext("tenant1", "app1", "beta");
var defaultContext = tester.newDeploymentContext("tenant1", "app1", "default");
ZoneId usWest = ZoneId.from("prod.us-west-1");
ZoneId usCentral = ZoneId.from("prod.us-central-1");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,default")
.endpoint("default", "foo")
.region(usWest.region())
.region(usCentral.region())
.build();
tester.controllerTester().zoneRegistry().setRoutingMethod(List.of(ZoneApiMock.from(usWest), ZoneApiMock.from(usCentral)),
RoutingMethod.sharedLayer4);
// Submitting on the beta context deploys both instances.
betaContext.submit(applicationPackage).deploy();
// The beta instance gets rotation-id-01 and its own global endpoint name.
{
Collection<Deployment> betaDeployments = betaContext.instance().deployments().values();
assertFalse(betaDeployments.isEmpty());
Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
"global",
List.of("beta.app1.tenant1.global.vespa.oath.cloud",
"rotation-id-01"),
OptionalInt.empty(),
RoutingMethod.sharedLayer4));
for (Deployment deployment : betaDeployments) {
assertEquals(containerEndpoints,
tester.configServer().containerEndpoints()
.get(betaContext.deploymentIdIn(deployment.zone())));
}
betaContext.flushDnsUpdates();
}
// The default instance gets rotation-id-02 and the un-prefixed name.
{
Collection<Deployment> defaultDeployments = defaultContext.instance().deployments().values();
assertFalse(defaultDeployments.isEmpty());
Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
"global",
List.of("app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02"),
OptionalInt.empty(),
RoutingMethod.sharedLayer4));
for (Deployment deployment : defaultDeployments) {
assertEquals(containerEndpoints,
tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone())));
}
defaultContext.flushDnsUpdates();
}
// Each global endpoint name resolves via CNAME to its rotation FQDN.
Map<String, String> rotationCnames = Map.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.",
"app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02.");
rotationCnames.forEach((cname, data) -> {
var record = tester.controllerTester().findCname(cname);
assertTrue(record.isPresent());
assertEquals(cname, record.get().name().asString());
assertEquals(data, record.get().data().asString());
});
// Declared global endpoints per instance match the expected DNS names.
Map<ApplicationId, Set<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), Set.of("beta.app1.tenant1.global.vespa.oath.cloud"),
defaultContext.instanceId(), Set.of("app1.tenant1.global.vespa.oath.cloud"));
globalDnsNamesByInstance.forEach((instance, dnsNames) -> {
Set<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance)
.scope(Endpoint.Scope.global)
.asList().stream()
.map(Endpoint::dnsName)
.collect(Collectors.toSet());
assertEquals(dnsNames, actualDnsNames, "Global DNS names for " + instance);
});
}
// The legacy global-service-id syntax should produce the same rotation assignment and
// DNS records as a declared "default" global endpoint.
@Test
void testDnsUpdatesForGlobalEndpointLegacySyntax() {
    var context = tester.newDeploymentContext("tenant1", "app1", "default");
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .globalServiceId("foo")
            .region("us-west-1")
            .region("us-central-1")
            .build();
    context.submit(applicationPackage).deploy();
    Collection<Deployment> deployments = context.instance().deployments().values();
    assertFalse(deployments.isEmpty());
    // Rotation id and global DNS name are passed to the config server for every zone
    for (Deployment deployment : deployments) {
        assertEquals(Set.of("rotation-id-01",
                            "app1.tenant1.global.vespa.oath.cloud"),
                     tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
                     "Rotation names are passed to config server in " + deployment.zone());
    }
    context.flushDnsUpdates();
    // Exactly one CNAME record: global name -> rotation FQDN
    assertEquals(1, tester.controllerTester().nameService().records().size());
    Optional<Record> record = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
    assertTrue(record.isPresent());
    assertEquals("app1.tenant1.global.vespa.oath.cloud", record.get().name().asString());
    assertEquals("rotation-fqdn-01.", record.get().data().asString());
    // The declared-endpoint view exposes the single global DNS name
    List<String> globalDnsNames = tester.controller().routing().readDeclaredEndpointsOf(context.instanceId())
                                        .scope(Endpoint.Scope.global)
                                        .sortedBy(comparing(Endpoint::dnsName))
                                        .mapToList(Endpoint::dnsName);
    assertEquals(List.of("app1.tenant1.global.vespa.oath.cloud"),
                 globalDnsNames);
}
// Four global endpoints on one instance: three span both regions, "west" covers only
// us-west-1. Checks the endpoint names handed to the config server per zone, and that
// each endpoint gets exactly one CNAME pointing at its rotation FQDN.
@Test
void testDnsUpdatesForMultipleGlobalEndpoints() {
    var context = tester.newDeploymentContext("tenant1", "app1", "default");
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .endpoint("foobar", "qrs", "us-west-1", "us-central-1")
            .endpoint("default", "qrs", "us-west-1", "us-central-1")
            .endpoint("all", "qrs")
            .endpoint("west", "qrs", "us-west-1")
            .region("us-west-1")
            .region("us-central-1")
            .build();
    context.submit(applicationPackage).deploy();
    Collection<Deployment> deployments = context.instance().deployments().values();
    assertFalse(deployments.isEmpty());
    // Names expected in every zone, plus the extra pair expected only in us-west-1
    var everywhere = Set.of(
            "rotation-id-01", "foobar.app1.tenant1.global.vespa.oath.cloud",
            "rotation-id-02", "app1.tenant1.global.vespa.oath.cloud",
            "rotation-id-03", "all.app1.tenant1.global.vespa.oath.cloud"
    );
    var westOnly = Sets.union(everywhere, Set.of("rotation-id-04", "west.app1.tenant1.global.vespa.oath.cloud"));
    for (Deployment deployment : deployments) {
        assertEquals(ZoneId.from("prod.us-west-1").equals(deployment.zone()) ? westOnly : everywhere,
                     tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
                     "Rotation names are passed to config server in " + deployment.zone());
    }
    context.flushDnsUpdates();
    // One CNAME per endpoint: global name -> rotation FQDN
    assertEquals(4, tester.controllerTester().nameService().records().size());
    String[][] expectedCnames = {
            {"app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02."},
            {"foobar.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01."},
            {"all.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-03."},
            {"west.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-04."}};
    for (String[] expected : expectedCnames) {
        var record = tester.controllerTester().findCname(expected[0]);
        assertTrue(record.isPresent());
        assertEquals(expected[0], record.get().name().asString());
        assertEquals(expected[1], record.get().data().asString());
    }
}
// Exercises the global-endpoint-change validation: adding endpoints/regions is allowed,
// but removing or shrinking an endpoint requires an explicit validation override.
@Test
void testDnsUpdatesForGlobalEndpointChanges() {
    var context = tester.newDeploymentContext("tenant1", "app1", "default");
    var west = ZoneId.from("prod", "us-west-1");
    var central = ZoneId.from("prod", "us-central-1");
    var east = ZoneId.from("prod", "us-east-3");
    // Initial package: one endpoint over west and central
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .endpoint("default", "qrs", west.region().value(), central.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .build();
    context.submit(applicationPackage).deploy();
    for (var zone : List.of(west, central)) {
        assertEquals(
                Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
                tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
                ,
                "Zone " + zone + " is a member of global endpoint");
    }
    // Adding a new endpoint ("east") is allowed without an override
    ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
            .endpoint("default", "qrs", west.region().value(), central.region().value())
            .endpoint("east", "qrs", east.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .build();
    context.submit(applicationPackage2).deploy();
    for (var zone : List.of(west, central)) {
        assertEquals(
                Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
                tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
                ,
                "Zone " + zone + " is a member of global endpoint");
    }
    assertEquals(
            Set.of("rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud"),
            tester.configServer().containerEndpointNames(context.deploymentIdIn(east))
            ,
            "Zone " + east + " is a member of global endpoint");
    // Widening the default endpoint to include east is also allowed
    ApplicationPackage applicationPackage3 = new ApplicationPackageBuilder()
            .endpoint("default", "qrs", west.region().value(), central.region().value(), east.region().value())
            .endpoint("east", "qrs", east.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .build();
    context.submit(applicationPackage3).deploy();
    for (var zone : List.of(west, central, east)) {
        assertEquals(
                zone.equals(east)
                        ? Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud",
                                 "rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud")
                        : Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
                tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
                ,
                "Zone " + zone + " is a member of global endpoint");
    }
    // Shrinking the default endpoint back is rejected without an override
    ApplicationPackage applicationPackage4 = new ApplicationPackageBuilder()
            .endpoint("default", "qrs", west.region().value(), central.region().value())
            .endpoint("east", "qrs", east.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .build();
    try {
        context.submit(applicationPackage4);
        fail("Expected exception");
    } catch (IllegalArgumentException e) {
        assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
                     "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
                     "but does not include all of these in deployment.xml. Deploying given deployment.xml " +
                     "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1] " +
                     "and add [endpoint 'default' (cluster qrs) -> us-central-1, us-west-1]. " +
                     ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
    }
    // Removing the default endpoint entirely is likewise rejected without an override
    ApplicationPackage applicationPackage5 = new ApplicationPackageBuilder()
            .endpoint("east", "qrs", east.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .build();
    try {
        context.submit(applicationPackage5);
        fail("Expected exception");
    } catch (IllegalArgumentException e) {
        assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
                     "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
                     "but does not include all of these in deployment.xml. Deploying given deployment.xml " +
                     "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1]. " +
                     ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
    }
    // With the validation override present, the removal is accepted
    ApplicationPackage applicationPackage6 = new ApplicationPackageBuilder()
            .endpoint("east", "qrs", east.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .allow(ValidationId.globalEndpointChange)
            .build();
    context.submit(applicationPackage6);
}
// Removing a global endpoint (with the required validation override) must release the
// rotation and clear the container endpoints handed to the config server.
@Test
void testUnassignRotations() {
    var context = tester.newDeploymentContext();
    // First deploy with a global endpoint, acquiring a rotation ...
    var packageWithEndpoint = new ApplicationPackageBuilder()
            .endpoint("default", "qrs", "us-west-1", "us-central-1")
            .region("us-west-1")
            .region("us-central-1")
            .build();
    context.submit(packageWithEndpoint).deploy();
    // ... then redeploy without it, allowed via the global-endpoint-change override
    var packageWithoutEndpoint = new ApplicationPackageBuilder()
            .region("us-west-1")
            .region("us-central-1")
            .allow(ValidationId.globalEndpointChange)
            .build();
    context.submit(packageWithoutEndpoint).deploy();
    // The rotation is unassigned and no container endpoints remain
    assertEquals(List.of(), context.instance().rotations());
    assertEquals(
            Set.of(),
            tester.configServer().containerEndpoints().get(context.deploymentIdIn(ZoneId.from("prod", "us-west-1")))
    );
}
// Verifies rotation recycling: deleting an application frees its rotation, a second
// application may then receive it, and a re-created application gets the next free
// rotation while DNS records are updated accordingly.
@Test
void testDnsUpdatesWithChangeInRotationAssignment() {
    String dnsName1 = "app1.tenant1.global.vespa.oath.cloud";
    { // app1 acquires rotation-id-01, then is deleted, releasing the rotation and its CNAME
        var context = tester.newDeploymentContext("tenant1", "app1", "default");
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .endpoint("default", "foo")
                .region("us-west-1")
                .region("us-central-1")
                .build();
        context.submit(applicationPackage).deploy();
        assertEquals(1, tester.controllerTester().nameService().records().size());
        {
            Optional<Record> record = tester.controllerTester().findCname(dnsName1);
            assertTrue(record.isPresent());
            assertEquals(dnsName1, record.get().name().asString());
            assertEquals("rotation-fqdn-01.", record.get().data().asString());
        }
        // Remove deployments and delete the application (overrides allow the removals)
        applicationPackage = new ApplicationPackageBuilder()
                .allow(ValidationId.deploymentRemoval)
                .allow(ValidationId.globalEndpointChange)
                .build();
        context.submit(applicationPackage);
        tester.applications().deleteApplication(context.application().id(),
                                                tester.controllerTester().credentialsFor(context.application().id().tenant()));
        try (RotationLock lock = tester.controller().routing().rotations().lock()) {
            assertTrue(tester.controller().routing().rotations().availableRotations(lock)
                             .containsKey(new RotationId("rotation-id-01")),
                       "Rotation is unassigned");
        }
        context.flushDnsUpdates();
        Optional<Record> record = tester.controllerTester().findCname(dnsName1);
        assertTrue(record.isEmpty(), dnsName1 + " is removed");
    }
    String dnsName2 = "app2.tenant2.global.vespa.oath.cloud";
    { // app2 now receives the freed rotation-id-01
        var context = tester.newDeploymentContext("tenant2", "app2", "default");
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .endpoint("default", "foo")
                .region("us-west-1")
                .region("us-central-1")
                .build();
        context.submit(applicationPackage).deploy();
        assertEquals(1, tester.controllerTester().nameService().records().size());
        var record = tester.controllerTester().findCname(dnsName2);
        assertTrue(record.isPresent());
        assertEquals(dnsName2, record.get().name().asString());
        assertEquals("rotation-fqdn-01.", record.get().data().asString());
    }
    { // Re-created app1 gets the next free rotation, rotation-id-02
        var context = tester.newDeploymentContext("tenant1", "app1", "default");
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .endpoint("default", "foo")
                .region("us-west-1")
                .region("us-central-1")
                .build();
        context.submit(applicationPackage).deploy();
        assertEquals("rotation-id-02", context.instance().rotations().get(0).rotationId().asString());
        assertEquals(2, tester.controllerTester().nameService().records().size());
        var record1 = tester.controllerTester().findCname(dnsName1);
        assertTrue(record1.isPresent());
        assertEquals("rotation-fqdn-02.", record1.get().data().asString());
        var record2 = tester.controllerTester().findCname(dnsName2);
        assertTrue(record2.isPresent());
        assertEquals("rotation-fqdn-01.", record2.get().data().asString());
    }
}
// Verifies DNS handling for application-scoped (cross-instance, weighted) endpoints:
// per-deployment weights are passed to the config server, and the name service ends up
// with weighted ALIAS records (AWS zones) or plain CNAMEs (shared-routing zones).
@Test
void testDnsUpdatesForApplicationEndpoint() {
    ApplicationId beta = ApplicationId.from("tenant1", "app1", "beta");
    ApplicationId main = ApplicationId.from("tenant1", "app1", "main");
    var context = tester.newDeploymentContext(beta);
    // Five application endpoints with varying instance weights per region
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .instances("beta,main")
            .region("us-east-3")
            .region("us-west-1")
            .region("aws-us-east-1a")
            .region("aws-us-east-1b")
            .applicationEndpoint("a", "default",
                                 Map.of("aws-us-east-1a", Map.of(beta.instance(), 2,
                                                                 main.instance(), 8),
                                        "aws-us-east-1b", Map.of(main.instance(), 1)))
            .applicationEndpoint("b", "default", "aws-us-east-1a",
                                 Map.of(beta.instance(), 1,
                                        main.instance(), 1))
            .applicationEndpoint("c", "default", "aws-us-east-1b",
                                 Map.of(beta.instance(), 4))
            .applicationEndpoint("d", "default", "us-west-1",
                                 Map.of(main.instance(), 7,
                                        beta.instance(), 3))
            .applicationEndpoint("e", "default", "us-east-3",
                                 Map.of(main.instance(), 3))
            .build();
    context.submit(applicationPackage).deploy();
    ZoneId east3 = ZoneId.from("prod", "us-east-3");
    ZoneId west1 = ZoneId.from("prod", "us-west-1");
    ZoneId east1a = ZoneId.from("prod", "aws-us-east-1a");
    ZoneId east1b = ZoneId.from("prod", "aws-us-east-1b");
    // Expected endpoint name -> weight per (instance, zone) deployment
    Map<DeploymentId, Map<String, Integer>> deploymentEndpoints = Map.of(
            new DeploymentId(beta, east3), Map.of(),
            new DeploymentId(main, east3), Map.of("e.app1.tenant1.us-east-3-r.vespa.oath.cloud", 3),
            new DeploymentId(beta, west1), Map.of("d.app1.tenant1.us-west-1-r.vespa.oath.cloud", 3),
            new DeploymentId(main, west1), Map.of("d.app1.tenant1.us-west-1-r.vespa.oath.cloud", 7),
            new DeploymentId(beta, east1a), Map.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 2,
                                                   "b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 1),
            new DeploymentId(main, east1a), Map.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 8,
                                                   "b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 1),
            new DeploymentId(beta, east1b), Map.of("c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud", 4),
            new DeploymentId(main, east1b), Map.of("a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud", 1)
    );
    deploymentEndpoints.forEach((deployment, endpoints) -> {
        Set<ContainerEndpoint> expected = endpoints.entrySet().stream()
                                                   .map(kv -> new ContainerEndpoint("default", "application",
                                                                                    List.of(kv.getKey()),
                                                                                    OptionalInt.of(kv.getValue()),
                                                                                    tester.controller().zoneRegistry().routingMethod(deployment.zoneId())))
                                                   .collect(Collectors.toSet());
        assertEquals(expected,
                     tester.configServer().containerEndpoints().get(deployment),
                     "Endpoint names for " + deployment + " are passed to config server");
    });
    context.flushDnsUpdates();
    // Full expected record set: load-balancer CNAMEs plus weighted ALIASes for AWS zones,
    // and plain CNAMEs for the shared-routing zones (d, e)
    Set<Record> records = tester.controllerTester().nameService().records();
    assertEquals(new TreeSet<>(Set.of(new Record(Record.Type.CNAME,
                                                 RecordName.from("beta.app1.tenant1.aws-us-east-1a.vespa.oath.cloud"),
                                                 RecordData.from("lb-0--tenant1.app1.beta--prod.aws-us-east-1a.")),
                                      new Record(Record.Type.CNAME,
                                                 RecordName.from("beta.app1.tenant1.aws-us-east-1b.vespa.oath.cloud"),
                                                 RecordData.from("lb-0--tenant1.app1.beta--prod.aws-us-east-1b.")),
                                      new Record(Record.Type.CNAME,
                                                 RecordName.from("main.app1.tenant1.aws-us-east-1a.vespa.oath.cloud"),
                                                 RecordData.from("lb-0--tenant1.app1.main--prod.aws-us-east-1a.")),
                                      new Record(Record.Type.CNAME,
                                                 RecordName.from("main.app1.tenant1.aws-us-east-1b.vespa.oath.cloud"),
                                                 RecordData.from("lb-0--tenant1.app1.main--prod.aws-us-east-1b.")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/2")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/8")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/1")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/4")),
                                      new Record(Record.Type.CNAME,
                                                 RecordName.from("d.app1.tenant1.us-west-1-r.vespa.oath.cloud"),
                                                 RecordData.from("vip.prod.us-west-1.")),
                                      new Record(Record.Type.CNAME,
                                                 RecordName.from("e.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
                                                 RecordData.from("vip.prod.us-east-3.")))),
                 new TreeSet<>(records));
    // Application-scope endpoints expose the expected DNS names
    List<String> endpointDnsNames = tester.controller().routing().declaredEndpointsOf(context.application())
                                          .scope(Endpoint.Scope.application)
                                          .sortedBy(comparing(Endpoint::dnsName))
                                          .mapToList(Endpoint::dnsName);
    assertEquals(List.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud",
                         "a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud",
                         "b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud",
                         "c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud",
                         "d.app1.tenant1.us-west-1-r.vespa.oath.cloud",
                         "e.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
                 endpointDnsNames);
}
// Dev deployments activate directly: no job status is recorded, no deployment spec is
// stored, endpoints use the zone's configured routing method, and deactivation clears
// the stored metadata.
@Test
void testDevDeployment() {
    ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]);
    var context = tester.newDeploymentContext();
    ZoneId zone = ZoneId.from("dev", "us-east-1");
    tester.controllerTester().zoneRegistry()
          .setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.sharedLayer4);
    context.runJob(zone, applicationPackage);
    assertTrue(tester.configServer().application(context.instanceId(), zone).get().activated(),
               "Application deployed and activated");
    assertTrue(context.instanceJobs().isEmpty(),
               "No job status added");
    assertEquals(DeploymentSpec.empty, context.application().deploymentSpec(), "DeploymentSpec is not stored");
    Set<RoutingMethod> routingMethods = tester.controller().routing().readEndpointsOf(context.deploymentIdIn(zone))
                                              .asList()
                                              .stream()
                                              .map(Endpoint::routingMethod)
                                              .collect(Collectors.toSet());
    // Fixed argument order: JUnit's assertEquals takes (expected, actual); the arguments
    // were swapped here, which produces misleading failure messages.
    assertEquals(Set.of(RoutingMethod.sharedLayer4), routingMethods);
    assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
                        .getMeta(new DeploymentId(context.instanceId(), zone))
                        .get(tester.clock().instant()));
    tester.clock().advance(Duration.ofSeconds(1));
    tester.controller().applications().deactivate(context.instanceId(), zone);
    // After deactivation the stored metadata for the deployment is empty
    assertArrayEquals(new byte[0],
                      tester.controllerTester().serviceRegistry().applicationStore()
                            .getMeta(new DeploymentId(context.instanceId(), zone))
                            .get(tester.clock().instant()));
}
// Dev deployments with version-incompatibility ("8" is incompatible with 7.x here):
// the chosen platform must be compatible with the package's compile version and any
// pinned major, failing with a descriptive error otherwise.
@Test
void testDevDeploymentWithIncompatibleVersions() {
    Version version1 = new Version("7");
    Version version2 = new Version("7.5");
    Version version3 = new Version("8");
    var context = tester.newDeploymentContext();
    tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
    tester.controllerTester().upgradeSystem(version2);
    // Keep 7.5 in the version registry by having an application on it
    tester.newDeploymentContext("keep", "v2", "alive").submit().deploy();
    ZoneId zone = ZoneId.from("dev", "us-east-1");
    // Compiled against 7: compatible with the current 7.5 platform
    context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).build());
    assertEquals(version2, context.deployment(zone).version());
    assertEquals(Optional.of(version1), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
    try {
        // Major 8 pinned but no 8.x platform exists yet
        context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
        fail("Should fail when specifying a major that does not yet exist");
    }
    catch (IllegalArgumentException e) {
        assertEquals("no platforms were found for major version 8 specified in deployment.xml", e.getMessage());
    }
    try {
        // Compiled against 8, which is incompatible with every existing platform
        context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
        fail("Should fail when compiled against a version which is only compatible with not-yet-existent versions");
    }
    catch (IllegalArgumentException e) {
        assertEquals("no platforms are compatible with compile version 8", e.getMessage());
    }
    tester.controllerTester().upgradeSystem(version3);
    try {
        // Major 8 now exists, but compile version 7 is incompatible with it
        context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
        fail("Should fail when specifying a major which is incompatible with compile version");
    }
    catch (IllegalArgumentException e) {
        assertEquals("no platforms on major version 8 specified in deployment.xml are compatible with compile version 7", e.getMessage());
    }
    // Compile version 8 deploys on the 8 platform, with or without a pinned major
    context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).majorVersion(8).build());
    assertEquals(version3, context.deployment(zone).version());
    assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
    context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
    assertEquals(version3, context.deployment(zone).version());
    assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
}
// Suspension state is tracked per deployment: suspending one zone's deployment must
// not affect the other.
@Test
void testSuspension() {
    var context = tester.newDeploymentContext();
    var applicationPackage = new ApplicationPackageBuilder()
            .region("us-west-1")
            .region("us-east-3")
            .build();
    context.submit(applicationPackage).deploy();
    DeploymentId westDeployment = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
    DeploymentId eastDeployment = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-east-3")));
    // Nothing is suspended initially
    assertFalse(tester.configServer().isSuspended(westDeployment));
    assertFalse(tester.configServer().isSuspended(eastDeployment));
    // Suspend only the west deployment
    tester.configServer().setSuspension(westDeployment, true);
    assertTrue(tester.configServer().isSuspended(westDeployment));
    assertFalse(tester.configServer().isSuspended(eastDeployment));
}
// Deactivating an already-deactivated deployment must be idempotent and not throw.
@Test
void testDeletingApplicationThatHasAlreadyBeenDeleted() {
    var context = tester.newDeploymentContext();
    ZoneId westZone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
    var applicationPackage = new ApplicationPackageBuilder()
            .region("us-west-1")
            .build();
    context.submit(applicationPackage).runJob(westZone, applicationPackage);
    // Second deactivate call is a no-op rather than an error
    tester.controller().applications().deactivate(context.instanceId(), westZone);
    tester.controller().applications().deactivate(context.instanceId(), westZone);
}
// Warnings reported by the config server during deployment must surface in the
// deployment's metrics.
@Test
void testDeployApplicationWithWarnings() {
    var context = tester.newDeploymentContext();
    var applicationPackage = new ApplicationPackageBuilder()
            .region("us-west-1")
            .build();
    ZoneId zone = ZoneId.from("prod", "us-west-1");
    int expectedWarnings = 3;
    tester.configServer().generateWarnings(context.deploymentIdIn(zone), expectedWarnings);
    context.submit(applicationPackage).deploy();
    assertEquals(expectedWarnings,
                 context.deployment(zone).metrics().warnings().get(DeploymentMetrics.Warning.all).intValue());
}
// Endpoint certificates are provisioned on prod deployment with the expected set of
// SAN entries, reused on redeployment, and also provisioned in dev zones.
@Test
void testDeploySelectivelyProvisionsCertificate() {
    Function<Instance, Optional<EndpointCertificateMetadata>> certificate = (application) -> tester.controller().curator().readEndpointCertificateMetadata(application.id());
    var context1 = tester.newDeploymentContext("tenant1", "app1", "default");
    var prodZone = ZoneId.from("prod", "us-west-1");
    var stagingZone = ZoneId.from("staging", "us-east-3");
    var testZone = ZoneId.from("test", "us-east-1");
    tester.controllerTester().zoneRegistry().exclusiveRoutingIn(ZoneApiMock.from(prodZone));
    var applicationPackage = new ApplicationPackageBuilder().athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
                                                            .region(prodZone.region())
                                                            .build();
    context1.submit(applicationPackage).deploy();
    var cert = certificate.apply(context1.instance());
    assertTrue(cert.isPresent(), "Provisions certificate in " + Environment.prod);
    // SANs cover the random-prefix name, global names (plain and wildcard), and the
    // plain/wildcard names for the prod, test and staging zones
    assertEquals(Stream.concat(Stream.of("vznqtz7a5ygwjkbhhj7ymxvlrekgt4l6g.vespa.oath.cloud",
                                         "app1.tenant1.global.vespa.oath.cloud",
                                         "*.app1.tenant1.global.vespa.oath.cloud"),
                               Stream.of(prodZone, testZone, stagingZone)
                                     .flatMap(zone -> Stream.of("", "*.")
                                                            .map(prefix -> prefix + "app1.tenant1." + zone.region().value() +
                                                                           (zone.environment() == Environment.prod ? "" : "." + zone.environment().value()) +
                                                                           ".vespa.oath.cloud")))
                       .collect(Collectors.toUnmodifiableSet()),
                 Set.copyOf(tester.controllerTester().serviceRegistry().endpointCertificateMock().dnsNamesOf(context1.instanceId())));
    // Redeployment keeps the existing certificate
    context1.submit(applicationPackage).deploy();
    assertEquals(cert, certificate.apply(context1.instance()));
    // Dev deployment also provisions a certificate
    var context2 = tester.newDeploymentContext("tenant1", "app2", "default");
    var devZone = ZoneId.from("dev", "us-east-1");
    context2.runJob(devZone, applicationPackage);
    assertTrue(tester.configServer().application(context2.instanceId(), devZone).get().activated(),
               "Application deployed and activated");
    assertTrue(certificate.apply(context2.instance()).isPresent(), "Provisions certificate also in zone with routing layer");
}
// A single global endpoint may not span regions in different clouds; submission must
// fail with a message naming the offending endpoint.
@Test
void testDeployWithGlobalEndpointsInMultipleClouds() {
    tester.controllerTester().zoneRegistry().setZones(
            ZoneApiMock.fromId("test.us-west-1"),
            ZoneApiMock.fromId("staging.us-west-1"),
            ZoneApiMock.fromId("prod.us-west-1"),
            ZoneApiMock.newBuilder().with(CloudName.AWS).withId("prod.aws-us-east-1").build()
    );
    var context = tester.newDeploymentContext();
    // Endpoint without explicit regions defaults to all, which mixes clouds here
    var applicationPackage = new ApplicationPackageBuilder()
            .region("aws-us-east-1")
            .region("us-west-1")
            .endpoint("default", "default")
            .build();
    try {
        context.submit(applicationPackage);
        fail("Expected exception");
    } catch (IllegalArgumentException e) {
        assertEquals("Endpoint 'default' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage());
    }
    // A single-cloud endpoint is fine; the explicitly mixed one ("foo") is rejected
    var applicationPackage2 = new ApplicationPackageBuilder()
            .region("aws-us-east-1")
            .region("us-west-1")
            .endpoint("aws", "default", "aws-us-east-1")
            .endpoint("foo", "default", "aws-us-east-1", "us-west-1")
            .build();
    try {
        context.submit(applicationPackage2);
        fail("Expected exception");
    } catch (IllegalArgumentException e) {
        assertEquals("Endpoint 'foo' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage());
    }
}
// Submissions without source-revision information must still deploy successfully.
@Test
void testDeployWithoutSourceRevision() {
    var context = tester.newDeploymentContext();
    var packageWithoutRevision = new ApplicationPackageBuilder()
            .upgradePolicy("default")
            .region("us-west-1")
            .build();
    context.submit(packageWithoutRevision, Optional.empty()).deploy();
    assertEquals(1, context.instance().deployments().size(), "Deployed application");
}
// Global endpoints spanning zones with different routing methods (sharedLayer4 vs
// exclusive) produce the expected mix of ALIAS and CNAME records.
@Test
void testDeployWithGlobalEndpointsAndMultipleRoutingMethods() {
    var context = tester.newDeploymentContext();
    var zone1 = ZoneId.from("prod", "us-west-1");
    var zone2 = ZoneId.from("prod", "us-east-3");
    var applicationPackage = new ApplicationPackageBuilder()
            .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
            .endpoint("default", "default", zone1.region().value(), zone2.region().value())
            .endpoint("east", "default", zone2.region().value())
            .region(zone1.region())
            .region(zone2.region())
            .build();
    // zone1 uses shared routing, zone2 exclusive routing
    tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone1), RoutingMethod.sharedLayer4);
    tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone2), RoutingMethod.exclusive);
    context.submit(applicationPackage).deploy();
    // Expected: weighted record for the exclusive zone, latency alias for the "east"
    // global endpoint, and a CNAME for the zone endpoint
    var expectedRecords = List.of(
            new Record(Record.Type.ALIAS,
                       RecordName.from("application.tenant.us-east-3-w.vespa.oath.cloud"),
                       new WeightedAliasTarget(HostName.of("lb-0--tenant.application.default--prod.us-east-3"),
                                               "dns-zone-1", ZoneId.from("prod.us-east-3"), 1).pack()),
            new Record(Record.Type.ALIAS,
                       RecordName.from("east.application.tenant.global.vespa.oath.cloud"),
                       new LatencyAliasTarget(HostName.of("application.tenant.us-east-3-w.vespa.oath.cloud"),
                                              "dns-zone-1", ZoneId.from("prod.us-east-3")).pack()),
            new Record(Record.Type.CNAME,
                       RecordName.from("application.tenant.us-east-3.vespa.oath.cloud"),
                       RecordData.from("lb-0--tenant.application.default--prod.us-east-3.")));
    assertEquals(expectedRecords, List.copyOf(tester.controllerTester().nameService().records()));
}
// With exclusive (direct) routing and no rotations configured, only endpoint DNS names
// (no rotation ids) are passed to the config server, scoped to each endpoint's regions.
@Test
void testDeploymentDirectRouting() {
    // Local tester with an empty rotations config, shadowing the field on purpose
    DeploymentTester tester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build(), main));
    var context = tester.newDeploymentContext();
    var zone1 = ZoneId.from("prod", "us-west-1");
    var zone2 = ZoneId.from("prod", "us-east-3");
    var zone3 = ZoneId.from("prod", "eu-west-1");
    tester.controllerTester().zoneRegistry()
          .exclusiveRoutingIn(ZoneApiMock.from(zone1), ZoneApiMock.from(zone2), ZoneApiMock.from(zone3));
    var applicationPackageBuilder = new ApplicationPackageBuilder()
            .region(zone1.region())
            .region(zone2.region())
            .region(zone3.region())
            .endpoint("default", "default")
            .endpoint("foo", "qrs")
            .endpoint("us", "default", zone1.region().value(), zone2.region().value())
            .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"));
    context.submit(applicationPackageBuilder.build()).deploy();
    // The "us" endpoint covers only zone1 and zone2
    for (var zone : List.of(zone1, zone2)) {
        assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
                            "foo.application.tenant.global.vespa.oath.cloud",
                            "us.application.tenant.global.vespa.oath.cloud"),
                     tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
                     "Expected container endpoints in " + zone);
    }
    assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
                        "foo.application.tenant.global.vespa.oath.cloud"),
                 tester.configServer().containerEndpointNames(context.deploymentIdIn(zone3)),
                 "Expected container endpoints in " + zone3);
}
// Moving a global endpoint to a different cluster counts as a global-endpoint change:
// rejected without an override, accepted with one.
@Test
void testChangeEndpointCluster() {
    var context = tester.newDeploymentContext();
    var west = ZoneId.from("prod", "us-west-1");
    var east = ZoneId.from("prod", "us-east-3");
    var applicationPackage = new ApplicationPackageBuilder()
            .endpoint("default", "foo")
            .region(west.region().value())
            .region(east.region().value())
            .build();
    context.submit(applicationPackage).deploy();
    assertEquals(ClusterSpec.Id.from("foo"), tester.applications().requireInstance(context.instanceId())
                                                   .rotations().get(0).clusterId());
    // Changing the endpoint's cluster from "foo" to "bar" without an override fails
    applicationPackage = new ApplicationPackageBuilder()
            .endpoint("default", "bar")
            .region(west.region().value())
            .region(east.region().value())
            .build();
    try {
        context.submit(applicationPackage).deploy();
        fail("Expected exception");
    } catch (IllegalArgumentException e) {
        assertEquals("global-endpoint-change: application 'tenant.application' has endpoints [endpoint " +
                     "'default' (cluster foo) -> us-east-3, us-west-1], but does not include all of these in " +
                     "deployment.xml. Deploying given deployment.xml will remove " +
                     "[endpoint 'default' (cluster foo) -> us-east-3, us-west-1] and add " +
                     "[endpoint 'default' (cluster bar) -> us-east-3, us-west-1]. To allow this add " +
                     "<allow until='yyyy-mm-dd'>global-endpoint-change</allow> to validation-overrides.xml, see " +
                     // NOTE(review): the next line appears truncated in this extraction (string literal cut at
                     // "https:" — everything after "//" seems to have been stripped); verify against the repository.
                     "https:
    }
    // With the override, the cluster change is accepted
    applicationPackage = new ApplicationPackageBuilder()
            .endpoint("default", "bar")
            .region(west.region().value())
            .region(east.region().value())
            .allow(ValidationId.globalEndpointChange)
            .build();
    context.submit(applicationPackage).deploy();
    assertEquals(ClusterSpec.Id.from("bar"), tester.applications().requireInstance(context.instanceId())
                                                   .rotations().get(0).clusterId());
}
@Test
// Verifies that applications().readable() skips applications whose stored data
// cannot be deserialized, while asList() fails loudly on the same corruption.
void testReadableApplications() {
var db = new MockCuratorDb(tester.controller().system());
var tester = new DeploymentTester(new ControllerTester(db));
// Create two applications.
var app1 = tester.newDeploymentContext("t1", "a1", "default")
.submit()
.deploy();
var app2 = tester.newDeploymentContext("t2", "a2", "default")
.submit()
.deploy();
assertEquals(2, tester.applications().readable().size());
// Write garbage bytes over app2's stored record: readable() silently skips it ...
db.curator().set(Path.fromString("/controller/v1/applications/" + app2.application().id().serialized()),
new byte[]{(byte) 0xDE, (byte) 0xAD});
assertEquals(1, tester.applications().readable().size());
// ... while asList() throws. assertThrows keeps the style consistent with
// the other assertions in this class (see testCompileVersion).
assertThrows(Exception.class, () -> tester.applications().asList());
// The unaffected application can still be redeployed.
app1.submit().deploy();
}
@Test
// Verifies that an endpoint ID in one instance may not produce the same endpoint
// name as another instance's endpoint (here: 'default' in instance 'dev' vs.
// 'dev' in instance 'default').
void testClashingEndpointIdAndInstanceName() {
String deploymentXml = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" +
" <instance id=\"default\">\n" +
" <prod>\n" +
" <region active=\"true\">us-west-1</region>\n" +
" </prod>\n" +
" <endpoints>\n" +
" <endpoint id=\"dev\" container-id=\"qrs\"/>\n" +
" </endpoints>\n" +
" </instance>\n" +
" <instance id=\"dev\">\n" +
" <prod>\n" +
" <region active=\"true\">us-west-1</region>\n" +
" </prod>\n" +
" <endpoints>\n" +
" <endpoint id=\"default\" container-id=\"qrs\"/>\n" +
" </endpoints>\n" +
" </instance>\n" +
"</deployment>\n";
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
// Submission must fail with a clash error; assertThrows keeps the style
// consistent with the other assertions in this class (see testCompileVersion).
assertEquals("Endpoint with ID 'default' in instance 'dev' clashes with endpoint 'dev' in instance 'default'",
assertThrows(IllegalArgumentException.class,
() -> tester.newDeploymentContext().submit(applicationPackage))
.getMessage());
}
@Test
// Verifies that submitting a test package containing staging tests but no
// staging setup produces a warning notification for the application.
void testTestPackageWarnings() {
String deploymentXml = "<deployment version='1.0'>\n" +
" <prod>\n" +
" <region>us-west-1</region>\n" +
" </prod>\n" +
"</deployment>\n";
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
// A test package with a staging test but no staging setup file.
byte[] testPackage = ApplicationPackage.filesZip(Map.of("tests/staging-test/foo.json", new byte[0]));
var app = tester.newDeploymentContext();
tester.jobs().submit(app.application().id(), Submission.basic(applicationPackage, testPackage), 1);
// Exactly one warning notification is expected for the submission.
assertEquals(List.of(new Notification(tester.clock().instant(),
Type.testPackage,
Level.warning,
NotificationSource.from(app.application().id()),
List.of("test package has staging tests, so it should also include staging setup",
// NOTE(review): the next line appears truncated (unterminated string
// literal, likely a stripped URL) — restore from upstream source.
"see https:
tester.controller().notificationsDb().listNotifications(NotificationSource.from(app.application().id()), true));
}
@Test
// Verifies compileVersion(): how the suggested compile version tracks deployed
// platform versions, version confidence, an optional major-version pin, and the
// INCOMPATIBLE_VERSIONS flag. The statement order matters throughout.
void testCompileVersion() {
DeploymentContext context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build();
TenantAndApplicationId application = TenantAndApplicationId.from(context.instanceId());
// System on 7.1 with normal confidence: 7.1 is suggested; major 8 does not exist yet.
Version version0 = Version.fromString("7.1");
tester.controllerTester().upgradeSystem(version0);
tester.upgrader().overrideConfidence(version0, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
context.submit(applicationPackage).deploy();
// 7.2 becomes available, but the app still runs 7.1, so 7.1 remains suggested
// until the app itself is upgraded.
Version version1 = Version.fromString("7.2");
tester.controllerTester().upgradeSystem(version1);
tester.upgrader().overrideConfidence(version1, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
tester.upgrader().maintain();
context.deployPlatform(version1);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
// A second app keeps 7.x alive in the system; 'newApp' has no deployments.
DeploymentContext legacyApp = tester.newDeploymentContext("avoid", "gc", "default").submit().deploy();
TenantAndApplicationId newApp = TenantAndApplicationId.from("new", "app");
// 8.0 at low confidence: not suggested for anyone, and major 8 is still unavailable.
Version version2 = Version.fromString("8.0");
tester.controllerTester().upgradeSystem(version2);
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
// 8.0 at normal confidence: deployed apps stay on 7.2, but a new app gets 8.0.
tester.upgrader().overrideConfidence(version2, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));
// Mark major 8 as incompatible with 7: pinning to 8 now yields 8.0.
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));
// Back to low confidence: major 8 becomes unavailable again.
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));
// App compiles against 8.0 and deploys; 8.0 is then suggested even at low confidence.
tester.upgrader().overrideConfidence(version2, Confidence.normal);
tester.controllerTester().computeVersionStatus();
context.submit(new ApplicationPackageBuilder().region("us-west-1").compileVersion(version2).build()).deploy();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
// Broken confidence: no released 8.x qualifies any more.
tester.upgrader().overrideConfidence(version2, Confidence.broken);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals("no suitable, released compile version exists",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.empty()))
.getMessage());
assertEquals("no suitable, released compile version exists for specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
// Clear the incompatibility: 7.2 becomes a valid answer for any major again.
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of(), String.class);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
}
@Test
// Verifies enclave (custom cloud account) deployment: the account must be granted
// through the CLOUD_ACCOUNTS flag AND each zone must be configured in that account.
void testCloudAccount() {
DeploymentContext context = tester.newDeploymentContext();
ZoneId devZone = devUsEast1.zone();
ZoneId prodZone = productionUsWest1.zone();
String cloudAccount = "012345678912";
var applicationPackage = new ApplicationPackageBuilder()
.cloudAccount(cloudAccount)
.region(prodZone.region())
.build();
// Account not granted to the tenant yet: deployment fails.
context.submit(applicationPackage).runJobExpectingFailure(systemTest, "Requested cloud account '012345678912' is not valid for tenant 'tenant'");
// Grant the account, but the zones are still not configured in it: still fails.
tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class);
context.runJobExpectingFailure(systemTest, "Zone test.us-east-1 is not configured in requested cloud account '012345678912'")
.abortJob(stagingTest);
// Configure the test, staging and prod zones in the account: deployment succeeds.
tester.controllerTester().zoneRegistry().configureCloudAccount(CloudAccount.from(cloudAccount),
systemTest.zone(),
stagingTest.zone(),
prodZone);
context.submit(applicationPackage).deploy();
// Dev zone must be configured separately before a dev deployment works.
tester.controllerTester().zoneRegistry().configureCloudAccount(CloudAccount.from(cloudAccount), devZone);
context.runJob(devZone, applicationPackage);
// All four deployments report the requested account to the config server.
for (var zoneId : List.of(systemTest.zone(), stagingTest.zone(), devZone, prodZone)) {
assertEquals(cloudAccount, tester.controllerTester().configServer()
.cloudAccount(context.deploymentIdIn(zoneId))
.get().value());
}
}
@Test
// Verifies that an element deprecated on an earlier major version is rejected
// when the package is compiled against a newer major (8.1 vs. deprecation on 7).
// Fix: the annotation was duplicated (@Test twice); @Test is not @Repeatable,
// so the duplicate is a compile error and has been removed.
void testSubmitWithElementDeprecatedOnPreviousMajor() {
DeploymentContext context = tester.newDeploymentContext();
var applicationPackage = new ApplicationPackageBuilder()
.compileVersion(Version.fromString("8.1"))
.region("us-west-1")
.globalServiceId("qrs")
.build();
try {
context.submit(applicationPackage).deploy();
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("Element 'prod' contains attribute 'global-service-id' deprecated since major version 7"));
}
}
} | |
The `012345678912` account is used for `prodZone1` and access to do this must be granted through the feature flag. | void testCloudAccountWithDefaultOverride() {
// Deploy to two prod zones with a tenant cloud account; prodZone2 overrides the
// account back to "default". The account must be granted via the CLOUD_ACCOUNTS flag.
var context = tester.newDeploymentContext();
var prodZone1 = productionUsEast3.zone();
var prodZone2 = productionUsWest1.zone();
var cloudAccount = "012345678912";
var application = new ApplicationPackageBuilder()
.cloudAccount(cloudAccount)
.region(prodZone1.region())
.region(prodZone2.region(), "default")
.build();
tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class);
// Use the CloudAccount.from(...) static factory, consistent with how CloudAccount
// is created everywhere else in this class (was: new CloudAccount(cloudAccount)).
tester.controllerTester().zoneRegistry().configureCloudAccount(CloudAccount.from(cloudAccount),
systemTest.zone(),
stagingTest.zone(),
prodZone1);
context.submit(application).deploy();
// prodZone1 uses the tenant account; prodZone2 falls back to the default account.
assertEquals(cloudAccount, tester.controllerTester().configServer().cloudAccount(context.deploymentIdIn(prodZone1)).get().value());
assertEquals(Optional.empty(), tester.controllerTester().configServer().cloudAccount(context.deploymentIdIn(prodZone2)));
} | void testCloudAccountWithDefaultOverride() {
// Deploy to two prod zones with a tenant cloud account; prodZone2 overrides the
// account back to "default". The account must be granted via the CLOUD_ACCOUNTS flag.
var context = tester.newDeploymentContext();
var prodZone1 = productionUsEast3.zone();
var prodZone2 = productionUsWest1.zone();
var cloudAccount = "012345678912";
var application = new ApplicationPackageBuilder()
.cloudAccount(cloudAccount)
.region(prodZone1.region())
.region(prodZone2.region(), "default")
.build();
tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class);
// Only prodZone1 (plus test/staging) is configured in the tenant account.
tester.controllerTester().zoneRegistry().configureCloudAccount(CloudAccount.from(cloudAccount),
systemTest.zone(),
stagingTest.zone(),
prodZone1);
context.submit(application).deploy();
// prodZone1 uses the tenant account; prodZone2 falls back to the default account.
assertEquals(cloudAccount, tester.controllerTester().configServer().cloudAccount(context.deploymentIdIn(prodZone1)).get().value());
assertEquals(Optional.empty(), tester.controllerTester().configServer().cloudAccount(context.deploymentIdIn(prodZone2)));
} | class ControllerTest {
private final DeploymentTester tester = new DeploymentTester();
@Test
// End-to-end deployment lifecycle: submit/deploy, controller restart survival,
// deployment-spec validation failures, region removal with override, and full
// application deletion including stored metadata.
void testDeployment() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.explicitEnvironment(Environment.dev, Environment.perf)
.region("us-west-1")
.region("us-east-3")
.build();
Version version1 = tester.configServer().initialVersion();
var context = tester.newDeploymentContext();
context.submit(applicationPackage);
assertEquals(ApplicationVersion.from(RevisionId.forProduction(1), DeploymentContext.defaultSourceRevision, "a@b", new Version("6.1"), Instant.ofEpochSecond(1)),
context.application().revisions().get(context.instance().change().revision().get()),
"Application version is known from completion of initial job");
context.runJob(systemTest);
context.runJob(stagingTest);
RevisionId applicationVersion = context.instance().change().revision().get();
assertTrue(applicationVersion.isProduction(), "Application version has been set during deployment");
// Let the us-west-1 job time out; 4 jobs (test, staging, 2 prod) are known.
tester.triggerJobs();
tester.clock().advance(Duration.ofSeconds(1));
context.timeOutUpgrade(productionUsWest1);
assertEquals(4, context.instanceJobs().size());
tester.triggerJobs();
// A new controller instance must see the same tenant and application state.
tester.controllerTester().createNewController();
assertNotNull(tester.controller().tenants().get(TenantName.from("tenant1")));
assertNotNull(tester.controller().applications().requireInstance(context.instanceId()));
// Resubmission aborts the timed-out job and completes all prod deployments.
context.submit(applicationPackage);
context.runJob(systemTest);
context.runJob(stagingTest);
context.triggerJobs().jobAborted(productionUsWest1);
context.runJob(productionUsWest1);
tester.triggerJobs();
context.runJob(productionUsEast3);
assertEquals(4, context.instanceJobs().size());
// Invalid instance name is rejected.
applicationPackage = new ApplicationPackageBuilder()
.instances("hellO")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception due to illegal deployment spec.");
}
catch (IllegalArgumentException e) {
assertEquals("Invalid id 'hellO'. Tenant, application and instance names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.", e.getMessage());
}
// Unknown region is rejected.
applicationPackage = new ApplicationPackageBuilder()
.region("deep-space-9")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception due to illegal deployment spec.");
}
catch (IllegalArgumentException e) {
assertEquals("Zone prod.deep-space-9 in deployment spec was not found in this system!", e.getMessage());
}
// Removing a deployed region without a validation override is rejected.
applicationPackage = new ApplicationPackageBuilder()
.region("us-east-3")
.build();
try {
assertTrue(context.instance().deployments().containsKey(ZoneId.from("prod", "us-west-1")));
context.submit(applicationPackage);
fail("Expected exception due to illegal production deployment removal");
}
catch (IllegalArgumentException e) {
assertEquals("deployment-removal: application instance 'tenant.application.default' is deployed in us-west-1, " +
"but this instance and region combination is removed from deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval),
e.getMessage());
}
assertNotNull(context.instance().deployments().get(productionUsWest1.zone()),
"Zone was not removed");
// With the deployment-removal override, the zone and its job are removed.
applicationPackage = new ApplicationPackageBuilder()
.allow(ValidationId.deploymentRemoval)
.upgradePolicy("default")
.region("us-east-3")
.build();
context.submit(applicationPackage);
assertNull(context.instance().deployments().get(productionUsWest1.zone()),
"Zone was removed");
assertNull(context.instanceJobs().get(productionUsWest1), "Deployment job was removed");
// Metadata is still stored for the instance until the application is deleted.
assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.instanceId())
.get(tester.clock().instant()));
tester.clock().advance(Duration.ofSeconds(1));
context.submit(ApplicationPackage.deploymentRemoval());
tester.clock().advance(Duration.ofSeconds(1));
context.submit(ApplicationPackage.deploymentRemoval());
tester.applications().deleteApplication(context.application().id(),
tester.controllerTester().credentialsFor(context.instanceId().tenant()));
// After deletion, instance metadata is emptied and zone metadata is gone.
assertArrayEquals(new byte[0],
tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.instanceId())
.get(tester.clock().instant()));
assertNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.deploymentIdIn(productionUsWest1.zone())));
}
@Test
// Verifies that routing status is tracked per deployment: taking one zone out
// of rotation does not affect the other zone of the same global endpoint.
void testGlobalRotationStatus() {
var app = tester.newDeploymentContext();
var usWest = ZoneId.from("prod", "us-west-1");
var usEast = ZoneId.from("prod", "us-east-3");
var pkg = new ApplicationPackageBuilder()
.region(usWest.region())
.region(usEast.region())
.endpoint("default", "default", usWest.region().value(), usEast.region().value())
.build();
app.submit(pkg).deploy();
// A fresh deployment starts with routing status 'in'.
DeploymentRoutingContext westRouting = tester.controller().routing().of(app.deploymentIdIn(usWest));
RoutingStatus before = westRouting.routingStatus();
assertEquals(RoutingStatus.Value.in, before.value());
// An operator takes the west deployment out of rotation ...
westRouting.setRoutingStatus(RoutingStatus.Value.out, RoutingStatus.Agent.operator);
RoutingStatus after = westRouting.routingStatus();
assertEquals(RoutingStatus.Value.out, after.value());
// ... while the east deployment stays in rotation.
RoutingStatus eastStatus = tester.controller().routing().of(app.deploymentIdIn(usEast)).routingStatus();
assertEquals(RoutingStatus.Value.in, eastStatus.value());
}
@Test
// Verifies that each instance of a multi-instance application gets its own
// global endpoint, container endpoints, rotation CNAME records, and DNS names.
void testDnsUpdatesForGlobalEndpoint() {
var betaContext = tester.newDeploymentContext("tenant1", "app1", "beta");
var defaultContext = tester.newDeploymentContext("tenant1", "app1", "default");
ZoneId usWest = ZoneId.from("prod.us-west-1");
ZoneId usCentral = ZoneId.from("prod.us-central-1");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,default")
.endpoint("default", "foo")
.region(usWest.region())
.region(usCentral.region())
.build();
tester.controllerTester().zoneRegistry().setRoutingMethod(List.of(ZoneApiMock.from(usWest), ZoneApiMock.from(usCentral)),
RoutingMethod.sharedLayer4);
betaContext.submit(applicationPackage).deploy();
// Instance 'beta': rotation-id-01 and the beta-prefixed endpoint name are
// passed to the config server for every deployment.
{
Collection<Deployment> betaDeployments = betaContext.instance().deployments().values();
assertFalse(betaDeployments.isEmpty());
Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
"global",
List.of("beta.app1.tenant1.global.vespa.oath.cloud",
"rotation-id-01"),
OptionalInt.empty(),
RoutingMethod.sharedLayer4));
for (Deployment deployment : betaDeployments) {
assertEquals(containerEndpoints,
tester.configServer().containerEndpoints()
.get(betaContext.deploymentIdIn(deployment.zone())));
}
betaContext.flushDnsUpdates();
}
// Instance 'default': rotation-id-02 and the unprefixed endpoint name.
{
Collection<Deployment> defaultDeployments = defaultContext.instance().deployments().values();
assertFalse(defaultDeployments.isEmpty());
Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
"global",
List.of("app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02"),
OptionalInt.empty(),
RoutingMethod.sharedLayer4));
for (Deployment deployment : defaultDeployments) {
assertEquals(containerEndpoints,
tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone())));
}
defaultContext.flushDnsUpdates();
}
// Each instance's endpoint name resolves (CNAME) to its own rotation FQDN.
Map<String, String> rotationCnames = Map.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.",
"app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02.");
rotationCnames.forEach((cname, data) -> {
var record = tester.controllerTester().findCname(cname);
assertTrue(record.isPresent());
assertEquals(cname, record.get().name().asString());
assertEquals(data, record.get().data().asString());
});
// The declared global DNS names per instance match the expected endpoint names.
Map<ApplicationId, Set<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), Set.of("beta.app1.tenant1.global.vespa.oath.cloud"),
defaultContext.instanceId(), Set.of("app1.tenant1.global.vespa.oath.cloud"));
globalDnsNamesByInstance.forEach((instance, dnsNames) -> {
Set<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance)
.scope(Endpoint.Scope.global)
.asList().stream()
.map(Endpoint::dnsName)
.collect(Collectors.toSet());
assertEquals(dnsNames, actualDnsNames, "Global DNS names for " + instance);
});
}
@Test
// Verifies that the legacy 'global-service-id' syntax still assigns a rotation,
// passes endpoint names to the config server, and creates the CNAME record.
void testDnsUpdatesForGlobalEndpointLegacySyntax() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.globalServiceId("foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
// Both zones receive the rotation ID and the global endpoint name.
Collection<Deployment> deployments = context.instance().deployments().values();
assertFalse(deployments.isEmpty());
for (Deployment deployment : deployments) {
assertEquals(Set.of("rotation-id-01",
"app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
"Rotation names are passed to config server in " + deployment.zone());
}
context.flushDnsUpdates();
// Exactly one CNAME is created, pointing at the rotation FQDN.
assertEquals(1, tester.controllerTester().nameService().records().size());
Optional<Record> record = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
assertTrue(record.isPresent());
assertEquals("app1.tenant1.global.vespa.oath.cloud", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
// A single declared global DNS name exists for the instance.
List<String> globalDnsNames = tester.controller().routing().readDeclaredEndpointsOf(context.instanceId())
.scope(Endpoint.Scope.global)
.sortedBy(comparing(Endpoint::dnsName))
.mapToList(Endpoint::dnsName);
assertEquals(List.of("app1.tenant1.global.vespa.oath.cloud"),
globalDnsNames);
}
@Test
// Verifies DNS and config-server updates for an application declaring four global
// endpoints, one of which ('west') only covers us-west-1.
void testDnsUpdatesForMultipleGlobalEndpoints() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("foobar", "qrs", "us-west-1", "us-central-1")
.endpoint("default", "qrs", "us-west-1", "us-central-1")
.endpoint("all", "qrs")
.endpoint("west", "qrs", "us-west-1")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
Collection<Deployment> deployments = context.instance().deployments().values();
assertFalse(deployments.isEmpty());
// Endpoints every zone participates in; us-west-1 additionally serves 'west'.
var notWest = Set.of(
"rotation-id-01", "foobar.app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02", "app1.tenant1.global.vespa.oath.cloud",
"rotation-id-03", "all.app1.tenant1.global.vespa.oath.cloud"
);
var west = Sets.union(notWest, Set.of("rotation-id-04", "west.app1.tenant1.global.vespa.oath.cloud"));
for (Deployment deployment : deployments) {
assertEquals(ZoneId.from("prod.us-west-1").equals(deployment.zone()) ? west : notWest,
tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
"Rotation names are passed to config server in " + deployment.zone());
}
context.flushDnsUpdates();
// One CNAME per endpoint, each pointing at its assigned rotation FQDN.
// (The four copy-pasted record checks were folded into assertCname below.)
assertEquals(4, tester.controllerTester().nameService().records().size());
assertCname("app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02.");
assertCname("foobar.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.");
assertCname("all.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-03.");
assertCname("west.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-04.");
}

// Asserts that a CNAME record with the given name exists and points at the expected target.
private void assertCname(String name, String expectedData) {
Optional<Record> record = tester.controllerTester().findCname(name);
assertTrue(record.isPresent(), "Record " + name + " exists");
assertEquals(name, record.get().name().asString());
assertEquals(expectedData, record.get().data().asString());
}
@Test
// Verifies how container endpoints evolve as the set of global endpoints changes
// across submissions, and that removing endpoint coverage requires the
// 'global-endpoint-change' validation override.
void testDnsUpdatesForGlobalEndpointChanges() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
var west = ZoneId.from("prod", "us-west-1");
var central = ZoneId.from("prod", "us-central-1");
var east = ZoneId.from("prod", "us-east-3");
// Initial: 'default' endpoint covers west and central only.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage).deploy();
for (var zone : List.of(west, central)) {
assertEquals(
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
,
"Zone " + zone + " is a member of global endpoint");
}
// Add an 'east' endpoint covering the east zone: gets a second rotation.
ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage2).deploy();
for (var zone : List.of(west, central)) {
assertEquals(
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
,
"Zone " + zone + " is a member of global endpoint");
}
assertEquals(
Set.of("rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(east))
,
"Zone " + east + " is a member of global endpoint");
// Extend 'default' to also cover east: east now serves both endpoints.
ApplicationPackage applicationPackage3 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value(), east.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage3).deploy();
for (var zone : List.of(west, central, east)) {
assertEquals(
zone.equals(east)
? Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud")
: Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
,
"Zone " + zone + " is a member of global endpoint");
}
// Shrinking 'default' back (dropping east) without an override is rejected.
ApplicationPackage applicationPackage4 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage4);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
"[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
"but does not include all of these in deployment.xml. Deploying given deployment.xml " +
"will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1] " +
"and add [endpoint 'default' (cluster qrs) -> us-central-1, us-west-1]. " +
ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
}
// Removing the 'default' endpoint entirely without an override is also rejected.
ApplicationPackage applicationPackage5 = new ApplicationPackageBuilder()
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage5);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
"[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
"but does not include all of these in deployment.xml. Deploying given deployment.xml " +
"will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1]. " +
ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
}
// The same removal succeeds once the override is declared.
ApplicationPackage applicationPackage6 = new ApplicationPackageBuilder()
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage6);
}
@Test
// Verifies that removing all global endpoints (with the required override)
// releases the assigned rotation and clears the container endpoints.
void testUnassignRotations() {
var app = tester.newDeploymentContext();
// First deployment declares a global endpoint, assigning a rotation.
var withEndpoint = new ApplicationPackageBuilder()
.endpoint("default", "qrs", "us-west-1", "us-central-1")
.region("us-west-1")
.region("us-central-1")
.build();
app.submit(withEndpoint).deploy();
// Redeploy without the endpoint; the global-endpoint-change override is required.
var withoutEndpoint = new ApplicationPackageBuilder()
.region("us-west-1")
.region("us-central-1")
.allow(ValidationId.globalEndpointChange)
.build();
app.submit(withoutEndpoint).deploy();
// The rotation is unassigned and the config server sees no container endpoints.
assertEquals(List.of(), app.instance().rotations());
assertEquals(Set.of(),
tester.configServer().containerEndpoints().get(app.deploymentIdIn(ZoneId.from("prod", "us-west-1"))));
}
@Test
// Verifies rotation reuse: a rotation released by a deleted application may be
// assigned to another application, and re-creating the first application gives
// it a different rotation with a fresh CNAME record.
void testDnsUpdatesWithChangeInRotationAssignment() {
String dnsName1 = "app1.tenant1.global.vespa.oath.cloud";
{
// app1 gets rotation-id-01 and a CNAME for its global endpoint.
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals(1, tester.controllerTester().nameService().records().size());
{
Optional<Record> record = tester.controllerTester().findCname(dnsName1);
assertTrue(record.isPresent());
assertEquals(dnsName1, record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
// Remove deployments and delete app1: the rotation is returned to the pool
// and the CNAME is removed once DNS updates are flushed.
applicationPackage = new ApplicationPackageBuilder()
.allow(ValidationId.deploymentRemoval)
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage);
tester.applications().deleteApplication(context.application().id(),
tester.controllerTester().credentialsFor(context.application().id().tenant()));
try (RotationLock lock = tester.controller().routing().rotations().lock()) {
assertTrue(tester.controller().routing().rotations().availableRotations(lock)
.containsKey(new RotationId("rotation-id-01")),
"Rotation is unassigned");
}
context.flushDnsUpdates();
Optional<Record> record = tester.controllerTester().findCname(dnsName1);
assertTrue(record.isEmpty(), dnsName1 + " is removed");
}
String dnsName2 = "app2.tenant2.global.vespa.oath.cloud";
{
// app2 now receives the freed rotation-id-01.
var context = tester.newDeploymentContext("tenant2", "app2", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals(1, tester.controllerTester().nameService().records().size());
var record = tester.controllerTester().findCname(dnsName2);
assertTrue(record.isPresent());
assertEquals(dnsName2, record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
{
// Re-created app1 gets the next free rotation (rotation-id-02); both CNAMEs exist.
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals("rotation-id-02", context.instance().rotations().get(0).rotationId().asString());
assertEquals(2, tester.controllerTester().nameService().records().size());
var record1 = tester.controllerTester().findCname(dnsName1);
assertTrue(record1.isPresent());
assertEquals("rotation-fqdn-02.", record1.get().data().asString());
var record2 = tester.controllerTester().findCname(dnsName2);
assertTrue(record2.isPresent());
assertEquals("rotation-fqdn-01.", record2.get().data().asString());
}
}
@Test
void testDnsUpdatesForApplicationEndpoint() {
    // Two instances ("beta" and "main") of the same application share
    // application-scoped endpoints a..e, each mapping deployments to integer weights.
    ApplicationId beta = ApplicationId.from("tenant1", "app1", "beta");
    ApplicationId main = ApplicationId.from("tenant1", "app1", "main");
    var context = tester.newDeploymentContext(beta);
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .instances("beta,main")
            .region("us-east-3")
            .region("us-west-1")
            .region("aws-us-east-1a")
            .region("aws-us-east-1b")
            .applicationEndpoint("a", "default",
                                 Map.of("aws-us-east-1a", Map.of(beta.instance(), 2,
                                                                 main.instance(), 8),
                                        "aws-us-east-1b", Map.of(main.instance(), 1)))
            .applicationEndpoint("b", "default", "aws-us-east-1a",
                                 Map.of(beta.instance(), 1,
                                        main.instance(), 1))
            .applicationEndpoint("c", "default", "aws-us-east-1b",
                                 Map.of(beta.instance(), 4))
            .applicationEndpoint("d", "default", "us-west-1",
                                 Map.of(main.instance(), 7,
                                        beta.instance(), 3))
            .applicationEndpoint("e", "default", "us-east-3",
                                 Map.of(main.instance(), 3))
            .build();
    context.submit(applicationPackage).deploy();
    ZoneId east3 = ZoneId.from("prod", "us-east-3");
    ZoneId west1 = ZoneId.from("prod", "us-west-1");
    ZoneId east1a = ZoneId.from("prod", "aws-us-east-1a");
    ZoneId east1b = ZoneId.from("prod", "aws-us-east-1b");
    // Expected endpoint name -> weight, per deployment, as passed to the config server.
    // Note: beta in us-east-3 participates in no application endpoint, hence the empty map.
    Map<DeploymentId, Map<String, Integer>> deploymentEndpoints = Map.of(
            new DeploymentId(beta, east3), Map.of(),
            new DeploymentId(main, east3), Map.of("e.app1.tenant1.us-east-3-r.vespa.oath.cloud", 3),
            new DeploymentId(beta, west1), Map.of("d.app1.tenant1.us-west-1-r.vespa.oath.cloud", 3),
            new DeploymentId(main, west1), Map.of("d.app1.tenant1.us-west-1-r.vespa.oath.cloud", 7),
            new DeploymentId(beta, east1a), Map.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 2,
                                                   "b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 1),
            new DeploymentId(main, east1a), Map.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 8,
                                                   "b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 1),
            new DeploymentId(beta, east1b), Map.of("c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud", 4),
            new DeploymentId(main, east1b), Map.of("a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud", 1)
    );
    deploymentEndpoints.forEach((deployment, endpoints) -> {
        Set<ContainerEndpoint> expected = endpoints.entrySet().stream()
                .map(kv -> new ContainerEndpoint("default", "application",
                                                 List.of(kv.getKey()),
                                                 OptionalInt.of(kv.getValue()),
                                                 tester.controller().zoneRegistry().routingMethod(deployment.zoneId())))
                .collect(Collectors.toSet());
        assertEquals(expected,
                     tester.configServer().containerEndpoints().get(deployment),
                     "Endpoint names for " + deployment + " are passed to config server");
    });
    context.flushDnsUpdates();
    // Verify the complete set of DNS records created for the endpoints above:
    // per-deployment CNAMEs, weighted ALIAS records carrying the configured weights,
    // and plain CNAMEs for the zones without weighted routing.
    Set<Record> records = tester.controllerTester().nameService().records();
    assertEquals(new TreeSet<>(Set.of(new Record(Record.Type.CNAME,
                                                 RecordName.from("beta.app1.tenant1.aws-us-east-1a.vespa.oath.cloud"),
                                                 RecordData.from("lb-0--tenant1.app1.beta--prod.aws-us-east-1a.")),
                                      new Record(Record.Type.CNAME,
                                                 RecordName.from("beta.app1.tenant1.aws-us-east-1b.vespa.oath.cloud"),
                                                 RecordData.from("lb-0--tenant1.app1.beta--prod.aws-us-east-1b.")),
                                      new Record(Record.Type.CNAME,
                                                 RecordName.from("main.app1.tenant1.aws-us-east-1a.vespa.oath.cloud"),
                                                 RecordData.from("lb-0--tenant1.app1.main--prod.aws-us-east-1a.")),
                                      new Record(Record.Type.CNAME,
                                                 RecordName.from("main.app1.tenant1.aws-us-east-1b.vespa.oath.cloud"),
                                                 RecordData.from("lb-0--tenant1.app1.main--prod.aws-us-east-1b.")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/2")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/8")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/1")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
                                      new Record(Record.Type.ALIAS,
                                                 RecordName.from("c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud"),
                                                 RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/4")),
                                      new Record(Record.Type.CNAME,
                                                 RecordName.from("d.app1.tenant1.us-west-1-r.vespa.oath.cloud"),
                                                 RecordData.from("vip.prod.us-west-1.")),
                                      new Record(Record.Type.CNAME,
                                                 RecordName.from("e.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
                                                 RecordData.from("vip.prod.us-east-3.")))),
                 new TreeSet<>(records));
    // All declared application-scoped endpoints are visible through the routing API.
    List<String> endpointDnsNames = tester.controller().routing().declaredEndpointsOf(context.application())
            .scope(Endpoint.Scope.application)
            .sortedBy(comparing(Endpoint::dnsName))
            .mapToList(Endpoint::dnsName);
    assertEquals(List.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud",
                         "a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud",
                         "b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud",
                         "c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud",
                         "d.app1.tenant1.us-west-1-r.vespa.oath.cloud",
                         "e.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
                 endpointDnsNames);
}
@Test
void testDevDeployment() {
    // Dev deployments bypass the deployment pipeline: no job status is recorded
    // and no deployment spec is stored for the application.
    ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]);
    var context = tester.newDeploymentContext();
    ZoneId zone = ZoneId.from("dev", "us-east-1");
    tester.controllerTester().zoneRegistry()
            .setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.sharedLayer4);
    context.runJob(zone, applicationPackage);
    assertTrue(tester.configServer().application(context.instanceId(), zone).get().activated(),
               "Application deployed and activated");
    assertTrue(context.instanceJobs().isEmpty(),
               "No job status added");
    assertEquals(DeploymentSpec.empty, context.application().deploymentSpec(), "DeploymentSpec is not stored");
    // The zone's configured routing method is reflected in the deployment's endpoints.
    Set<RoutingMethod> routingMethods = tester.controller().routing().readEndpointsOf(context.deploymentIdIn(zone))
            .asList()
            .stream()
            .map(Endpoint::routingMethod)
            .collect(Collectors.toSet());
    assertEquals(routingMethods, Set.of(RoutingMethod.sharedLayer4));
    assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
                        .getMeta(new DeploymentId(context.instanceId(), zone))
                        .get(tester.clock().instant()));
    // Advance the clock before deactivating so the stored metadata is recorded
    // at a distinct instant; deactivation leaves empty metadata behind.
    tester.clock().advance(Duration.ofSeconds(1));
    tester.controller().applications().deactivate(context.instanceId(), zone);
    assertArrayEquals(new byte[0],
                      tester.controllerTester().serviceRegistry().applicationStore()
                            .getMeta(new DeploymentId(context.instanceId(), zone))
                            .get(tester.clock().instant()));
}
@Test
void testDevDeploymentWithIncompatibleVersions() {
    // Verifies compile-version/platform compatibility checks for dev deployments
    // when major version 8 is declared incompatible with earlier majors.
    // Uses assertThrows for consistency with testCompileVersion, replacing the
    // older try/fail/catch pattern.
    Version version1 = new Version("7");
    Version version2 = new Version("7.5");
    Version version3 = new Version("8");
    var context = tester.newDeploymentContext();
    tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
    tester.controllerTester().upgradeSystem(version2);
    // Keep version2 in use so it is not garbage-collected from the version status.
    tester.newDeploymentContext("keep", "v2", "alive").submit().deploy();
    ZoneId zone = ZoneId.from("dev", "us-east-1");

    // Compiling against 7 deploys on the newest compatible platform (7.5).
    context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).build());
    assertEquals(version2, context.deployment(zone).version());
    assertEquals(Optional.of(version1), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());

    // Specifying a major that does not yet exist in the system must fail.
    assertEquals("no platforms were found for major version 8 specified in deployment.xml",
                 assertThrows(IllegalArgumentException.class,
                              () -> context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build()))
                         .getMessage());

    // Compiling against a version only compatible with not-yet-existent platforms must fail.
    assertEquals("no platforms are compatible with compile version 8",
                 assertThrows(IllegalArgumentException.class,
                              () -> context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build()))
                         .getMessage());

    tester.controllerTester().upgradeSystem(version3);

    // A major incompatible with the compile version must fail even once the major exists.
    assertEquals("no platforms on major version 8 specified in deployment.xml are compatible with compile version 7",
                 assertThrows(IllegalArgumentException.class,
                              () -> context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build()))
                         .getMessage());

    // Compatible combinations succeed, with and without an explicit major.
    context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).majorVersion(8).build());
    assertEquals(version3, context.deployment(zone).version());
    assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());

    context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
    assertEquals(version3, context.deployment(zone).version());
    assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
}
@Test
void testSuspension() {
    // Suspending one deployment must not affect the other deployment of the same instance.
    var context = tester.newDeploymentContext();
    var pkg = new ApplicationPackageBuilder()
            .region("us-west-1")
            .region("us-east-3")
            .build();
    context.submit(pkg).deploy();

    var westDeployment = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
    var eastDeployment = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-east-3")));

    // Initially neither deployment is suspended.
    assertFalse(tester.configServer().isSuspended(westDeployment));
    assertFalse(tester.configServer().isSuspended(eastDeployment));

    // Suspend only the west deployment; east stays active.
    tester.configServer().setSuspension(westDeployment, true);
    assertTrue(tester.configServer().isSuspended(westDeployment));
    assertFalse(tester.configServer().isSuspended(eastDeployment));
}
@Test
void testDeletingApplicationThatHasAlreadyBeenDeleted() {
    // Deactivating an already-deactivated deployment must be a no-op, not an error.
    var context = tester.newDeploymentContext();
    var pkg = new ApplicationPackageBuilder()
            .region("us-west-1")
            .build();
    var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
    context.submit(pkg).runJob(zone, pkg);

    // First deactivation removes the deployment; the second must be idempotent.
    tester.controller().applications().deactivate(context.instanceId(), zone);
    tester.controller().applications().deactivate(context.instanceId(), zone);
}
@Test
void testDeployApplicationWithWarnings() {
    // Warnings reported by the config server during deployment must surface
    // in the deployment's metrics.
    var context = tester.newDeploymentContext();
    var pkg = new ApplicationPackageBuilder()
            .region("us-west-1")
            .build();
    var zone = ZoneId.from("prod", "us-west-1");
    int warningCount = 3;

    // Make the (mock) config server produce warnings for this deployment.
    tester.configServer().generateWarnings(context.deploymentIdIn(zone), warningCount);
    context.submit(pkg).deploy();

    int reported = context.deployment(zone)
                          .metrics().warnings().get(DeploymentMetrics.Warning.all).intValue();
    assertEquals(warningCount, reported);
}
@Test
void testDeploySelectivelyProvisionsCertificate() {
    // Reads the endpoint certificate metadata stored for an instance, if any.
    Function<Instance, Optional<EndpointCertificateMetadata>> certificate = (application) -> tester.controller().curator().readEndpointCertificateMetadata(application.id());
    var context1 = tester.newDeploymentContext("tenant1", "app1", "default");
    var prodZone = ZoneId.from("prod", "us-west-1");
    var stagingZone = ZoneId.from("staging", "us-east-3");
    var testZone = ZoneId.from("test", "us-east-1");
    tester.controllerTester().zoneRegistry().exclusiveRoutingIn(ZoneApiMock.from(prodZone));
    var applicationPackage = new ApplicationPackageBuilder().athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
            .region(prodZone.region())
            .build();
    context1.submit(applicationPackage).deploy();
    var cert = certificate.apply(context1.instance());
    assertTrue(cert.isPresent(), "Provisions certificate in " + Environment.prod);
    // The certificate covers the hashed default name, the global endpoint names,
    // and zone-specific names (plus wildcards) for prod, test and staging zones.
    assertEquals(Stream.concat(Stream.of("vznqtz7a5ygwjkbhhj7ymxvlrekgt4l6g.vespa.oath.cloud",
                                         "app1.tenant1.global.vespa.oath.cloud",
                                         "*.app1.tenant1.global.vespa.oath.cloud"),
                               Stream.of(prodZone, testZone, stagingZone)
                                     .flatMap(zone -> Stream.of("", "*.")
                                             .map(prefix -> prefix + "app1.tenant1." + zone.region().value() +
                                                            (zone.environment() == Environment.prod ? "" : "." + zone.environment().value()) +
                                                            ".vespa.oath.cloud")))
                       .collect(Collectors.toUnmodifiableSet()),
                 Set.copyOf(tester.controllerTester().serviceRegistry().endpointCertificateMock().dnsNamesOf(context1.instanceId())));
    // Redeploying reuses the existing certificate rather than provisioning a new one.
    context1.submit(applicationPackage).deploy();
    assertEquals(cert, certificate.apply(context1.instance()));
    // A certificate is also provisioned for a dev deployment of another application.
    var context2 = tester.newDeploymentContext("tenant1", "app2", "default");
    var devZone = ZoneId.from("dev", "us-east-1");
    context2.runJob(devZone, applicationPackage);
    assertTrue(tester.configServer().application(context2.instanceId(), devZone).get().activated(),
               "Application deployed and activated");
    assertTrue(certificate.apply(context2.instance()).isPresent(), "Provisions certificate also in zone with routing layer");
}
@Test
void testDeployWithGlobalEndpointsInMultipleClouds() {
    // A global endpoint may not span regions belonging to different clouds.
    // Uses assertThrows for consistency with testCompileVersion, replacing the
    // older try/fail/catch pattern.
    tester.controllerTester().zoneRegistry().setZones(
            ZoneApiMock.fromId("test.us-west-1"),
            ZoneApiMock.fromId("staging.us-west-1"),
            ZoneApiMock.fromId("prod.us-west-1"),
            ZoneApiMock.newBuilder().with(CloudName.AWS).withId("prod.aws-us-east-1").build()
    );
    var context = tester.newDeploymentContext();

    // Endpoint implicitly spanning all regions, which are in different clouds.
    var applicationPackage = new ApplicationPackageBuilder()
            .region("aws-us-east-1")
            .region("us-west-1")
            .endpoint("default", "default")
            .build();
    assertEquals("Endpoint 'default' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]",
                 assertThrows(IllegalArgumentException.class,
                              () -> context.submit(applicationPackage))
                         .getMessage());

    // A single-cloud endpoint is fine, but one listing regions in both clouds is not.
    var applicationPackage2 = new ApplicationPackageBuilder()
            .region("aws-us-east-1")
            .region("us-west-1")
            .endpoint("aws", "default", "aws-us-east-1")
            .endpoint("foo", "default", "aws-us-east-1", "us-west-1")
            .build();
    assertEquals("Endpoint 'foo' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]",
                 assertThrows(IllegalArgumentException.class,
                              () -> context.submit(applicationPackage2))
                         .getMessage());
}
@Test
void testDeployWithoutSourceRevision() {
    // A submission without source-revision information must still deploy successfully.
    var context = tester.newDeploymentContext();
    var pkg = new ApplicationPackageBuilder()
            .upgradePolicy("default")
            .region("us-west-1")
            .build();

    // Submit with an empty source revision, then run the pipeline to completion.
    context.submit(pkg, Optional.empty())
           .deploy();

    assertEquals(1, context.instance().deployments().size(), "Deployed application");
}
@Test
void testDeployWithGlobalEndpointsAndMultipleRoutingMethods() {
    // Global endpoints spanning zones with different routing methods: zone1 uses
    // shared routing, zone2 exclusive routing. Only the exclusive zone produces
    // DNS records here.
    var context = tester.newDeploymentContext();
    var zone1 = ZoneId.from("prod", "us-west-1");
    var zone2 = ZoneId.from("prod", "us-east-3");
    var applicationPackage = new ApplicationPackageBuilder()
            .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
            .endpoint("default", "default", zone1.region().value(), zone2.region().value())
            .endpoint("east", "default", zone2.region().value())
            .region(zone1.region())
            .region(zone2.region())
            .build();
    tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone1), RoutingMethod.sharedLayer4);
    tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone2), RoutingMethod.exclusive);
    context.submit(applicationPackage).deploy();
    // Expected records for the exclusive zone: a weighted ALIAS for the zone
    // endpoint, a latency-based ALIAS for the global 'east' endpoint, and a
    // CNAME for the zone-scoped endpoint.
    var expectedRecords = List.of(
            new Record(Record.Type.ALIAS,
                       RecordName.from("application.tenant.us-east-3-w.vespa.oath.cloud"),
                       new WeightedAliasTarget(HostName.of("lb-0--tenant.application.default--prod.us-east-3"),
                                               "dns-zone-1", ZoneId.from("prod.us-east-3"), 1).pack()),
            new Record(Record.Type.ALIAS,
                       RecordName.from("east.application.tenant.global.vespa.oath.cloud"),
                       new LatencyAliasTarget(HostName.of("application.tenant.us-east-3-w.vespa.oath.cloud"),
                                              "dns-zone-1", ZoneId.from("prod.us-east-3")).pack()),
            new Record(Record.Type.CNAME,
                       RecordName.from("application.tenant.us-east-3.vespa.oath.cloud"),
                       RecordData.from("lb-0--tenant.application.default--prod.us-east-3.")));
    assertEquals(expectedRecords, List.copyOf(tester.controllerTester().nameService().records()));
}
@Test
void testDeploymentDirectRouting() {
    // Rotations are disabled (empty RotationsConfig): endpoints use direct
    // (exclusive) routing in all three zones.
    DeploymentTester tester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build(), main));
    var context = tester.newDeploymentContext();
    var zone1 = ZoneId.from("prod", "us-west-1");
    var zone2 = ZoneId.from("prod", "us-east-3");
    var zone3 = ZoneId.from("prod", "eu-west-1");
    tester.controllerTester().zoneRegistry()
          .exclusiveRoutingIn(ZoneApiMock.from(zone1), ZoneApiMock.from(zone2), ZoneApiMock.from(zone3));
    var applicationPackageBuilder = new ApplicationPackageBuilder()
            .region(zone1.region())
            .region(zone2.region())
            .region(zone3.region())
            .endpoint("default", "default")
            .endpoint("foo", "qrs")
            .endpoint("us", "default", zone1.region().value(), zone2.region().value())
            .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"));
    context.submit(applicationPackageBuilder.build()).deploy();
    // zone1 and zone2 participate in all three endpoints ('us' is limited to them).
    for (var zone : List.of(zone1, zone2)) {
        assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
                            "foo.application.tenant.global.vespa.oath.cloud",
                            "us.application.tenant.global.vespa.oath.cloud"),
                     tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
                     "Expected container endpoints in " + zone);
    }
    // zone3 is not part of the 'us' endpoint, so it only gets the two global ones.
    assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
                        "foo.application.tenant.global.vespa.oath.cloud"),
                 tester.configServer().containerEndpointNames(context.deploymentIdIn(zone3)),
                 "Expected container endpoints in " + zone3);
}
@Test
void testChangeEndpointCluster() {
    // Moving a global endpoint to another cluster requires an explicit
    // global-endpoint-change validation override.
    var context = tester.newDeploymentContext();
    var west = ZoneId.from("prod", "us-west-1");
    var east = ZoneId.from("prod", "us-east-3");
    var applicationPackage = new ApplicationPackageBuilder()
            .endpoint("default", "foo")
            .region(west.region().value())
            .region(east.region().value())
            .build();
    context.submit(applicationPackage).deploy();
    assertEquals(ClusterSpec.Id.from("foo"), tester.applications().requireInstance(context.instanceId())
                                                   .rotations().get(0).clusterId());
    // Re-pointing the endpoint at cluster 'bar' without an override must fail.
    applicationPackage = new ApplicationPackageBuilder()
            .endpoint("default", "bar")
            .region(west.region().value())
            .region(east.region().value())
            .build();
    try {
        context.submit(applicationPackage).deploy();
        fail("Expected exception");
    } catch (IllegalArgumentException e) {
        // NOTE(review): the expected-message expression below appears truncated in
        // this copy of the source (unterminated "https: literal) — verify against
        // the original file before relying on this block compiling.
        assertEquals("global-endpoint-change: application 'tenant.application' has endpoints [endpoint " +
                     "'default' (cluster foo) -> us-east-3, us-west-1], but does not include all of these in " +
                     "deployment.xml. Deploying given deployment.xml will remove " +
                     "[endpoint 'default' (cluster foo) -> us-east-3, us-west-1] and add " +
                     "[endpoint 'default' (cluster bar) -> us-east-3, us-west-1]. To allow this add " +
                     "<allow until='yyyy-mm-dd'>global-endpoint-change</allow> to validation-overrides.xml, see " +
                     "https:
    }
    // With the override in place, the cluster change is accepted.
    applicationPackage = new ApplicationPackageBuilder()
            .endpoint("default", "bar")
            .region(west.region().value())
            .region(east.region().value())
            .allow(ValidationId.globalEndpointChange)
            .build();
    context.submit(applicationPackage).deploy();
    assertEquals(ClusterSpec.Id.from("bar"), tester.applications().requireInstance(context.instanceId())
                                                   .rotations().get(0).clusterId());
}
@Test
void testReadableApplications() {
    // readable() skips applications whose stored data cannot be deserialized,
    // while asList() fails on them. Uses assertThrows for consistency with
    // testCompileVersion, replacing the older try/fail/catch pattern.
    var db = new MockCuratorDb(tester.controller().system());
    var tester = new DeploymentTester(new ControllerTester(db));
    var app1 = tester.newDeploymentContext("t1", "a1", "default")
            .submit()
            .deploy();
    var app2 = tester.newDeploymentContext("t2", "a2", "default")
            .submit()
            .deploy();
    assertEquals(2, tester.applications().readable().size());

    // Corrupt app2's stored data directly in the curator DB.
    db.curator().set(Path.fromString("/controller/v1/applications/" + app2.application().id().serialized()),
                     new byte[]{(byte) 0xDE, (byte) 0xAD});

    // readable() silently skips the corrupted application ...
    assertEquals(1, tester.applications().readable().size());
    // ... while asList() propagates the deserialization failure.
    assertThrows(Exception.class, () -> tester.applications().asList());

    // The intact application can still be redeployed.
    app1.submit().deploy();
}
@Test
void testClashingEndpointIdAndInstanceName() {
    // An endpoint ID in one instance must not produce the same DNS name as
    // another instance's endpoint: here 'dev' in instance 'default' clashes
    // with 'default' in instance 'dev'. Uses assertThrows for consistency with
    // testCompileVersion, replacing the older try/fail/catch pattern.
    String deploymentXml = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" +
                           "  <instance id=\"default\">\n" +
                           "    <prod>\n" +
                           "      <region active=\"true\">us-west-1</region>\n" +
                           "    </prod>\n" +
                           "    <endpoints>\n" +
                           "      <endpoint id=\"dev\" container-id=\"qrs\"/>\n" +
                           "    </endpoints>\n" +
                           "  </instance>\n" +
                           "  <instance id=\"dev\">\n" +
                           "    <prod>\n" +
                           "      <region active=\"true\">us-west-1</region>\n" +
                           "    </prod>\n" +
                           "    <endpoints>\n" +
                           "      <endpoint id=\"default\" container-id=\"qrs\"/>\n" +
                           "    </endpoints>\n" +
                           "  </instance>\n" +
                           "</deployment>\n";
    ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
    assertEquals("Endpoint with ID 'default' in instance 'dev' clashes with endpoint 'dev' in instance 'default'",
                 assertThrows(IllegalArgumentException.class,
                              () -> tester.newDeploymentContext().submit(applicationPackage))
                         .getMessage());
}
@Test
void testTestPackageWarnings() {
    // A test package containing staging tests but no staging setup should
    // produce a warning notification on submission.
    String deploymentXml = "<deployment version='1.0'>\n" +
                           "  <prod>\n" +
                           "    <region>us-west-1</region>\n" +
                           "  </prod>\n" +
                           "</deployment>\n";
    ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
    // Staging test present, but no staging setup file alongside it.
    byte[] testPackage = ApplicationPackage.filesZip(Map.of("tests/staging-test/foo.json", new byte[0]));
    var app = tester.newDeploymentContext();
    tester.jobs().submit(app.application().id(), Submission.basic(applicationPackage, testPackage), 1);
    // NOTE(review): the expected-notification expression below appears truncated in
    // this copy of the source (unterminated "see https: literal) — verify against
    // the original file before relying on this block compiling.
    assertEquals(List.of(new Notification(tester.clock().instant(),
                                          Type.testPackage,
                                          Level.warning,
                                          NotificationSource.from(app.application().id()),
                                          List.of("test package has staging tests, so it should also include staging setup",
                                                  "see https:
                 tester.controller().notificationsDb().listNotifications(NotificationSource.from(app.application().id()), true));
}
@Test
void testCompileVersion() {
    // Exercises compileVersion() across version upgrades, confidence changes,
    // incompatible-major configuration and deployed-version state. The sequence
    // of upgrades/overrides below is strictly order-dependent.
    DeploymentContext context = tester.newDeploymentContext();
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build();
    TenantAndApplicationId application = TenantAndApplicationId.from(context.instanceId());

    // Single version 7.1 with normal confidence: chosen for major 7 and for no major.
    Version version0 = Version.fromString("7.1");
    tester.controllerTester().upgradeSystem(version0);
    tester.upgrader().overrideConfidence(version0, Confidence.normal);
    tester.controllerTester().computeVersionStatus();
    assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.of(7)));
    assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
    // Major 8 does not exist yet.
    assertEquals("this system has no available versions on specified major: 8",
                 assertThrows(IllegalArgumentException.class,
                              () -> tester.applications().compileVersion(application, OptionalInt.of(8)))
                         .getMessage());
    context.submit(applicationPackage).deploy();

    // 7.2 appears, but the application still runs 7.1 until it upgrades.
    Version version1 = Version.fromString("7.2");
    tester.controllerTester().upgradeSystem(version1);
    tester.upgrader().overrideConfidence(version1, Confidence.normal);
    tester.controllerTester().computeVersionStatus();
    assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
    tester.upgrader().maintain();
    context.deployPlatform(version1);
    assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));

    // Keep 7.x in use by another application so it stays in the version status.
    DeploymentContext legacyApp = tester.newDeploymentContext("avoid", "gc", "default").submit().deploy();
    TenantAndApplicationId newApp = TenantAndApplicationId.from("new", "app");

    // 8.0 with low confidence is not offered to an existing application.
    Version version2 = Version.fromString("8.0");
    tester.controllerTester().upgradeSystem(version2);
    tester.upgrader().overrideConfidence(version2, Confidence.low);
    tester.controllerTester().computeVersionStatus();
    assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
    assertEquals("this system has no available versions on specified major: 8",
                 assertThrows(IllegalArgumentException.class,
                              () -> tester.applications().compileVersion(application, OptionalInt.of(8)))
                         .getMessage());

    // With normal confidence, 8.0 is offered to new applications, but existing
    // applications on 7.x still get 7.2 (also when asking for major 8).
    tester.upgrader().overrideConfidence(version2, Confidence.normal);
    tester.controllerTester().computeVersionStatus();
    assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
    assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
    assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));

    // Declaring major 8 incompatible changes the picture: an explicit major 8
    // request now yields 8.0 even for the existing application.
    tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
    assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
    assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
    assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
    assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));

    // Low confidence again removes 8.0 as a candidate everywhere.
    tester.upgrader().overrideConfidence(version2, Confidence.low);
    tester.controllerTester().computeVersionStatus();
    assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
    assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
    assertEquals("this system has no available versions on specified major: 8",
                 assertThrows(IllegalArgumentException.class,
                              () -> tester.applications().compileVersion(application, OptionalInt.of(8)))
                         .getMessage());
    assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));
    assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));

    // Once the application itself is deployed on 8, 8.0 becomes its compile
    // version even while 8.0 has low confidence.
    tester.upgrader().overrideConfidence(version2, Confidence.normal);
    tester.controllerTester().computeVersionStatus();
    context.submit(new ApplicationPackageBuilder().region("us-west-1").compileVersion(version2).build()).deploy();
    assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
    assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
    tester.upgrader().overrideConfidence(version2, Confidence.low);
    tester.controllerTester().computeVersionStatus();
    assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
    assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
    assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));

    // Broken confidence on 8.0 leaves no usable compile version on that major.
    tester.upgrader().overrideConfidence(version2, Confidence.broken);
    tester.controllerTester().computeVersionStatus();
    assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
    assertEquals("no suitable, released compile version exists",
                 assertThrows(IllegalArgumentException.class,
                              () -> tester.applications().compileVersion(application, OptionalInt.empty()))
                         .getMessage());
    assertEquals("no suitable, released compile version exists for specified major: 8",
                 assertThrows(IllegalArgumentException.class,
                              () -> tester.applications().compileVersion(application, OptionalInt.of(8)))
                         .getMessage());

    // Clearing the incompatible-majors flag makes 7.2 valid again for all majors.
    tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of(), String.class);
    assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
    assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
    assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
    tester.upgrader().overrideConfidence(version2, Confidence.low);
    tester.controllerTester().computeVersionStatus();
    assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
    assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
    assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
}
@Test
void testCloudAccount() {
    // Deploying to a custom cloud account requires (1) the account to be allowed
    // for the tenant (feature flag) and (2) the target zone to be configured in
    // that account.
    DeploymentContext context = tester.newDeploymentContext();
    ZoneId devZone = devUsEast1.zone();
    ZoneId prodZone = productionUsWest1.zone();
    String cloudAccount = "012345678912";
    var applicationPackage = new ApplicationPackageBuilder()
            .cloudAccount(cloudAccount)
            .region(prodZone.region())
            .build();
    // Account not allowed for the tenant yet.
    context.submit(applicationPackage).runJobExpectingFailure(systemTest, "Requested cloud account '012345678912' is not valid for tenant 'tenant'");
    // Allow the account, but the test zone is still not configured in it.
    tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class);
    context.runJobExpectingFailure(systemTest, "Zone test.us-east-1 is not configured in requested cloud account '012345678912'")
           .abortJob(stagingTest);
    // Configure the pipeline zones in the account; deployment now succeeds.
    tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount),
                                                                   systemTest.zone(),
                                                                   stagingTest.zone(),
                                                                   prodZone);
    context.submit(applicationPackage).deploy();
    tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), devZone);
    context.runJob(devZone, applicationPackage);
    // All deployments end up in the requested account.
    for (var zoneId : List.of(systemTest.zone(), stagingTest.zone(), devZone, prodZone)) {
        assertEquals(cloudAccount, tester.controllerTester().configServer()
                                         .cloudAccount(context.deploymentIdIn(zoneId))
                                         .get().value());
    }
}
@Test
void testSubmitWithElementDeprecatedOnPreviousMajor() {
    // Fix: the original had a duplicate @Test annotation, which does not compile
    // (JUnit 5's @Test is not @Repeatable). Also converted the try/fail/catch
    // pattern to assertThrows for consistency with testCompileVersion.
    //
    // An attribute deprecated on a previous major ('global-service-id',
    // deprecated since major 7) must be rejected when compiling against major 8.
    DeploymentContext context = tester.newDeploymentContext();
    var applicationPackage = new ApplicationPackageBuilder()
            .compileVersion(Version.fromString("8.1"))
            .region("us-west-1")
            .globalServiceId("qrs")
            .build();
    IllegalArgumentException e = assertThrows(IllegalArgumentException.class,
                                              () -> context.submit(applicationPackage).deploy());
    assertTrue(e.getMessage().contains("Element 'prod' contains attribute 'global-service-id' deprecated since major version 7"));
}
} | class ControllerTest {
private final DeploymentTester tester = new DeploymentTester();
/**
 * End-to-end deployment lifecycle: initial submit and deploy, state persistence
 * across a controller restart, redeployment, rejection of invalid packages, and
 * removal of a production deployment (allowed only with an explicit validation
 * override), including application-store metadata cleanup on deletion.
 */
@Test
void testDeployment() {
    // Two prod regions; dev/perf are declared explicitly and therefore excluded.
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .explicitEnvironment(Environment.dev, Environment.perf)
            .region("us-west-1")
            .region("us-east-3")
            .build();
    // (removed unused local: Version version1 = tester.configServer().initialVersion())
    var context = tester.newDeploymentContext();
    context.submit(applicationPackage);
    assertEquals(ApplicationVersion.from(RevisionId.forProduction(1), DeploymentContext.defaultSourceRevision, "a@b", new Version("6.1"), Instant.ofEpochSecond(1)),
            context.application().revisions().get(context.instance().change().revision().get()),
            "Application version is known from completion of initial job");
    context.runJob(systemTest);
    context.runJob(stagingTest);
    RevisionId applicationVersion = context.instance().change().revision().get();
    assertTrue(applicationVersion.isProduction(), "Application version has been set during deployment");
    tester.triggerJobs();
    tester.clock().advance(Duration.ofSeconds(1));
    // Let the first prod job time out; two test jobs + two prod jobs are registered.
    context.timeOutUpgrade(productionUsWest1);
    assertEquals(4, context.instanceJobs().size());
    tester.triggerJobs();
    // Tenant and instance survive a controller restart.
    tester.controllerTester().createNewController();
    assertNotNull(tester.controller().tenants().get(TenantName.from("tenant1")));
    assertNotNull(tester.controller().applications().requireInstance(context.instanceId()));
    // Resubmission restarts the pipeline; the timed-out us-west-1 run is aborted and rerun.
    context.submit(applicationPackage);
    context.runJob(systemTest);
    context.runJob(stagingTest);
    context.triggerJobs().jobAborted(productionUsWest1);
    context.runJob(productionUsWest1);
    tester.triggerJobs();
    context.runJob(productionUsEast3);
    assertEquals(4, context.instanceJobs().size());
    // Instance names must be lowercase.
    applicationPackage = new ApplicationPackageBuilder()
            .instances("hellO")
            .build();
    try {
        context.submit(applicationPackage);
        fail("Expected exception due to illegal deployment spec.");
    }
    catch (IllegalArgumentException e) {
        assertEquals("Invalid id 'hellO'. Tenant, application and instance names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.", e.getMessage());
    }
    // Regions must exist in this system.
    applicationPackage = new ApplicationPackageBuilder()
            .region("deep-space-9")
            .build();
    try {
        context.submit(applicationPackage);
        fail("Expected exception due to illegal deployment spec.");
    }
    catch (IllegalArgumentException e) {
        assertEquals("Zone prod.deep-space-9 in deployment spec was not found in this system!", e.getMessage());
    }
    // Dropping a deployed region without the deployment-removal override is rejected.
    applicationPackage = new ApplicationPackageBuilder()
            .region("us-east-3")
            .build();
    try {
        assertTrue(context.instance().deployments().containsKey(ZoneId.from("prod", "us-west-1")));
        context.submit(applicationPackage);
        fail("Expected exception due to illegal production deployment removal");
    }
    catch (IllegalArgumentException e) {
        assertEquals("deployment-removal: application instance 'tenant.application.default' is deployed in us-west-1, " +
                "but this instance and region combination is removed from deployment.xml. " +
                ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval),
                e.getMessage());
    }
    assertNotNull(context.instance().deployments().get(productionUsWest1.zone()),
            "Zone was not removed");
    // With the override, both the zone's deployment and its job are removed.
    applicationPackage = new ApplicationPackageBuilder()
            .allow(ValidationId.deploymentRemoval)
            .upgradePolicy("default")
            .region("us-east-3")
            .build();
    context.submit(applicationPackage);
    assertNull(context.instance().deployments().get(productionUsWest1.zone()),
            "Zone was removed");
    assertNull(context.instanceJobs().get(productionUsWest1), "Deployment job was removed");
    // Stored metadata remains while the application exists, and is cleared on deletion.
    assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
            .getMeta(context.instanceId())
            .get(tester.clock().instant()));
    tester.clock().advance(Duration.ofSeconds(1));
    context.submit(ApplicationPackage.deploymentRemoval());
    tester.clock().advance(Duration.ofSeconds(1));
    context.submit(ApplicationPackage.deploymentRemoval());
    tester.applications().deleteApplication(context.application().id(),
            tester.controllerTester().credentialsFor(context.instanceId().tenant()));
    assertArrayEquals(new byte[0],
            tester.controllerTester().serviceRegistry().applicationStore()
                    .getMeta(context.instanceId())
                    .get(tester.clock().instant()));
    assertNull(tester.controllerTester().serviceRegistry().applicationStore()
            .getMeta(context.deploymentIdIn(productionUsWest1.zone())));
}
/** Routing status changes apply to a single deployment only; siblings stay in rotation. */
@Test
void testGlobalRotationStatus() {
    var ctx = tester.newDeploymentContext();
    var westZone = ZoneId.from("prod", "us-west-1");
    var eastZone = ZoneId.from("prod", "us-east-3");
    var pkg = new ApplicationPackageBuilder()
            .region(westZone.region())
            .region(eastZone.region())
            .endpoint("default", "default", westZone.region().value(), eastZone.region().value())
            .build();
    ctx.submit(pkg).deploy();
    DeploymentRoutingContext westRouting = tester.controller().routing().of(ctx.deploymentIdIn(westZone));
    // Deployments start in rotation.
    assertEquals(RoutingStatus.Value.in, westRouting.routingStatus().value());
    // Take the west deployment out of rotation.
    westRouting.setRoutingStatus(RoutingStatus.Value.out, RoutingStatus.Agent.operator);
    assertEquals(RoutingStatus.Value.out, westRouting.routingStatus().value());
    // The east deployment is unaffected.
    assertEquals(RoutingStatus.Value.in,
            tester.controller().routing().of(ctx.deploymentIdIn(eastZone)).routingStatus().value());
}
/**
 * Each instance of an application with a global endpoint is assigned its own
 * rotation; the rotation names are passed to the config server for every member
 * deployment, and one CNAME per instance-level global DNS name is created.
 */
@Test
void testDnsUpdatesForGlobalEndpoint() {
    var betaContext = tester.newDeploymentContext("tenant1", "app1", "beta");
    var defaultContext = tester.newDeploymentContext("tenant1", "app1", "default");
    ZoneId usWest = ZoneId.from("prod.us-west-1");
    ZoneId usCentral = ZoneId.from("prod.us-central-1");
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .instances("beta,default")
            .endpoint("default", "foo")
            .region(usWest.region())
            .region(usCentral.region())
            .build();
    tester.controllerTester().zoneRegistry().setRoutingMethod(List.of(ZoneApiMock.from(usWest), ZoneApiMock.from(usCentral)),
            RoutingMethod.sharedLayer4);
    betaContext.submit(applicationPackage).deploy();
    // The beta instance is assigned rotation-id-01 in all member zones.
    {
        Collection<Deployment> betaDeployments = betaContext.instance().deployments().values();
        assertFalse(betaDeployments.isEmpty());
        Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
                "global",
                List.of("beta.app1.tenant1.global.vespa.oath.cloud",
                        "rotation-id-01"),
                OptionalInt.empty(),
                RoutingMethod.sharedLayer4));
        for (Deployment deployment : betaDeployments) {
            assertEquals(containerEndpoints,
                    tester.configServer().containerEndpoints()
                            .get(betaContext.deploymentIdIn(deployment.zone())));
        }
        betaContext.flushDnsUpdates();
    }
    // The default instance gets a separate rotation, rotation-id-02.
    {
        Collection<Deployment> defaultDeployments = defaultContext.instance().deployments().values();
        assertFalse(defaultDeployments.isEmpty());
        Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
                "global",
                List.of("app1.tenant1.global.vespa.oath.cloud",
                        "rotation-id-02"),
                OptionalInt.empty(),
                RoutingMethod.sharedLayer4));
        for (Deployment deployment : defaultDeployments) {
            assertEquals(containerEndpoints,
                    tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone())));
        }
        defaultContext.flushDnsUpdates();
    }
    // One CNAME per instance, pointing at that instance's rotation FQDN.
    Map<String, String> rotationCnames = Map.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.",
            "app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02.");
    rotationCnames.forEach((cname, data) -> {
        var record = tester.controllerTester().findCname(cname);
        assertTrue(record.isPresent());
        assertEquals(cname, record.get().name().asString());
        assertEquals(data, record.get().data().asString());
    });
    // Each instance declares exactly its own global DNS name.
    Map<ApplicationId, Set<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), Set.of("beta.app1.tenant1.global.vespa.oath.cloud"),
            defaultContext.instanceId(), Set.of("app1.tenant1.global.vespa.oath.cloud"));
    globalDnsNamesByInstance.forEach((instance, dnsNames) -> {
        Set<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance)
                .scope(Endpoint.Scope.global)
                .asList().stream()
                .map(Endpoint::dnsName)
                .collect(Collectors.toSet());
        assertEquals(dnsNames, actualDnsNames, "Global DNS names for " + instance);
    });
}
/**
 * The legacy 'global-service-id' attribute behaves like a declared global endpoint:
 * a rotation is assigned and passed to the config server for each member zone,
 * and a single CNAME to the rotation FQDN is created.
 */
@Test
void testDnsUpdatesForGlobalEndpointLegacySyntax() {
    var context = tester.newDeploymentContext("tenant1", "app1", "default");
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .globalServiceId("foo")
            .region("us-west-1")
            .region("us-central-1")
            .build();
    context.submit(applicationPackage).deploy();
    Collection<Deployment> deployments = context.instance().deployments().values();
    assertFalse(deployments.isEmpty());
    for (Deployment deployment : deployments) {
        assertEquals(Set.of("rotation-id-01",
                "app1.tenant1.global.vespa.oath.cloud"),
                tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
                "Rotation names are passed to config server in " + deployment.zone());
    }
    context.flushDnsUpdates();
    // Exactly one DNS record: the global CNAME to the rotation.
    assertEquals(1, tester.controllerTester().nameService().records().size());
    Optional<Record> record = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
    assertTrue(record.isPresent());
    assertEquals("app1.tenant1.global.vespa.oath.cloud", record.get().name().asString());
    assertEquals("rotation-fqdn-01.", record.get().data().asString());
    List<String> globalDnsNames = tester.controller().routing().readDeclaredEndpointsOf(context.instanceId())
            .scope(Endpoint.Scope.global)
            .sortedBy(comparing(Endpoint::dnsName))
            .mapToList(Endpoint::dnsName);
    assertEquals(List.of("app1.tenant1.global.vespa.oath.cloud"),
            globalDnsNames);
}
/**
 * Multiple declared global endpoints each get their own rotation; zone membership
 * determines which rotation names the config server receives, and each endpoint
 * gets a CNAME to its rotation FQDN.
 */
@Test
void testDnsUpdatesForMultipleGlobalEndpoints() {
    var ctx = tester.newDeploymentContext("tenant1", "app1", "default");
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .endpoint("foobar", "qrs", "us-west-1", "us-central-1")
            .endpoint("default", "qrs", "us-west-1", "us-central-1")
            .endpoint("all", "qrs")
            .endpoint("west", "qrs", "us-west-1")
            .region("us-west-1")
            .region("us-central-1")
            .build();
    ctx.submit(pkg).deploy();
    Collection<Deployment> deployments = ctx.instance().deployments().values();
    assertFalse(deployments.isEmpty());
    // Endpoints 'foobar', 'default' and 'all' are members everywhere; 'west' only in us-west-1.
    var notWest = Set.of(
            "rotation-id-01", "foobar.app1.tenant1.global.vespa.oath.cloud",
            "rotation-id-02", "app1.tenant1.global.vespa.oath.cloud",
            "rotation-id-03", "all.app1.tenant1.global.vespa.oath.cloud"
    );
    var west = Sets.union(notWest, Set.of("rotation-id-04", "west.app1.tenant1.global.vespa.oath.cloud"));
    for (Deployment deployment : deployments) {
        boolean inWest = ZoneId.from("prod.us-west-1").equals(deployment.zone());
        assertEquals(inWest ? west : notWest,
                tester.configServer().containerEndpointNames(ctx.deploymentIdIn(deployment.zone())),
                "Rotation names are passed to config server in " + deployment.zone());
    }
    ctx.flushDnsUpdates();
    assertEquals(4, tester.controllerTester().nameService().records().size());
    // One CNAME per endpoint, pointing at that endpoint's rotation FQDN.
    Map<String, String> expectedCnames = Map.of(
            "app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02.",
            "foobar.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.",
            "all.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-03.",
            "west.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-04.");
    expectedCnames.forEach((name, data) -> {
        var record = tester.controllerTester().findCname(name);
        assertTrue(record.isPresent());
        assertEquals(name, record.get().name().asString());
        assertEquals(data, record.get().data().asString());
    });
}
/**
 * Changing global endpoint membership across submissions: adding endpoints and
 * members is allowed, while shrinking or removing endpoints requires the
 * 'global-endpoint-change' validation override.
 */
@Test
void testDnsUpdatesForGlobalEndpointChanges() {
    var context = tester.newDeploymentContext("tenant1", "app1", "default");
    var west = ZoneId.from("prod", "us-west-1");
    var central = ZoneId.from("prod", "us-central-1");
    var east = ZoneId.from("prod", "us-east-3");
    // Initial: 'default' endpoint over west + central.
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .endpoint("default", "qrs", west.region().value(), central.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .build();
    context.submit(applicationPackage).deploy();
    for (var zone : List.of(west, central)) {
        assertEquals(
                Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
                tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
                ,
                "Zone " + zone + " is a member of global endpoint");
    }
    // Adding a new endpoint ('east') is allowed without an override.
    ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
            .endpoint("default", "qrs", west.region().value(), central.region().value())
            .endpoint("east", "qrs", east.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .build();
    context.submit(applicationPackage2).deploy();
    for (var zone : List.of(west, central)) {
        assertEquals(
                Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
                tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
                ,
                "Zone " + zone + " is a member of global endpoint");
    }
    assertEquals(
            Set.of("rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud"),
            tester.configServer().containerEndpointNames(context.deploymentIdIn(east))
            ,
            "Zone " + east + " is a member of global endpoint");
    // Growing an existing endpoint ('default' now also covers east) is allowed.
    ApplicationPackage applicationPackage3 = new ApplicationPackageBuilder()
            .endpoint("default", "qrs", west.region().value(), central.region().value(), east.region().value())
            .endpoint("east", "qrs", east.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .build();
    context.submit(applicationPackage3).deploy();
    for (var zone : List.of(west, central, east)) {
        assertEquals(
                zone.equals(east)
                        ? Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud",
                        "rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud")
                        : Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
                tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
                ,
                "Zone " + zone + " is a member of global endpoint");
    }
    // Shrinking an endpoint without the override is rejected.
    ApplicationPackage applicationPackage4 = new ApplicationPackageBuilder()
            .endpoint("default", "qrs", west.region().value(), central.region().value())
            .endpoint("east", "qrs", east.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .build();
    try {
        context.submit(applicationPackage4);
        fail("Expected exception");
    } catch (IllegalArgumentException e) {
        assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
                "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
                "but does not include all of these in deployment.xml. Deploying given deployment.xml " +
                "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1] " +
                "and add [endpoint 'default' (cluster qrs) -> us-central-1, us-west-1]. " +
                ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
    }
    // Removing an endpoint entirely without the override is also rejected.
    ApplicationPackage applicationPackage5 = new ApplicationPackageBuilder()
            .endpoint("east", "qrs", east.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .build();
    try {
        context.submit(applicationPackage5);
        fail("Expected exception");
    } catch (IllegalArgumentException e) {
        assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
                "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
                "but does not include all of these in deployment.xml. Deploying given deployment.xml " +
                "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1]. " +
                ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
    }
    // With the override the removal goes through.
    ApplicationPackage applicationPackage6 = new ApplicationPackageBuilder()
            .endpoint("east", "qrs", east.region().value())
            .region(west.region().value())
            .region(central.region().value())
            .region(east.region().value())
            .allow(ValidationId.globalEndpointChange)
            .build();
    context.submit(applicationPackage6);
}
/** Removing all declared endpoints (with the required override) releases assigned rotations. */
@Test
void testUnassignRotations() {
    var ctx = tester.newDeploymentContext();
    ApplicationPackage withEndpoint = new ApplicationPackageBuilder()
            .endpoint("default", "qrs", "us-west-1", "us-central-1")
            .region("us-west-1")
            .region("us-central-1")
            .build();
    ctx.submit(withEndpoint).deploy();
    // Resubmit without any endpoint, allowing the global endpoint change.
    ApplicationPackage withoutEndpoint = new ApplicationPackageBuilder()
            .region("us-west-1")
            .region("us-central-1")
            .allow(ValidationId.globalEndpointChange)
            .build();
    ctx.submit(withoutEndpoint).deploy();
    // The rotation is unassigned and the config server no longer sees any endpoints.
    assertEquals(List.of(), ctx.instance().rotations());
    assertEquals(
            Set.of(),
            tester.configServer().containerEndpoints().get(ctx.deploymentIdIn(ZoneId.from("prod", "us-west-1")))
    );
}
/**
 * Rotations are recycled: deleting an application returns its rotation to the
 * pool and removes its CNAME; a later application picks the freed rotation, and
 * a re-created application gets the next free one with a matching CNAME target.
 */
@Test
void testDnsUpdatesWithChangeInRotationAssignment() {
    String dnsName1 = "app1.tenant1.global.vespa.oath.cloud";
    // app1 takes rotation-id-01, then is deleted, freeing the rotation and the CNAME.
    {
        var context = tester.newDeploymentContext("tenant1", "app1", "default");
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .endpoint("default", "foo")
                .region("us-west-1")
                .region("us-central-1")
                .build();
        context.submit(applicationPackage).deploy();
        assertEquals(1, tester.controllerTester().nameService().records().size());
        {
            Optional<Record> record = tester.controllerTester().findCname(dnsName1);
            assertTrue(record.isPresent());
            assertEquals(dnsName1, record.get().name().asString());
            assertEquals("rotation-fqdn-01.", record.get().data().asString());
        }
        applicationPackage = new ApplicationPackageBuilder()
                .allow(ValidationId.deploymentRemoval)
                .allow(ValidationId.globalEndpointChange)
                .build();
        context.submit(applicationPackage);
        tester.applications().deleteApplication(context.application().id(),
                tester.controllerTester().credentialsFor(context.application().id().tenant()));
        try (RotationLock lock = tester.controller().routing().rotations().lock()) {
            assertTrue(tester.controller().routing().rotations().availableRotations(lock)
                            .containsKey(new RotationId("rotation-id-01")),
                    "Rotation is unassigned");
        }
        context.flushDnsUpdates();
        Optional<Record> record = tester.controllerTester().findCname(dnsName1);
        assertTrue(record.isEmpty(), dnsName1 + " is removed");
    }
    String dnsName2 = "app2.tenant2.global.vespa.oath.cloud";
    // app2 now receives the freed rotation-id-01.
    {
        var context = tester.newDeploymentContext("tenant2", "app2", "default");
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .endpoint("default", "foo")
                .region("us-west-1")
                .region("us-central-1")
                .build();
        context.submit(applicationPackage).deploy();
        assertEquals(1, tester.controllerTester().nameService().records().size());
        var record = tester.controllerTester().findCname(dnsName2);
        assertTrue(record.isPresent());
        assertEquals(dnsName2, record.get().name().asString());
        assertEquals("rotation-fqdn-01.", record.get().data().asString());
    }
    // Re-created app1 gets the next free rotation, rotation-id-02.
    {
        var context = tester.newDeploymentContext("tenant1", "app1", "default");
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .endpoint("default", "foo")
                .region("us-west-1")
                .region("us-central-1")
                .build();
        context.submit(applicationPackage).deploy();
        assertEquals("rotation-id-02", context.instance().rotations().get(0).rotationId().asString());
        assertEquals(2, tester.controllerTester().nameService().records().size());
        var record1 = tester.controllerTester().findCname(dnsName1);
        assertTrue(record1.isPresent());
        assertEquals("rotation-fqdn-02.", record1.get().data().asString());
        var record2 = tester.controllerTester().findCname(dnsName2);
        assertTrue(record2.isPresent());
        assertEquals("rotation-fqdn-01.", record2.get().data().asString());
    }
}
/**
 * Application-scoped (cross-instance, weighted) endpoints: each member deployment
 * receives its endpoint names with the instance's weight, and the expected
 * CNAME/ALIAS records are created — weighted ALIAS records in zones with a DNS
 * zone (aws-*), plain CNAMEs to the shared VIP elsewhere.
 */
@Test
void testDnsUpdatesForApplicationEndpoint() {
    ApplicationId beta = ApplicationId.from("tenant1", "app1", "beta");
    ApplicationId main = ApplicationId.from("tenant1", "app1", "main");
    var context = tester.newDeploymentContext(beta);
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .instances("beta,main")
            .region("us-east-3")
            .region("us-west-1")
            .region("aws-us-east-1a")
            .region("aws-us-east-1b")
            .applicationEndpoint("a", "default",
                    Map.of("aws-us-east-1a", Map.of(beta.instance(), 2,
                            main.instance(), 8),
                            "aws-us-east-1b", Map.of(main.instance(), 1)))
            .applicationEndpoint("b", "default", "aws-us-east-1a",
                    Map.of(beta.instance(), 1,
                            main.instance(), 1))
            .applicationEndpoint("c", "default", "aws-us-east-1b",
                    Map.of(beta.instance(), 4))
            .applicationEndpoint("d", "default", "us-west-1",
                    Map.of(main.instance(), 7,
                            beta.instance(), 3))
            .applicationEndpoint("e", "default", "us-east-3",
                    Map.of(main.instance(), 3))
            .build();
    context.submit(applicationPackage).deploy();
    ZoneId east3 = ZoneId.from("prod", "us-east-3");
    ZoneId west1 = ZoneId.from("prod", "us-west-1");
    ZoneId east1a = ZoneId.from("prod", "aws-us-east-1a");
    ZoneId east1b = ZoneId.from("prod", "aws-us-east-1b");
    // Expected endpoint-name -> weight per member deployment, as sent to the config server.
    Map<DeploymentId, Map<String, Integer>> deploymentEndpoints = Map.of(
            new DeploymentId(beta, east3), Map.of(),
            new DeploymentId(main, east3), Map.of("e.app1.tenant1.us-east-3-r.vespa.oath.cloud", 3),
            new DeploymentId(beta, west1), Map.of("d.app1.tenant1.us-west-1-r.vespa.oath.cloud", 3),
            new DeploymentId(main, west1), Map.of("d.app1.tenant1.us-west-1-r.vespa.oath.cloud", 7),
            new DeploymentId(beta, east1a), Map.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 2,
                    "b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 1),
            new DeploymentId(main, east1a), Map.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 8,
                    "b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud", 1),
            new DeploymentId(beta, east1b), Map.of("c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud", 4),
            new DeploymentId(main, east1b), Map.of("a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud", 1)
    );
    deploymentEndpoints.forEach((deployment, endpoints) -> {
        Set<ContainerEndpoint> expected = endpoints.entrySet().stream()
                .map(kv -> new ContainerEndpoint("default", "application",
                        List.of(kv.getKey()),
                        OptionalInt.of(kv.getValue()),
                        tester.controller().zoneRegistry().routingMethod(deployment.zoneId())))
                .collect(Collectors.toSet());
        assertEquals(expected,
                tester.configServer().containerEndpoints().get(deployment),
                "Endpoint names for " + deployment + " are passed to config server");
    });
    context.flushDnsUpdates();
    // Full expected record set: per-deployment CNAMEs plus weighted ALIASes / VIP CNAMEs.
    Set<Record> records = tester.controllerTester().nameService().records();
    assertEquals(new TreeSet<>(Set.of(new Record(Record.Type.CNAME,
                    RecordName.from("beta.app1.tenant1.aws-us-east-1a.vespa.oath.cloud"),
                    RecordData.from("lb-0--tenant1.app1.beta--prod.aws-us-east-1a.")),
            new Record(Record.Type.CNAME,
                    RecordName.from("beta.app1.tenant1.aws-us-east-1b.vespa.oath.cloud"),
                    RecordData.from("lb-0--tenant1.app1.beta--prod.aws-us-east-1b.")),
            new Record(Record.Type.CNAME,
                    RecordName.from("main.app1.tenant1.aws-us-east-1a.vespa.oath.cloud"),
                    RecordData.from("lb-0--tenant1.app1.main--prod.aws-us-east-1a.")),
            new Record(Record.Type.CNAME,
                    RecordName.from("main.app1.tenant1.aws-us-east-1b.vespa.oath.cloud"),
                    RecordData.from("lb-0--tenant1.app1.main--prod.aws-us-east-1b.")),
            new Record(Record.Type.ALIAS,
                    RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
                    RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/2")),
            new Record(Record.Type.ALIAS,
                    RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
                    RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/8")),
            new Record(Record.Type.ALIAS,
                    RecordName.from("a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud"),
                    RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/1")),
            new Record(Record.Type.ALIAS,
                    RecordName.from("b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
                    RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
            new Record(Record.Type.ALIAS,
                    RecordName.from("b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
                    RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
            new Record(Record.Type.ALIAS,
                    RecordName.from("c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud"),
                    RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/4")),
            new Record(Record.Type.CNAME,
                    RecordName.from("d.app1.tenant1.us-west-1-r.vespa.oath.cloud"),
                    RecordData.from("vip.prod.us-west-1.")),
            new Record(Record.Type.CNAME,
                    RecordName.from("e.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
                    RecordData.from("vip.prod.us-east-3.")))),
            new TreeSet<>(records));
    // All declared application-scope DNS names are reported, sorted by name.
    List<String> endpointDnsNames = tester.controller().routing().declaredEndpointsOf(context.application())
            .scope(Endpoint.Scope.application)
            .sortedBy(comparing(Endpoint::dnsName))
            .mapToList(Endpoint::dnsName);
    assertEquals(List.of("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud",
            "a.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud",
            "b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud",
            "c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud",
            "d.app1.tenant1.us-west-1-r.vespa.oath.cloud",
            "e.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
            endpointDnsNames);
}
/**
 * Manual dev deployment: activates without registering deployment jobs or a
 * deployment spec, uses the zone's configured routing method, and clears stored
 * metadata when deactivated.
 */
@Test
void testDevDeployment() {
    ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]);
    var context = tester.newDeploymentContext();
    ZoneId zone = ZoneId.from("dev", "us-east-1");
    tester.controllerTester().zoneRegistry()
            .setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.sharedLayer4);
    context.runJob(zone, applicationPackage);
    assertTrue(tester.configServer().application(context.instanceId(), zone).get().activated(),
            "Application deployed and activated");
    assertTrue(context.instanceJobs().isEmpty(),
            "No job status added");
    assertEquals(DeploymentSpec.empty, context.application().deploymentSpec(), "DeploymentSpec is not stored");
    // Endpoints use the routing method configured for the dev zone.
    Set<RoutingMethod> routingMethods = tester.controller().routing().readEndpointsOf(context.deploymentIdIn(zone))
            .asList()
            .stream()
            .map(Endpoint::routingMethod)
            .collect(Collectors.toSet());
    assertEquals(routingMethods, Set.of(RoutingMethod.sharedLayer4));
    // Metadata exists while deployed, and is emptied after deactivation.
    assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
            .getMeta(new DeploymentId(context.instanceId(), zone))
            .get(tester.clock().instant()));
    tester.clock().advance(Duration.ofSeconds(1));
    tester.controller().applications().deactivate(context.instanceId(), zone);
    assertArrayEquals(new byte[0],
            tester.controllerTester().serviceRegistry().applicationStore()
                    .getMeta(new DeploymentId(context.instanceId(), zone))
                    .get(tester.clock().instant()));
}
/**
 * Dev deployments respect version compatibility: with major 8 declared
 * incompatible, packages are rejected when no platform exists for the requested
 * major, or when the compile version and requested major are incompatible.
 */
@Test
void testDevDeploymentWithIncompatibleVersions() {
    Version version1 = new Version("7");
    Version version2 = new Version("7.5");
    Version version3 = new Version("8");
    var context = tester.newDeploymentContext();
    tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
    tester.controllerTester().upgradeSystem(version2);
    // Keep version2 alive in the system so it remains deployable.
    tester.newDeploymentContext("keep", "v2", "alive").submit().deploy();
    ZoneId zone = ZoneId.from("dev", "us-east-1");
    // Compiled against 7 deploys on the newest compatible platform, 7.5.
    context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).build());
    assertEquals(version2, context.deployment(zone).version());
    assertEquals(Optional.of(version1), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
    try {
        context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
        fail("Should fail when specifying a major that does not yet exist");
    }
    catch (IllegalArgumentException e) {
        assertEquals("no platforms were found for major version 8 specified in deployment.xml", e.getMessage());
    }
    try {
        context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
        fail("Should fail when compiled against a version which is only compatible with not-yet-existent versions");
    }
    catch (IllegalArgumentException e) {
        assertEquals("no platforms are compatible with compile version 8", e.getMessage());
    }
    // Major 8 now exists, but is incompatible with compile version 7.
    tester.controllerTester().upgradeSystem(version3);
    try {
        context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
        fail("Should fail when specifying a major which is incompatible with compile version");
    }
    catch (IllegalArgumentException e) {
        assertEquals("no platforms on major version 8 specified in deployment.xml are compatible with compile version 7", e.getMessage());
    }
    // Compile version 8 deploys on platform 8, with or without an explicit major.
    context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).majorVersion(8).build());
    assertEquals(version3, context.deployment(zone).version());
    assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
    context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
    assertEquals(version3, context.deployment(zone).version());
    assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
}
/** Suspension is tracked per deployment: suspending one zone leaves the other untouched. */
@Test
void testSuspension() {
    var ctx = tester.newDeploymentContext();
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .region("us-west-1")
            .region("us-east-3")
            .build();
    ctx.submit(pkg).deploy();
    DeploymentId west = ctx.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
    DeploymentId east = ctx.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-east-3")));
    // Nothing suspended initially.
    assertFalse(tester.configServer().isSuspended(west));
    assertFalse(tester.configServer().isSuspended(east));
    // Suspend only the west deployment.
    tester.configServer().setSuspension(west, true);
    assertTrue(tester.configServer().isSuspended(west));
    assertFalse(tester.configServer().isSuspended(east));
}
/** Deactivating an already-deactivated deployment is a no-op, not an error. */
@Test
void testDeletingApplicationThatHasAlreadyBeenDeleted() {
    var ctx = tester.newDeploymentContext();
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .region("us-west-1")
            .build();
    ZoneId westZone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
    ctx.submit(pkg).runJob(westZone, pkg);
    // The second deactivation must not throw.
    tester.controller().applications().deactivate(ctx.instanceId(), westZone);
    tester.controller().applications().deactivate(ctx.instanceId(), westZone);
}
/** Deployment warnings reported by the config server surface in deployment metrics. */
@Test
void testDeployApplicationWithWarnings() {
    var ctx = tester.newDeploymentContext();
    ZoneId zone = ZoneId.from("prod", "us-west-1");
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .region("us-west-1")
            .build();
    int expectedWarnings = 3;
    tester.configServer().generateWarnings(ctx.deploymentIdIn(zone), expectedWarnings);
    ctx.submit(pkg).deploy();
    assertEquals(expectedWarnings,
            ctx.deployment(zone).metrics().warnings().get(DeploymentMetrics.Warning.all).intValue());
}
/**
 * Endpoint certificates are provisioned on deployment with the full set of DNS
 * SANs (anonymized name, global names, and per-zone names for prod/test/staging),
 * reused on redeploy, and also provisioned for dev deployments.
 */
@Test
void testDeploySelectivelyProvisionsCertificate() {
    Function<Instance, Optional<EndpointCertificateMetadata>> certificate = (application) -> tester.controller().curator().readEndpointCertificateMetadata(application.id());
    var context1 = tester.newDeploymentContext("tenant1", "app1", "default");
    var prodZone = ZoneId.from("prod", "us-west-1");
    var stagingZone = ZoneId.from("staging", "us-east-3");
    var testZone = ZoneId.from("test", "us-east-1");
    tester.controllerTester().zoneRegistry().exclusiveRoutingIn(ZoneApiMock.from(prodZone));
    var applicationPackage = new ApplicationPackageBuilder().athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
            .region(prodZone.region())
            .build();
    context1.submit(applicationPackage).deploy();
    var cert = certificate.apply(context1.instance());
    assertTrue(cert.isPresent(), "Provisions certificate in " + Environment.prod);
    // SANs cover the anonymized cert name, global names, and each zone's names (wildcard + apex).
    assertEquals(Stream.concat(Stream.of("vznqtz7a5ygwjkbhhj7ymxvlrekgt4l6g.vespa.oath.cloud",
                    "app1.tenant1.global.vespa.oath.cloud",
                    "*.app1.tenant1.global.vespa.oath.cloud"),
            Stream.of(prodZone, testZone, stagingZone)
                    .flatMap(zone -> Stream.of("", "*.")
                            .map(prefix -> prefix + "app1.tenant1." + zone.region().value() +
                                    (zone.environment() == Environment.prod ? "" : "." + zone.environment().value()) +
                                    ".vespa.oath.cloud")))
            .collect(Collectors.toUnmodifiableSet()),
            Set.copyOf(tester.controllerTester().serviceRegistry().endpointCertificateMock().dnsNamesOf(context1.instanceId())));
    // Redeploying reuses the same certificate.
    context1.submit(applicationPackage).deploy();
    assertEquals(cert, certificate.apply(context1.instance()));
    // A dev deployment of another application also gets a certificate.
    var context2 = tester.newDeploymentContext("tenant1", "app2", "default");
    var devZone = ZoneId.from("dev", "us-east-1");
    context2.runJob(devZone, applicationPackage);
    assertTrue(tester.configServer().application(context2.instanceId(), devZone).get().activated(),
            "Application deployed and activated");
    assertTrue(certificate.apply(context2.instance()).isPresent(), "Provisions certificate also in zone with routing layer");
}
@Test
void testDeployWithGlobalEndpointsInMultipleClouds() {
tester.controllerTester().zoneRegistry().setZones(
ZoneApiMock.fromId("test.us-west-1"),
ZoneApiMock.fromId("staging.us-west-1"),
ZoneApiMock.fromId("prod.us-west-1"),
ZoneApiMock.newBuilder().with(CloudName.AWS).withId("prod.aws-us-east-1").build()
);
var context = tester.newDeploymentContext();
var applicationPackage = new ApplicationPackageBuilder()
.region("aws-us-east-1")
.region("us-west-1")
.endpoint("default", "default")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint 'default' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage());
}
var applicationPackage2 = new ApplicationPackageBuilder()
.region("aws-us-east-1")
.region("us-west-1")
.endpoint("aws", "default", "aws-us-east-1")
.endpoint("foo", "default", "aws-us-east-1", "us-west-1")
.build();
try {
context.submit(applicationPackage2);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint 'foo' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage());
}
}
@Test
void testDeployWithoutSourceRevision() {
var context = tester.newDeploymentContext();
var applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.region("us-west-1")
.build();
context.submit(applicationPackage, Optional.empty())
.deploy();
assertEquals(1, context.instance().deployments().size(), "Deployed application");
}
@Test
void testDeployWithGlobalEndpointsAndMultipleRoutingMethods() {
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
.endpoint("default", "default", zone1.region().value(), zone2.region().value())
.endpoint("east", "default", zone2.region().value())
.region(zone1.region())
.region(zone2.region())
.build();
tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone1), RoutingMethod.sharedLayer4);
tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone2), RoutingMethod.exclusive);
context.submit(applicationPackage).deploy();
var expectedRecords = List.of(
new Record(Record.Type.ALIAS,
RecordName.from("application.tenant.us-east-3-w.vespa.oath.cloud"),
new WeightedAliasTarget(HostName.of("lb-0--tenant.application.default--prod.us-east-3"),
"dns-zone-1", ZoneId.from("prod.us-east-3"), 1).pack()),
new Record(Record.Type.ALIAS,
RecordName.from("east.application.tenant.global.vespa.oath.cloud"),
new LatencyAliasTarget(HostName.of("application.tenant.us-east-3-w.vespa.oath.cloud"),
"dns-zone-1", ZoneId.from("prod.us-east-3")).pack()),
new Record(Record.Type.CNAME,
RecordName.from("application.tenant.us-east-3.vespa.oath.cloud"),
RecordData.from("lb-0--tenant.application.default--prod.us-east-3.")));
assertEquals(expectedRecords, List.copyOf(tester.controllerTester().nameService().records()));
}
@Test
void testDeploymentDirectRouting() {
DeploymentTester tester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build(), main));
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var zone3 = ZoneId.from("prod", "eu-west-1");
tester.controllerTester().zoneRegistry()
.exclusiveRoutingIn(ZoneApiMock.from(zone1), ZoneApiMock.from(zone2), ZoneApiMock.from(zone3));
var applicationPackageBuilder = new ApplicationPackageBuilder()
.region(zone1.region())
.region(zone2.region())
.region(zone3.region())
.endpoint("default", "default")
.endpoint("foo", "qrs")
.endpoint("us", "default", zone1.region().value(), zone2.region().value())
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"));
context.submit(applicationPackageBuilder.build()).deploy();
for (var zone : List.of(zone1, zone2)) {
assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
"foo.application.tenant.global.vespa.oath.cloud",
"us.application.tenant.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
"Expected container endpoints in " + zone);
}
assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
"foo.application.tenant.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone3)),
"Expected container endpoints in " + zone3);
}
@Test
void testChangeEndpointCluster() {
var context = tester.newDeploymentContext();
var west = ZoneId.from("prod", "us-west-1");
var east = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region(west.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage).deploy();
assertEquals(ClusterSpec.Id.from("foo"), tester.applications().requireInstance(context.instanceId())
.rotations().get(0).clusterId());
applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "bar")
.region(west.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage).deploy();
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant.application' has endpoints [endpoint " +
"'default' (cluster foo) -> us-east-3, us-west-1], but does not include all of these in " +
"deployment.xml. Deploying given deployment.xml will remove " +
"[endpoint 'default' (cluster foo) -> us-east-3, us-west-1] and add " +
"[endpoint 'default' (cluster bar) -> us-east-3, us-west-1]. To allow this add " +
"<allow until='yyyy-mm-dd'>global-endpoint-change</allow> to validation-overrides.xml, see " +
                         "https://docs.vespa.ai/en/reference/validation-overrides.html", e.getMessage());
}
applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "bar")
.region(west.region().value())
.region(east.region().value())
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage).deploy();
assertEquals(ClusterSpec.Id.from("bar"), tester.applications().requireInstance(context.instanceId())
.rotations().get(0).clusterId());
}
@Test
void testReadableApplications() {
var db = new MockCuratorDb(tester.controller().system());
var tester = new DeploymentTester(new ControllerTester(db));
var app1 = tester.newDeploymentContext("t1", "a1", "default")
.submit()
.deploy();
var app2 = tester.newDeploymentContext("t2", "a2", "default")
.submit()
.deploy();
assertEquals(2, tester.applications().readable().size());
db.curator().set(Path.fromString("/controller/v1/applications/" + app2.application().id().serialized()),
new byte[]{(byte) 0xDE, (byte) 0xAD});
assertEquals(1, tester.applications().readable().size());
try {
tester.applications().asList();
fail("Expected exception");
} catch (Exception ignored) {
}
app1.submit().deploy();
}
@Test
void testClashingEndpointIdAndInstanceName() {
String deploymentXml = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" +
" <instance id=\"default\">\n" +
" <prod>\n" +
" <region active=\"true\">us-west-1</region>\n" +
" </prod>\n" +
" <endpoints>\n" +
" <endpoint id=\"dev\" container-id=\"qrs\"/>\n" +
" </endpoints>\n" +
" </instance>\n" +
" <instance id=\"dev\">\n" +
" <prod>\n" +
" <region active=\"true\">us-west-1</region>\n" +
" </prod>\n" +
" <endpoints>\n" +
" <endpoint id=\"default\" container-id=\"qrs\"/>\n" +
" </endpoints>\n" +
" </instance>\n" +
"</deployment>\n";
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
try {
tester.newDeploymentContext().submit(applicationPackage);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint with ID 'default' in instance 'dev' clashes with endpoint 'dev' in instance 'default'",
e.getMessage());
}
}
@Test
void testTestPackageWarnings() {
String deploymentXml = "<deployment version='1.0'>\n" +
" <prod>\n" +
" <region>us-west-1</region>\n" +
" </prod>\n" +
"</deployment>\n";
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
byte[] testPackage = ApplicationPackage.filesZip(Map.of("tests/staging-test/foo.json", new byte[0]));
var app = tester.newDeploymentContext();
tester.jobs().submit(app.application().id(), Submission.basic(applicationPackage, testPackage), 1);
assertEquals(List.of(new Notification(tester.clock().instant(),
Type.testPackage,
Level.warning,
NotificationSource.from(app.application().id()),
List.of("test package has staging tests, so it should also include staging setup",
                                            "see https://docs.vespa.ai/en/testing"))),
tester.controller().notificationsDb().listNotifications(NotificationSource.from(app.application().id()), true));
}
@Test
void testCompileVersion() {
DeploymentContext context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build();
TenantAndApplicationId application = TenantAndApplicationId.from(context.instanceId());
Version version0 = Version.fromString("7.1");
tester.controllerTester().upgradeSystem(version0);
tester.upgrader().overrideConfidence(version0, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
context.submit(applicationPackage).deploy();
Version version1 = Version.fromString("7.2");
tester.controllerTester().upgradeSystem(version1);
tester.upgrader().overrideConfidence(version1, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
tester.upgrader().maintain();
context.deployPlatform(version1);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
DeploymentContext legacyApp = tester.newDeploymentContext("avoid", "gc", "default").submit().deploy();
TenantAndApplicationId newApp = TenantAndApplicationId.from("new", "app");
Version version2 = Version.fromString("8.0");
tester.controllerTester().upgradeSystem(version2);
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
tester.upgrader().overrideConfidence(version2, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));
tester.upgrader().overrideConfidence(version2, Confidence.normal);
tester.controllerTester().computeVersionStatus();
context.submit(new ApplicationPackageBuilder().region("us-west-1").compileVersion(version2).build()).deploy();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
tester.upgrader().overrideConfidence(version2, Confidence.broken);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals("no suitable, released compile version exists",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.empty()))
.getMessage());
assertEquals("no suitable, released compile version exists for specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of(), String.class);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
}
@Test
void testCloudAccount() {
DeploymentContext context = tester.newDeploymentContext();
ZoneId devZone = devUsEast1.zone();
ZoneId prodZone = productionUsWest1.zone();
String cloudAccount = "012345678912";
var applicationPackage = new ApplicationPackageBuilder()
.cloudAccount(cloudAccount)
.region(prodZone.region())
.build();
context.submit(applicationPackage).runJobExpectingFailure(systemTest, "Requested cloud account '012345678912' is not valid for tenant 'tenant'");
tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class);
context.runJobExpectingFailure(systemTest, "Zone test.us-east-1 is not configured in requested cloud account '012345678912'")
.abortJob(stagingTest);
tester.controllerTester().zoneRegistry().configureCloudAccount(CloudAccount.from(cloudAccount),
systemTest.zone(),
stagingTest.zone(),
prodZone);
context.submit(applicationPackage).deploy();
tester.controllerTester().zoneRegistry().configureCloudAccount(CloudAccount.from(cloudAccount), devZone);
context.runJob(devZone, applicationPackage);
for (var zoneId : List.of(systemTest.zone(), stagingTest.zone(), devZone, prodZone)) {
assertEquals(cloudAccount, tester.controllerTester().configServer()
.cloudAccount(context.deploymentIdIn(zoneId))
.get().value());
}
}
    @Test
void testSubmitWithElementDeprecatedOnPreviousMajor() {
DeploymentContext context = tester.newDeploymentContext();
var applicationPackage = new ApplicationPackageBuilder()
.compileVersion(Version.fromString("8.1"))
.region("us-west-1")
.globalServiceId("qrs")
.build();
try {
context.submit(applicationPackage).deploy();
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("Element 'prod' contains attribute 'global-service-id' deprecated since major version 7"));
}
}
} | |
Suggest to change error message to something like: "...validation failed. _If this change was intentional, update the dependency spec by running:_" | void fails_on_missing_dependency() {
SortedSet<Dependency> dependencies = new TreeSet<>(Set.of(
Dependency.fromString("com.example:foo:1.2.3")));
Path specFile = Paths.get("src/test/resources/allowed-dependencies.txt");
var exception = assertThrows(EnforcerRuleException.class,
() -> validateDependencies(dependencies, specFile, POM_FILE, "my-dep-enforcer"));
String expectedErrorMessage =
"""
The dependency enforcer failed:
Removed dependencies:
- com.example:bar:2.3.4
Maven dependency validation failed. To update dependency spec run:
$ mvn validate -DdependencyEnforcer.writeSpec -pl my-dep-enforcer -f /vespa-src/pom.xml
""";
assertEquals(expectedErrorMessage, exception.getMessage());
} | Maven dependency validation failed. To update dependency spec run: | void fails_on_missing_dependency() {
SortedSet<Dependency> dependencies = new TreeSet<>(Set.of(
Dependency.fromString("com.example:foo:1.2.3")));
Path specFile = Paths.get("src/test/resources/allowed-dependencies.txt");
var exception = assertThrows(EnforcerRuleException.class,
() -> validateDependencies(dependencies, specFile, POM_FILE, "my-dep-enforcer"));
String expectedErrorMessage =
"""
The dependency enforcer failed:
Removed dependencies:
- com.example:bar:2.3.4
Maven dependency validation failed. If this change was intentional, update the dependency spec by running:
$ mvn validate -DdependencyEnforcer.writeSpec -pl my-dep-enforcer -f /vespa-src/pom.xml
""";
assertEquals(expectedErrorMessage, exception.getMessage());
} | class EnforceDependenciesAllProjectsTest {
private static final Path POM_FILE = Paths.get("/vespa-src/pom.xml");
@Test
void succeeds_dependencies_matches_spec() {
SortedSet<Dependency> dependencies = new TreeSet<>(Set.of(
Dependency.fromString("com.example:foo:1.2.3"),
Dependency.fromString("com.example:bar:2.3.4")));
Path specFile = Paths.get("src/test/resources/allowed-dependencies.txt");
assertDoesNotThrow(() -> validateDependencies(dependencies, specFile, POM_FILE, "my-dep-enforcer"));
}
@Test
void fails_on_forbidden_dependency() {
SortedSet<Dependency> dependencies = new TreeSet<>(Set.of(
Dependency.fromString("com.example:foo:1.2.3"),
Dependency.fromString("com.example:bar:2.3.4"),
Dependency.fromString("com.example:foobar:3.4.5")));
Path specFile = Paths.get("src/test/resources/allowed-dependencies.txt");
var exception = assertThrows(EnforcerRuleException.class,
() -> validateDependencies(dependencies, specFile, POM_FILE, "my-dep-enforcer"));
String expectedErrorMessage =
"""
The dependency enforcer failed:
Forbidden dependencies:
- com.example:foobar:3.4.5
Maven dependency validation failed. To update dependency spec run:
$ mvn validate -DdependencyEnforcer.writeSpec -pl my-dep-enforcer -f /vespa-src/pom.xml
""";
assertEquals(expectedErrorMessage, exception.getMessage());
}
    @Test
void writes_valid_spec_file(@TempDir Path tempDir) throws IOException {
SortedSet<Dependency> dependencies = new TreeSet<>(Set.of(
Dependency.fromString("com.example:foo:1.2.3"),
Dependency.fromString("com.example:bar:2.3.4")));
Path outputFile = tempDir.resolve("allowed-dependencies.txt");
writeDependencySpec(outputFile, dependencies);
assertEquals(
Files.readString(Paths.get("src/test/resources/allowed-dependencies.txt")),
Files.readString(outputFile));
}
} | class EnforceDependenciesAllProjectsTest {
private static final Path POM_FILE = Paths.get("/vespa-src/pom.xml");
@Test
void succeeds_dependencies_matches_spec() {
SortedSet<Dependency> dependencies = new TreeSet<>(Set.of(
Dependency.fromString("com.example:foo:1.2.3"),
Dependency.fromString("com.example:bar:2.3.4")));
Path specFile = Paths.get("src/test/resources/allowed-dependencies.txt");
assertDoesNotThrow(() -> validateDependencies(dependencies, specFile, POM_FILE, "my-dep-enforcer"));
}
@Test
void fails_on_forbidden_dependency() {
SortedSet<Dependency> dependencies = new TreeSet<>(Set.of(
Dependency.fromString("com.example:foo:1.2.3"),
Dependency.fromString("com.example:bar:2.3.4"),
Dependency.fromString("com.example:foobar:3.4.5")));
Path specFile = Paths.get("src/test/resources/allowed-dependencies.txt");
var exception = assertThrows(EnforcerRuleException.class,
() -> validateDependencies(dependencies, specFile, POM_FILE, "my-dep-enforcer"));
String expectedErrorMessage =
"""
The dependency enforcer failed:
Forbidden dependencies:
- com.example:foobar:3.4.5
Maven dependency validation failed. If this change was intentional, update the dependency spec by running:
$ mvn validate -DdependencyEnforcer.writeSpec -pl my-dep-enforcer -f /vespa-src/pom.xml
""";
assertEquals(expectedErrorMessage, exception.getMessage());
}
    @Test
void writes_valid_spec_file(@TempDir Path tempDir) throws IOException {
SortedSet<Dependency> dependencies = new TreeSet<>(Set.of(
Dependency.fromString("com.example:foo:1.2.3"),
Dependency.fromString("com.example:bar:2.3.4")));
Path outputFile = tempDir.resolve("allowed-dependencies.txt");
writeDependencySpec(outputFile, dependencies);
assertEquals(
Files.readString(Paths.get("src/test/resources/allowed-dependencies.txt")),
Files.readString(outputFile));
}
} |
Multi-regional endpoint, now guaranteed to pick the lexicographically first region. This is not in actual use. | void testDnsUpdatesForApplicationEndpoint() {
ApplicationId beta = ApplicationId.from("tenant1", "app1", "beta");
ApplicationId main = ApplicationId.from("tenant1", "app1", "main");
var context = tester.newDeploymentContext(beta);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,main")
.region("us-east-3")
.region("us-west-1")
.region("aws-us-east-1a")
.region("aws-us-east-1b")
.applicationEndpoint("a", "default",
Map.of("aws-us-east-1a", Map.of(beta.instance(), 2,
main.instance(), 8),
"aws-us-east-1b", Map.of(main.instance(), 1)))
.applicationEndpoint("b", "default", "aws-us-east-1a",
Map.of(beta.instance(), 1,
main.instance(), 1))
.applicationEndpoint("c", "default", "aws-us-east-1b",
Map.of(beta.instance(), 4))
.applicationEndpoint("d", "default", "us-west-1",
Map.of(main.instance(), 7,
beta.instance(), 3))
.applicationEndpoint("e", "default", "us-east-3",
Map.of(main.instance(), 3))
.build();
context.submit(applicationPackage).deploy();
ZoneId east3 = ZoneId.from("prod", "us-east-3");
ZoneId west1 = ZoneId.from("prod", "us-west-1");
ZoneId east1a = ZoneId.from("prod", "aws-us-east-1a");
ZoneId east1b = ZoneId.from("prod", "aws-us-east-1b");
Map<DeploymentId, Map<List<String>, Integer>> deploymentEndpoints = Map.of(
new DeploymentId(beta, east3), Map.of(),
new DeploymentId(main, east3), Map.of(List.of("e.app1.tenant1.a.vespa.oath.cloud", "e.app1.tenant1.us-east-3-r.vespa.oath.cloud"), 3),
new DeploymentId(beta, west1), Map.of(List.of("d.app1.tenant1.a.vespa.oath.cloud", "d.app1.tenant1.us-west-1-r.vespa.oath.cloud"), 3),
new DeploymentId(main, west1), Map.of(List.of("d.app1.tenant1.a.vespa.oath.cloud", "d.app1.tenant1.us-west-1-r.vespa.oath.cloud"), 7),
new DeploymentId(beta, east1a), Map.of(List.of("a.app1.tenant1.a.vespa.oath.cloud", "a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"), 2,
List.of("b.app1.tenant1.a.vespa.oath.cloud", "b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"), 1),
new DeploymentId(main, east1a), Map.of(List.of("a.app1.tenant1.a.vespa.oath.cloud", "a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"), 8,
List.of("b.app1.tenant1.a.vespa.oath.cloud", "b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"), 1),
new DeploymentId(beta, east1b), Map.of(List.of("c.app1.tenant1.a.vespa.oath.cloud", "c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud"), 4),
new DeploymentId(main, east1b), Map.of(List.of("a.app1.tenant1.a.vespa.oath.cloud", "a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"), 1)
);
deploymentEndpoints.forEach((deployment, endpoints) -> {
Set<ContainerEndpoint> expected = endpoints.entrySet().stream()
.map(kv -> new ContainerEndpoint("default", "application",
kv.getKey(),
OptionalInt.of(kv.getValue()),
tester.controller().zoneRegistry().routingMethod(deployment.zoneId())))
.collect(Collectors.toSet());
assertEquals(expected,
tester.configServer().containerEndpoints().get(deployment),
"Endpoint names for " + deployment + " are passed to config server");
});
context.flushDnsUpdates();
Set<Record> records = tester.controllerTester().nameService().records();
assertEquals(new TreeSet<>(Set.of(new Record(Record.Type.CNAME,
RecordName.from("beta.app1.tenant1.aws-us-east-1a.vespa.oath.cloud"),
RecordData.from("lb-0--tenant1.app1.beta--prod.aws-us-east-1a.")),
new Record(Record.Type.CNAME,
RecordName.from("beta.app1.tenant1.aws-us-east-1b.vespa.oath.cloud"),
RecordData.from("lb-0--tenant1.app1.beta--prod.aws-us-east-1b.")),
new Record(Record.Type.CNAME,
RecordName.from("main.app1.tenant1.aws-us-east-1a.vespa.oath.cloud"),
RecordData.from("lb-0--tenant1.app1.main--prod.aws-us-east-1a.")),
new Record(Record.Type.CNAME,
RecordName.from("main.app1.tenant1.aws-us-east-1b.vespa.oath.cloud"),
RecordData.from("lb-0--tenant1.app1.main--prod.aws-us-east-1b.")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.a.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/2")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.a.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/8")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.a.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/1")),
new Record(Record.Type.ALIAS,
RecordName.from("b.app1.tenant1.a.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
new Record(Record.Type.ALIAS,
RecordName.from("b.app1.tenant1.a.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
new Record(Record.Type.ALIAS,
RecordName.from("c.app1.tenant1.a.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/4")),
new Record(Record.Type.CNAME,
RecordName.from("d.app1.tenant1.a.vespa.oath.cloud"),
RecordData.from("vip.prod.us-west-1.")),
new Record(Record.Type.CNAME,
RecordName.from("e.app1.tenant1.a.vespa.oath.cloud"),
RecordData.from("vip.prod.us-east-3.")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/2")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/8")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/1")),
new Record(Record.Type.ALIAS,
RecordName.from("b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
new Record(Record.Type.ALIAS,
RecordName.from("b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
new Record(Record.Type.ALIAS,
RecordName.from("c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/4")),
new Record(Record.Type.CNAME,
RecordName.from("d.app1.tenant1.us-west-1-r.vespa.oath.cloud"),
RecordData.from("vip.prod.us-west-1.")),
new Record(Record.Type.CNAME,
RecordName.from("e.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
RecordData.from("vip.prod.us-east-3.")))),
new TreeSet<>(records));
List<String> endpointDnsNames = tester.controller().routing().declaredEndpointsOf(context.application())
.scope(Endpoint.Scope.application)
.sortedBy(comparing(Endpoint::dnsName))
.mapToList(Endpoint::dnsName);
assertEquals(List.of("a.app1.tenant1.a.vespa.oath.cloud",
"b.app1.tenant1.a.vespa.oath.cloud",
"c.app1.tenant1.a.vespa.oath.cloud",
"d.app1.tenant1.a.vespa.oath.cloud",
"e.app1.tenant1.a.vespa.oath.cloud"),
endpointDnsNames);
} | new DeploymentId(main, east1b), Map.of(List.of("a.app1.tenant1.a.vespa.oath.cloud", "a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"), 1) | void testDnsUpdatesForApplicationEndpoint() {
// Two instances (beta, main) of tenant1.app1 share five application-scoped
// endpoints 'a'..'e', each declared with explicit per-instance weights over
// four prod regions. Verifies the endpoint names/weights passed to the config
// server, the DNS records created after flushing, and the declared endpoint names.
ApplicationId beta = ApplicationId.from("tenant1", "app1", "beta");
ApplicationId main = ApplicationId.from("tenant1", "app1", "main");
var context = tester.newDeploymentContext(beta);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,main")
.region("us-east-3")
.region("us-west-1")
.region("aws-us-east-1a")
.region("aws-us-east-1b")
// 'a' spans two regions: beta=2 / main=8 in 1a, main=1 in 1b.
.applicationEndpoint("a", "default",
Map.of("aws-us-east-1a", Map.of(beta.instance(), 2,
main.instance(), 8),
"aws-us-east-1b", Map.of(main.instance(), 1)))
.applicationEndpoint("b", "default", "aws-us-east-1a",
Map.of(beta.instance(), 1,
main.instance(), 1))
.applicationEndpoint("c", "default", "aws-us-east-1b",
Map.of(beta.instance(), 4))
.applicationEndpoint("d", "default", "us-west-1",
Map.of(main.instance(), 7,
beta.instance(), 3))
.applicationEndpoint("e", "default", "us-east-3",
Map.of(main.instance(), 3))
.build();
context.submit(applicationPackage).deploy();
ZoneId east3 = ZoneId.from("prod", "us-east-3");
ZoneId west1 = ZoneId.from("prod", "us-west-1");
ZoneId east1a = ZoneId.from("prod", "aws-us-east-1a");
ZoneId east1b = ZoneId.from("prod", "aws-us-east-1b");
// Expected container endpoints per deployment: endpoint name list -> weight,
// exactly as they should have been handed to the config server on deploy.
Map<DeploymentId, Map<List<String>, Integer>> deploymentEndpoints = Map.of(
new DeploymentId(beta, east3), Map.of(),
new DeploymentId(main, east3), Map.of(List.of("e.app1.tenant1.a.vespa.oath.cloud", "e.app1.tenant1.us-east-3-r.vespa.oath.cloud"), 3),
new DeploymentId(beta, west1), Map.of(List.of("d.app1.tenant1.a.vespa.oath.cloud", "d.app1.tenant1.us-west-1-r.vespa.oath.cloud"), 3),
new DeploymentId(main, west1), Map.of(List.of("d.app1.tenant1.a.vespa.oath.cloud", "d.app1.tenant1.us-west-1-r.vespa.oath.cloud"), 7),
new DeploymentId(beta, east1a), Map.of(List.of("a.app1.tenant1.a.vespa.oath.cloud", "a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"), 2,
List.of("b.app1.tenant1.a.vespa.oath.cloud", "b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"), 1),
new DeploymentId(main, east1a), Map.of(List.of("a.app1.tenant1.a.vespa.oath.cloud", "a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"), 8,
List.of("b.app1.tenant1.a.vespa.oath.cloud", "b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"), 1),
new DeploymentId(beta, east1b), Map.of(List.of("c.app1.tenant1.a.vespa.oath.cloud", "c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud"), 4),
new DeploymentId(main, east1b), Map.of(List.of("a.app1.tenant1.a.vespa.oath.cloud", "a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"), 1)
);
// Check the config server saw exactly the expected ContainerEndpoint set per deployment.
deploymentEndpoints.forEach((deployment, endpoints) -> {
Set<ContainerEndpoint> expected = endpoints.entrySet().stream()
.map(kv -> new ContainerEndpoint("default", "application",
kv.getKey(),
OptionalInt.of(kv.getValue()),
tester.controller().zoneRegistry().routingMethod(deployment.zoneId())))
.collect(Collectors.toSet());
assertEquals(expected,
tester.configServer().containerEndpoints().get(deployment),
"Endpoint names for " + deployment + " are passed to config server");
});
context.flushDnsUpdates();
// After flushing DNS updates the name service holds per-instance/zone CNAMEs plus
// weighted ALIAS records (and plain CNAMEs) for every declared application endpoint,
// under both the 'a.' and region '-r' endpoint names.
Set<Record> records = tester.controllerTester().nameService().records();
assertEquals(new TreeSet<>(Set.of(new Record(Record.Type.CNAME,
RecordName.from("beta.app1.tenant1.aws-us-east-1a.vespa.oath.cloud"),
RecordData.from("lb-0--tenant1.app1.beta--prod.aws-us-east-1a.")),
new Record(Record.Type.CNAME,
RecordName.from("beta.app1.tenant1.aws-us-east-1b.vespa.oath.cloud"),
RecordData.from("lb-0--tenant1.app1.beta--prod.aws-us-east-1b.")),
new Record(Record.Type.CNAME,
RecordName.from("main.app1.tenant1.aws-us-east-1a.vespa.oath.cloud"),
RecordData.from("lb-0--tenant1.app1.main--prod.aws-us-east-1a.")),
new Record(Record.Type.CNAME,
RecordName.from("main.app1.tenant1.aws-us-east-1b.vespa.oath.cloud"),
RecordData.from("lb-0--tenant1.app1.main--prod.aws-us-east-1b.")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.a.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/2")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.a.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/8")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.a.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/1")),
new Record(Record.Type.ALIAS,
RecordName.from("b.app1.tenant1.a.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
new Record(Record.Type.ALIAS,
RecordName.from("b.app1.tenant1.a.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
new Record(Record.Type.ALIAS,
RecordName.from("c.app1.tenant1.a.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/4")),
new Record(Record.Type.CNAME,
RecordName.from("d.app1.tenant1.a.vespa.oath.cloud"),
RecordData.from("vip.prod.us-west-1.")),
new Record(Record.Type.CNAME,
RecordName.from("e.app1.tenant1.a.vespa.oath.cloud"),
RecordData.from("vip.prod.us-east-3.")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/2")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/8")),
new Record(Record.Type.ALIAS,
RecordName.from("a.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/1")),
new Record(Record.Type.ALIAS,
RecordName.from("b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
new Record(Record.Type.ALIAS,
RecordName.from("b.app1.tenant1.aws-us-east-1a-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.main--prod.aws-us-east-1a/dns-zone-1/prod.aws-us-east-1a/1")),
new Record(Record.Type.ALIAS,
RecordName.from("c.app1.tenant1.aws-us-east-1b-r.vespa.oath.cloud"),
RecordData.from("weighted/lb-0--tenant1.app1.beta--prod.aws-us-east-1b/dns-zone-1/prod.aws-us-east-1b/4")),
new Record(Record.Type.CNAME,
RecordName.from("d.app1.tenant1.us-west-1-r.vespa.oath.cloud"),
RecordData.from("vip.prod.us-west-1.")),
new Record(Record.Type.CNAME,
RecordName.from("e.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
RecordData.from("vip.prod.us-east-3.")))),
new TreeSet<>(records));
// Declared application-scope endpoints, sorted by DNS name, cover 'a' through 'e'.
List<String> endpointDnsNames = tester.controller().routing().declaredEndpointsOf(context.application())
.scope(Endpoint.Scope.application)
.sortedBy(comparing(Endpoint::dnsName))
.mapToList(Endpoint::dnsName);
assertEquals(List.of("a.app1.tenant1.a.vespa.oath.cloud",
"b.app1.tenant1.a.vespa.oath.cloud",
"c.app1.tenant1.a.vespa.oath.cloud",
"d.app1.tenant1.a.vespa.oath.cloud",
"e.app1.tenant1.a.vespa.oath.cloud"),
endpointDnsNames);
}
// Shared test harness; a fresh controller/deployment fixture per test instance.
private final DeploymentTester tester = new DeploymentTester();
// End-to-end deployment lifecycle: submit + run jobs, controller restart survival,
// rejection of invalid deployment specs, deployment removal with validation
// override, and cleanup of stored application metadata on delete.
@Test
void testDeployment() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.explicitEnvironment(Environment.dev, Environment.perf)
.region("us-west-1")
.region("us-east-3")
.build();
// NOTE(review): version1 is never used below — looks like leftover; confirm
// initialVersion() has no required side effect before removing.
Version version1 = tester.configServer().initialVersion();
var context = tester.newDeploymentContext();
context.submit(applicationPackage);
assertEquals(ApplicationVersion.from(RevisionId.forProduction(1), DeploymentContext.defaultSourceRevision, "a@b", new Version("6.1"), Instant.ofEpochSecond(1)),
context.application().revisions().get(context.instance().change().revision().get()),
"Application version is known from completion of initial job");
context.runJob(systemTest);
context.runJob(stagingTest);
RevisionId applicationVersion = context.instance().change().revision().get();
assertTrue(applicationVersion.isProduction(), "Application version has been set during deployment");
tester.triggerJobs();
tester.clock().advance(Duration.ofSeconds(1));
// Let the us-west-1 production job time out; four job types should be recorded.
context.timeOutUpgrade(productionUsWest1);
assertEquals(4, context.instanceJobs().size());
tester.triggerJobs();
// Simulate controller restart: tenant and instance state must survive.
tester.controllerTester().createNewController();
assertNotNull(tester.controller().tenants().get(TenantName.from("tenant1")));
assertNotNull(tester.controller().applications().requireInstance(context.instanceId()));
context.submit(applicationPackage);
context.runJob(systemTest);
context.runJob(stagingTest);
context.triggerJobs().jobAborted(productionUsWest1);
context.runJob(productionUsWest1);
tester.triggerJobs();
context.runJob(productionUsEast3);
assertEquals(4, context.instanceJobs().size());
// Invalid instance name is rejected with a descriptive message.
applicationPackage = new ApplicationPackageBuilder()
.instances("hellO")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception due to illegal deployment spec.");
}
catch (IllegalArgumentException e) {
assertEquals("Invalid id 'hellO'. Tenant, application and instance names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.", e.getMessage());
}
// Unknown region is rejected.
applicationPackage = new ApplicationPackageBuilder()
.region("deep-space-9")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception due to illegal deployment spec.");
}
catch (IllegalArgumentException e) {
assertEquals("Zone prod.deep-space-9 in deployment spec was not found in this system!", e.getMessage());
}
// Dropping a deployed region without a validation override is rejected ...
applicationPackage = new ApplicationPackageBuilder()
.region("us-east-3")
.build();
try {
assertTrue(context.instance().deployments().containsKey(ZoneId.from("prod", "us-west-1")));
context.submit(applicationPackage);
fail("Expected exception due to illegal production deployment removal");
}
catch (IllegalArgumentException e) {
assertEquals("deployment-removal: application instance 'tenant.application.default' is deployed in us-west-1, " +
"but this instance and region combination is removed from deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval),
e.getMessage());
}
assertNotNull(context.instance().deployments().get(productionUsWest1.zone()),
"Zone was not removed");
// ... but succeeds when deployment-removal is explicitly allowed.
applicationPackage = new ApplicationPackageBuilder()
.allow(ValidationId.deploymentRemoval)
.upgradePolicy("default")
.region("us-east-3")
.build();
context.submit(applicationPackage);
assertNull(context.instance().deployments().get(productionUsWest1.zone()),
"Zone was removed");
assertNull(context.instanceJobs().get(productionUsWest1), "Deployment job was removed");
// Metadata is stored while the application exists ...
assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.instanceId())
.get(tester.clock().instant()));
tester.clock().advance(Duration.ofSeconds(1));
context.submit(ApplicationPackage.deploymentRemoval());
tester.clock().advance(Duration.ofSeconds(1));
context.submit(ApplicationPackage.deploymentRemoval());
tester.applications().deleteApplication(context.application().id(),
tester.controllerTester().credentialsFor(context.instanceId().tenant()));
// ... and cleared (empty blob / null) after the application is deleted.
assertArrayEquals(new byte[0],
tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.instanceId())
.get(tester.clock().instant()));
assertNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.deploymentIdIn(productionUsWest1.zone())));
}
/** Routing status is tracked per deployment: taking one zone out leaves the other in. */
@Test
void testGlobalRotationStatus() {
var context = tester.newDeploymentContext();
var west = ZoneId.from("prod", "us-west-1");
var east = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.region(west.region())
.region(east.region())
.endpoint("default", "default", west.region().value(), east.region().value())
.build();
context.submit(applicationPackage).deploy();
// The west deployment starts with routing status 'in'.
DeploymentRoutingContext westRouting = tester.controller().routing().of(context.deploymentIdIn(west));
assertEquals(RoutingStatus.Value.in, westRouting.routingStatus().value());
// An operator takes the west deployment out of rotation ...
westRouting.setRoutingStatus(RoutingStatus.Value.out, RoutingStatus.Agent.operator);
assertEquals(RoutingStatus.Value.out, westRouting.routingStatus().value());
// ... which must not affect the east deployment.
RoutingStatus eastStatus = tester.controller().routing().of(context.deploymentIdIn(east)).routingStatus();
assertEquals(RoutingStatus.Value.in, eastStatus.value());
}
// Two instances each get their own global endpoint and rotation; verifies the
// container endpoints passed to the config server per instance, the rotation
// CNAME records created, and the declared global DNS names per instance.
@Test
void testDnsUpdatesForGlobalEndpoint() {
var betaContext = tester.newDeploymentContext("tenant1", "app1", "beta");
var defaultContext = tester.newDeploymentContext("tenant1", "app1", "default");
ZoneId usWest = ZoneId.from("prod.us-west-1");
ZoneId usCentral = ZoneId.from("prod.us-central-1");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,default")
.endpoint("default", "foo")
.region(usWest.region())
.region(usCentral.region())
.build();
tester.controllerTester().zoneRegistry().setRoutingMethod(List.of(ZoneApiMock.from(usWest), ZoneApiMock.from(usCentral)),
RoutingMethod.sharedLayer4);
betaContext.submit(applicationPackage).deploy();
// beta instance: endpoint name includes the instance, rotation-id-01.
{
Collection<Deployment> betaDeployments = betaContext.instance().deployments().values();
assertFalse(betaDeployments.isEmpty());
Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
"global",
List.of("beta.app1.tenant1.global.vespa.oath.cloud",
"rotation-id-01"),
OptionalInt.empty(),
RoutingMethod.sharedLayer4));
for (Deployment deployment : betaDeployments) {
assertEquals(containerEndpoints,
tester.configServer().containerEndpoints()
.get(betaContext.deploymentIdIn(deployment.zone())));
}
betaContext.flushDnsUpdates();
}
// default instance: no instance prefix in the name, rotation-id-02.
{
Collection<Deployment> defaultDeployments = defaultContext.instance().deployments().values();
assertFalse(defaultDeployments.isEmpty());
Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
"global",
List.of("app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02"),
OptionalInt.empty(),
RoutingMethod.sharedLayer4));
for (Deployment deployment : defaultDeployments) {
assertEquals(containerEndpoints,
tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone())));
}
defaultContext.flushDnsUpdates();
}
// Each global endpoint name resolves to its rotation FQDN via a CNAME.
Map<String, String> rotationCnames = Map.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.",
"app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02.");
rotationCnames.forEach((cname, data) -> {
var record = tester.controllerTester().findCname(cname);
assertTrue(record.isPresent());
assertEquals(cname, record.get().name().asString());
assertEquals(data, record.get().data().asString());
});
// Declared global-scope DNS names per instance.
Map<ApplicationId, Set<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), Set.of("beta.app1.tenant1.global.vespa.oath.cloud"),
defaultContext.instanceId(), Set.of("app1.tenant1.global.vespa.oath.cloud"));
globalDnsNamesByInstance.forEach((instance, dnsNames) -> {
Set<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance)
.scope(Endpoint.Scope.global)
.asList().stream()
.map(Endpoint::dnsName)
.collect(Collectors.toSet());
assertEquals(dnsNames, actualDnsNames, "Global DNS names for " + instance);
});
}
// The legacy globalServiceId syntax behaves like a single global endpoint:
// one rotation, one CNAME record, one declared global DNS name.
@Test
void testDnsUpdatesForGlobalEndpointLegacySyntax() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.globalServiceId("foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
Collection<Deployment> deployments = context.instance().deployments().values();
assertFalse(deployments.isEmpty());
// Every deployment gets the rotation id and the global endpoint name.
for (Deployment deployment : deployments) {
assertEquals(Set.of("rotation-id-01",
"app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
"Rotation names are passed to config server in " + deployment.zone());
}
context.flushDnsUpdates();
// Exactly one CNAME record, pointing at the rotation FQDN.
assertEquals(1, tester.controllerTester().nameService().records().size());
Optional<Record> record = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
assertTrue(record.isPresent());
assertEquals("app1.tenant1.global.vespa.oath.cloud", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
List<String> globalDnsNames = tester.controller().routing().readDeclaredEndpointsOf(context.instanceId())
.scope(Endpoint.Scope.global)
.sortedBy(comparing(Endpoint::dnsName))
.mapToList(Endpoint::dnsName);
assertEquals(List.of("app1.tenant1.global.vespa.oath.cloud"),
globalDnsNames);
}
// Four global endpoints with different region memberships: only us-west-1
// deployments see the 'west' endpoint; each endpoint gets its own rotation
// and CNAME record.
@Test
void testDnsUpdatesForMultipleGlobalEndpoints() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("foobar", "qrs", "us-west-1", "us-central-1")
.endpoint("default", "qrs", "us-west-1", "us-central-1")
.endpoint("all", "qrs")
.endpoint("west", "qrs", "us-west-1")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
Collection<Deployment> deployments = context.instance().deployments().values();
assertFalse(deployments.isEmpty());
// Endpoints common to all regions; 'west' is added only for us-west-1.
var notWest = Set.of(
"rotation-id-01", "foobar.app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02", "app1.tenant1.global.vespa.oath.cloud",
"rotation-id-03", "all.app1.tenant1.global.vespa.oath.cloud"
);
var west = Sets.union(notWest, Set.of("rotation-id-04", "west.app1.tenant1.global.vespa.oath.cloud"));
for (Deployment deployment : deployments) {
assertEquals(ZoneId.from("prod.us-west-1").equals(deployment.zone()) ? west : notWest,
tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
"Rotation names are passed to config server in " + deployment.zone());
}
context.flushDnsUpdates();
// One CNAME per endpoint, each pointing at its rotation FQDN.
assertEquals(4, tester.controllerTester().nameService().records().size());
var record1 = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
assertTrue(record1.isPresent());
assertEquals("app1.tenant1.global.vespa.oath.cloud", record1.get().name().asString());
assertEquals("rotation-fqdn-02.", record1.get().data().asString());
var record2 = tester.controllerTester().findCname("foobar.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record2.isPresent());
assertEquals("foobar.app1.tenant1.global.vespa.oath.cloud", record2.get().name().asString());
assertEquals("rotation-fqdn-01.", record2.get().data().asString());
var record3 = tester.controllerTester().findCname("all.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record3.isPresent());
assertEquals("all.app1.tenant1.global.vespa.oath.cloud", record3.get().name().asString());
assertEquals("rotation-fqdn-03.", record3.get().data().asString());
var record4 = tester.controllerTester().findCname("west.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record4.isPresent());
assertEquals("west.app1.tenant1.global.vespa.oath.cloud", record4.get().name().asString());
assertEquals("rotation-fqdn-04.", record4.get().data().asString());
}
// Evolving global endpoint definitions across submissions: adding endpoints and
// regions is fine, but removing or shrinking an endpoint requires the
// global-endpoint-change validation override.
@Test
void testDnsUpdatesForGlobalEndpointChanges() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
var west = ZoneId.from("prod", "us-west-1");
var central = ZoneId.from("prod", "us-central-1");
var east = ZoneId.from("prod", "us-east-3");
// Initial: one endpoint over west + central.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage).deploy();
for (var zone : List.of(west, central)) {
assertEquals(
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
,
"Zone " + zone + " is a member of global endpoint");
}
// Adding a second endpoint ('east') needs no override.
ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage2).deploy();
for (var zone : List.of(west, central)) {
assertEquals(
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
,
"Zone " + zone + " is a member of global endpoint");
}
assertEquals(
Set.of("rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(east))
,
"Zone " + east + " is a member of global endpoint");
// Growing 'default' to include east is also allowed; east now serves both.
ApplicationPackage applicationPackage3 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value(), east.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage3).deploy();
for (var zone : List.of(west, central, east)) {
assertEquals(
zone.equals(east)
? Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud")
: Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
,
"Zone " + zone + " is a member of global endpoint");
}
// Shrinking 'default' back is rejected without an override ...
ApplicationPackage applicationPackage4 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage4);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
"[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
"but does not include all of these in deployment.xml. Deploying given deployment.xml " +
"will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1] " +
"and add [endpoint 'default' (cluster qrs) -> us-central-1, us-west-1]. " +
ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
}
// ... and so is removing the endpoint entirely ...
ApplicationPackage applicationPackage5 = new ApplicationPackageBuilder()
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage5);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
"[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
"but does not include all of these in deployment.xml. Deploying given deployment.xml " +
"will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1]. " +
ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
}
// ... unless the global-endpoint-change override is present.
ApplicationPackage applicationPackage6 = new ApplicationPackageBuilder()
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage6);
}
/** Removing a global endpoint from deployment.xml releases the assigned rotation. */
@Test
void testUnassignRotations() {
var context = tester.newDeploymentContext();
// First deploy with a global endpoint spanning both regions.
ApplicationPackage withEndpoint = new ApplicationPackageBuilder()
.endpoint("default", "qrs", "us-west-1", "us-central-1")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(withEndpoint).deploy();
// Then redeploy without the endpoint, explicitly allowing the change.
ApplicationPackage withoutEndpoint = new ApplicationPackageBuilder()
.region("us-west-1")
.region("us-central-1")
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(withoutEndpoint).deploy();
// The rotation is unassigned and no container endpoints remain at the config server.
assertEquals(List.of(), context.instance().rotations());
assertEquals(Set.of(),
tester.configServer().containerEndpoints().get(context.deploymentIdIn(ZoneId.from("prod", "us-west-1"))));
}
// Rotations are recycled: after app1 is deleted, its rotation (id-01) is freed,
// handed to app2, and a re-created app1 gets the next rotation (id-02) with
// correspondingly updated CNAME records.
@Test
void testDnsUpdatesWithChangeInRotationAssignment() {
String dnsName1 = "app1.tenant1.global.vespa.oath.cloud";
// Create app1, verify its CNAME, then delete it and verify the rotation is freed.
{
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals(1, tester.controllerTester().nameService().records().size());
{
Optional<Record> record = tester.controllerTester().findCname(dnsName1);
assertTrue(record.isPresent());
assertEquals(dnsName1, record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
applicationPackage = new ApplicationPackageBuilder()
.allow(ValidationId.deploymentRemoval)
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage);
tester.applications().deleteApplication(context.application().id(),
tester.controllerTester().credentialsFor(context.application().id().tenant()));
try (RotationLock lock = tester.controller().routing().rotations().lock()) {
assertTrue(tester.controller().routing().rotations().availableRotations(lock)
.containsKey(new RotationId("rotation-id-01")),
"Rotation is unassigned");
}
context.flushDnsUpdates();
Optional<Record> record = tester.controllerTester().findCname(dnsName1);
assertTrue(record.isEmpty(), dnsName1 + " is removed");
}
String dnsName2 = "app2.tenant2.global.vespa.oath.cloud";
// app2 is assigned the freed rotation (id-01).
{
var context = tester.newDeploymentContext("tenant2", "app2", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals(1, tester.controllerTester().nameService().records().size());
var record = tester.controllerTester().findCname(dnsName2);
assertTrue(record.isPresent());
assertEquals(dnsName2, record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
// Re-created app1 gets the next free rotation (id-02); both CNAMEs coexist.
{
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals("rotation-id-02", context.instance().rotations().get(0).rotationId().asString());
assertEquals(2, tester.controllerTester().nameService().records().size());
var record1 = tester.controllerTester().findCname(dnsName1);
assertTrue(record1.isPresent());
assertEquals("rotation-fqdn-02.", record1.get().data().asString());
var record2 = tester.controllerTester().findCname(dnsName2);
assertTrue(record2.isPresent());
assertEquals("rotation-fqdn-01.", record2.get().data().asString());
}
}
/**
 * Deploying directly to a dev zone activates the application without any job
 * orchestration: no job status is recorded, no deployment spec is stored, and the
 * deployment's endpoints use the routing method configured for the zone.
 * Deactivation replaces the stored application metadata with an empty blob.
 *
 * Fixes: the {@code @Test} annotation was duplicated, which does not compile
 * (JUnit's {@code @Test} is not repeatable); also swaps assertEquals arguments
 * into the conventional expected-first order for the routing-method check.
 */
@Test
void testDevDeployment() {
ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]);
var context = tester.newDeploymentContext();
ZoneId zone = ZoneId.from("dev", "us-east-1");
// The dev zone routes via shared layer-4 in this test setup.
tester.controllerTester().zoneRegistry()
.setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.sharedLayer4);
context.runJob(zone, applicationPackage);
assertTrue(tester.configServer().application(context.instanceId(), zone).get().activated(),
"Application deployed and activated");
assertTrue(context.instanceJobs().isEmpty(),
"No job status added");
assertEquals(DeploymentSpec.empty, context.application().deploymentSpec(), "DeploymentSpec is not stored");
// All endpoints of the dev deployment use the zone's routing method.
Set<RoutingMethod> routingMethods = tester.controller().routing().readEndpointsOf(context.deploymentIdIn(zone))
.asList()
.stream()
.map(Endpoint::routingMethod)
.collect(Collectors.toSet());
assertEquals(Set.of(RoutingMethod.sharedLayer4), routingMethods);
// Metadata exists while active ...
assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(new DeploymentId(context.instanceId(), zone))
.get(tester.clock().instant()));
tester.clock().advance(Duration.ofSeconds(1));
tester.controller().applications().deactivate(context.instanceId(), zone);
// ... and is an empty blob at the instant after deactivation.
assertArrayEquals(new byte[0],
tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(new DeploymentId(context.instanceId(), zone))
.get(tester.clock().instant()));
}
// Dev deployments must respect version compatibility: with major 8 flagged as
// incompatible, various combinations of compile version and majorVersion in
// deployment.xml are rejected with specific messages until a compatible
// platform exists.
@Test
void testDevDeploymentWithIncompatibleVersions() {
Version version1 = new Version("7");
Version version2 = new Version("7.5");
Version version3 = new Version("8");
var context = tester.newDeploymentContext();
// Declare major 8 as incompatible with earlier majors.
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
tester.controllerTester().upgradeSystem(version2);
tester.newDeploymentContext("keep", "v2", "alive").submit().deploy();
ZoneId zone = ZoneId.from("dev", "us-east-1");
// Compiling against 7 deploys on the newest compatible platform (7.5).
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).build());
assertEquals(version2, context.deployment(zone).version());
assertEquals(Optional.of(version1), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
fail("Should fail when specifying a major that does not yet exist");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms were found for major version 8 specified in deployment.xml", e.getMessage());
}
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
fail("Should fail when compiled against a version which is only compatible with not-yet-existent versions");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms are compatible with compile version 8", e.getMessage());
}
// Platform 8 now exists, but compile version 7 is still incompatible with it.
tester.controllerTester().upgradeSystem(version3);
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
fail("Should fail when specifying a major which is incompatible with compile version");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms on major version 8 specified in deployment.xml are compatible with compile version 7", e.getMessage());
}
// Compile version 8 deploys on platform 8, with or without an explicit majorVersion.
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).majorVersion(8).build());
assertEquals(version3, context.deployment(zone).version());
assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
assertEquals(version3, context.deployment(zone).version());
assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
}
/** Suspension state reported by the config server is tracked per deployment. */
@Test
void testSuspension() {
var context = tester.newDeploymentContext();
ApplicationPackage pkg = new ApplicationPackageBuilder()
.region("us-west-1")
.region("us-east-3")
.build();
context.submit(pkg).deploy();
DeploymentId west = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
DeploymentId east = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-east-3")));
// Neither deployment is suspended initially.
assertFalse(tester.configServer().isSuspended(west));
assertFalse(tester.configServer().isSuspended(east));
// Suspending one deployment must leave the other untouched.
tester.configServer().setSuspension(west, true);
assertTrue(tester.configServer().isSuspended(west));
assertFalse(tester.configServer().isSuspended(east));
}
/** Deactivating an already-deactivated deployment must be a no-op, not an error. */
@Test
void testDeletingApplicationThatHasAlreadyBeenDeleted() {
var context = tester.newDeploymentContext();
ZoneId zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
ApplicationPackage pkg = new ApplicationPackageBuilder()
.region("us-west-1")
.build();
context.submit(pkg).runJob(zone, pkg);
// The second deactivate targets a deployment that is already gone; it must not throw.
tester.controller().applications().deactivate(context.instanceId(), zone);
tester.controller().applications().deactivate(context.instanceId(), zone);
}
@Test
void testDeployApplicationWithWarnings() {
    // Warnings reported by the config server during deployment must surface in deployment metrics.
    var ctx = tester.newDeploymentContext();
    ZoneId zone = ZoneId.from("prod", "us-west-1");
    int expectedWarnings = 3;
    tester.configServer().generateWarnings(ctx.deploymentIdIn(zone), expectedWarnings);
    ctx.submit(new ApplicationPackageBuilder()
                       .region("us-west-1")
                       .build())
       .deploy();
    assertEquals(expectedWarnings,
                 ctx.deployment(zone).metrics().warnings().get(DeploymentMetrics.Warning.all).intValue());
}
// Verifies endpoint-certificate provisioning: a certificate is provisioned on first deployment,
// covers the anonymous, global and per-zone DNS names, is reused on resubmission, and is also
// provisioned for dev deployments.
@Test
void testDeploySelectivelyProvisionsCertificate() {
// Helper: reads the stored certificate metadata for an instance, if any.
Function<Instance, Optional<EndpointCertificateMetadata>> certificate = (application) -> tester.controller().curator().readEndpointCertificateMetadata(application.id());
var context1 = tester.newDeploymentContext("tenant1", "app1", "default");
var prodZone = ZoneId.from("prod", "us-west-1");
var stagingZone = ZoneId.from("staging", "us-east-3");
var testZone = ZoneId.from("test", "us-east-1");
// Only the prod zone uses exclusive (direct) routing here.
tester.controllerTester().zoneRegistry().exclusiveRoutingIn(ZoneApiMock.from(prodZone));
var applicationPackage = new ApplicationPackageBuilder().athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
.region(prodZone.region())
.build();
context1.submit(applicationPackage).deploy();
var cert = certificate.apply(context1.instance());
assertTrue(cert.isPresent(), "Provisions certificate in " + Environment.prod);
// Expected SANs: the anonymous name, the global names, and wildcard + apex names for
// each of prod/test/staging zones (environment suffix omitted for prod).
assertEquals(Stream.concat(Stream.of("vznqtz7a5ygwjkbhhj7ymxvlrekgt4l6g.vespa.oath.cloud",
"app1.tenant1.global.vespa.oath.cloud",
"*.app1.tenant1.global.vespa.oath.cloud"),
Stream.of(prodZone, testZone, stagingZone)
.flatMap(zone -> Stream.of("", "*.")
.map(prefix -> prefix + "app1.tenant1." + zone.region().value() +
(zone.environment() == Environment.prod ? "" : "." + zone.environment().value()) +
".vespa.oath.cloud")))
.collect(Collectors.toUnmodifiableSet()),
Set.copyOf(tester.controllerTester().serviceRegistry().endpointCertificateMock().dnsNamesOf(context1.instanceId())));
// Redeploying the same package must reuse the existing certificate, not provision a new one.
context1.submit(applicationPackage).deploy();
assertEquals(cert, certificate.apply(context1.instance()));
// A dev deployment of a second application also gets a certificate.
var context2 = tester.newDeploymentContext("tenant1", "app2", "default");
var devZone = ZoneId.from("dev", "us-east-1");
context2.runJob(devZone, applicationPackage);
assertTrue(tester.configServer().application(context2.instanceId(), devZone).get().activated(),
"Application deployed and activated");
assertTrue(certificate.apply(context2.instance()).isPresent(), "Provisions certificate also in zone with routing layer");
}
@Test
void testDeployWithGlobalEndpointsInMultipleClouds() {
    // A global endpoint may not span regions in different clouds; submission must be rejected.
    tester.controllerTester().zoneRegistry().setZones(
            ZoneApiMock.fromId("test.us-west-1"),
            ZoneApiMock.fromId("staging.us-west-1"),
            ZoneApiMock.fromId("prod.us-west-1"),
            ZoneApiMock.newBuilder().with(CloudName.AWS).withId("prod.aws-us-east-1").build()
    );
    var context = tester.newDeploymentContext();
    // Endpoint with no explicit regions implicitly spans all declared regions — both clouds.
    var applicationPackage = new ApplicationPackageBuilder()
            .region("aws-us-east-1")
            .region("us-west-1")
            .endpoint("default", "default")
            .build();
    // Use assertThrows for consistency with the rest of this class instead of try/fail/catch.
    assertEquals("Endpoint 'default' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]",
                 assertThrows(IllegalArgumentException.class,
                              () -> context.submit(applicationPackage))
                         .getMessage());
    // An explicit region list spanning both clouds is rejected too, even when a sibling
    // endpoint ('aws') is confined to a single cloud and would be valid on its own.
    var applicationPackage2 = new ApplicationPackageBuilder()
            .region("aws-us-east-1")
            .region("us-west-1")
            .endpoint("aws", "default", "aws-us-east-1")
            .endpoint("foo", "default", "aws-us-east-1", "us-west-1")
            .build();
    assertEquals("Endpoint 'foo' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]",
                 assertThrows(IllegalArgumentException.class,
                              () -> context.submit(applicationPackage2))
                         .getMessage());
}
@Test
void testDeployWithoutSourceRevision() {
    // A submission without source-revision metadata must still produce a deployable build.
    var ctx = tester.newDeploymentContext();
    var pkg = new ApplicationPackageBuilder()
            .upgradePolicy("default")
            .region("us-west-1")
            .build();
    ctx.submit(pkg, Optional.empty())
       .deploy();
    assertEquals(1, ctx.instance().deployments().size(), "Deployed application");
}
// Verifies the DNS records created for global endpoints when the member zones use
// different routing methods (sharedLayer4 in zone1, exclusive in zone2).
@Test
void testDeployWithGlobalEndpointsAndMultipleRoutingMethods() {
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
// 'default' spans both zones; 'east' only the exclusively-routed zone.
.endpoint("default", "default", zone1.region().value(), zone2.region().value())
.endpoint("east", "default", zone2.region().value())
.region(zone1.region())
.region(zone2.region())
.build();
tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone1), RoutingMethod.sharedLayer4);
tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone2), RoutingMethod.exclusive);
context.submit(applicationPackage).deploy();
// Expected records: a weighted ALIAS for the zone 2 regional endpoint, a latency ALIAS
// for the 'east' global endpoint pointing at it, and a CNAME for zone 2's plain zone endpoint.
var expectedRecords = List.of(
new Record(Record.Type.ALIAS,
RecordName.from("application.tenant.us-east-3-w.vespa.oath.cloud"),
new WeightedAliasTarget(HostName.of("lb-0--tenant.application.default--prod.us-east-3"),
"dns-zone-1", ZoneId.from("prod.us-east-3"), 1).pack()),
new Record(Record.Type.ALIAS,
RecordName.from("east.application.tenant.global.vespa.oath.cloud"),
new LatencyAliasTarget(HostName.of("application.tenant.us-east-3-w.vespa.oath.cloud"),
"dns-zone-1", ZoneId.from("prod.us-east-3")).pack()),
new Record(Record.Type.CNAME,
RecordName.from("application.tenant.us-east-3.vespa.oath.cloud"),
RecordData.from("lb-0--tenant.application.default--prod.us-east-3.")));
assertEquals(expectedRecords, List.copyOf(tester.controllerTester().nameService().records()));
}
@Test
void testDeploymentDirectRouting() {
    // Tester with no rotations configured, so endpoints rely purely on direct (exclusive) routing.
    DeploymentTester directTester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build(), main));
    var ctx = directTester.newDeploymentContext();
    var westZone = ZoneId.from("prod", "us-west-1");
    var eastZone = ZoneId.from("prod", "us-east-3");
    var euZone = ZoneId.from("prod", "eu-west-1");
    directTester.controllerTester().zoneRegistry()
                .exclusiveRoutingIn(ZoneApiMock.from(westZone), ZoneApiMock.from(eastZone), ZoneApiMock.from(euZone));
    var packageBuilder = new ApplicationPackageBuilder()
            .region(westZone.region())
            .region(eastZone.region())
            .region(euZone.region())
            .endpoint("default", "default")
            .endpoint("foo", "qrs")
            .endpoint("us", "default", westZone.region().value(), eastZone.region().value())
            .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"));
    ctx.submit(packageBuilder.build()).deploy();
    // The two zones that are members of the 'us' endpoint receive all three endpoint names.
    for (var zone : List.of(westZone, eastZone)) {
        assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
                            "foo.application.tenant.global.vespa.oath.cloud",
                            "us.application.tenant.global.vespa.oath.cloud"),
                     directTester.configServer().containerEndpointNames(ctx.deploymentIdIn(zone)),
                     "Expected container endpoints in " + zone);
    }
    // The remaining zone only receives the endpoints it is a member of.
    assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
                        "foo.application.tenant.global.vespa.oath.cloud"),
                 directTester.configServer().containerEndpointNames(ctx.deploymentIdIn(euZone)),
                 "Expected container endpoints in " + euZone);
}
// Verifies that moving a global endpoint to a different cluster is rejected unless the
// 'global-endpoint-change' validation override is present in the package.
@Test
void testChangeEndpointCluster() {
var context = tester.newDeploymentContext();
var west = ZoneId.from("prod", "us-west-1");
var east = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region(west.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage).deploy();
assertEquals(ClusterSpec.Id.from("foo"), tester.applications().requireInstance(context.instanceId())
.rotations().get(0).clusterId());
// Re-pointing the endpoint from cluster 'foo' to 'bar' without an override must fail.
applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "bar")
.region(west.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage).deploy();
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant.application' has endpoints [endpoint " +
"'default' (cluster foo) -> us-east-3, us-west-1], but does not include all of these in " +
"deployment.xml. Deploying given deployment.xml will remove " +
"[endpoint 'default' (cluster foo) -> us-east-3, us-west-1] and add " +
"[endpoint 'default' (cluster bar) -> us-east-3, us-west-1]. To allow this add " +
"<allow until='yyyy-mm-dd'>global-endpoint-change</allow> to validation-overrides.xml, see " +
// NOTE(review): the line below was truncated in extraction — the "https://..." URL literal and
// the closing of this assertEquals are missing; restore the original line from version control.
"https:
}
// With the validation override the cluster change is accepted.
applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "bar")
.region(west.region().value())
.region(east.region().value())
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage).deploy();
assertEquals(ClusterSpec.Id.from("bar"), tester.applications().requireInstance(context.instanceId())
.rotations().get(0).clusterId());
}
@Test
void testReadableApplications() {
    // readable() must skip applications whose stored data cannot be deserialized,
    // while asList() must fail hard on them.
    var db = new MockCuratorDb(tester.controller().system());
    var tester = new DeploymentTester(new ControllerTester(db)); // shadows the field on purpose: needs this mock db
    var app1 = tester.newDeploymentContext("t1", "a1", "default")
            .submit()
            .deploy();
    var app2 = tester.newDeploymentContext("t2", "a2", "default")
            .submit()
            .deploy();
    assertEquals(2, tester.applications().readable().size());
    // Corrupt app2's serialized form directly in the curator store.
    db.curator().set(Path.fromString("/controller/v1/applications/" + app2.application().id().serialized()),
                     new byte[]{(byte) 0xDE, (byte) 0xAD});
    assertEquals(1, tester.applications().readable().size());
    // assertThrows replaces the try/fail/catch-ignored pattern, consistent with the rest of this class.
    assertThrows(Exception.class, () -> tester.applications().asList());
    // The unaffected application can still be redeployed.
    app1.submit().deploy();
}
@Test
void testClashingEndpointIdAndInstanceName() {
    // Endpoint id "dev" in instance "default" produces the same endpoint name as endpoint
    // "default" in instance "dev" — the submission must be rejected as ambiguous.
    String deploymentXml = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" +
                           "  <instance id=\"default\">\n" +
                           "    <prod>\n" +
                           "      <region active=\"true\">us-west-1</region>\n" +
                           "    </prod>\n" +
                           "    <endpoints>\n" +
                           "      <endpoint id=\"dev\" container-id=\"qrs\"/>\n" +
                           "    </endpoints>\n" +
                           "  </instance>\n" +
                           "  <instance id=\"dev\">\n" +
                           "    <prod>\n" +
                           "      <region active=\"true\">us-west-1</region>\n" +
                           "    </prod>\n" +
                           "    <endpoints>\n" +
                           "      <endpoint id=\"default\" container-id=\"qrs\"/>\n" +
                           "    </endpoints>\n" +
                           "  </instance>\n" +
                           "</deployment>\n";
    ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
    // Use assertThrows for consistency with the rest of this class instead of try/fail/catch.
    assertEquals("Endpoint with ID 'default' in instance 'dev' clashes with endpoint 'dev' in instance 'default'",
                 assertThrows(IllegalArgumentException.class,
                              () -> tester.newDeploymentContext().submit(applicationPackage))
                         .getMessage());
}
// Verifies that a test package containing staging tests but no staging setup produces a
// warning notification on submission.
@Test
void testTestPackageWarnings() {
String deploymentXml = "<deployment version='1.0'>\n" +
"  <prod>\n" +
"    <region>us-west-1</region>\n" +
"  </prod>\n" +
"</deployment>\n";
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
// Test package with a staging test but no staging setup file.
byte[] testPackage = ApplicationPackage.filesZip(Map.of("tests/staging-test/foo.json", new byte[0]));
var app = tester.newDeploymentContext();
tester.jobs().submit(app.application().id(), Submission.basic(applicationPackage, testPackage), 1);
assertEquals(List.of(new Notification(tester.clock().instant(),
Type.testPackage,
Level.warning,
NotificationSource.from(app.application().id()),
List.of("test package has staging tests, so it should also include staging setup",
// NOTE(review): the line below was truncated in extraction — the "https://..." URL literal and
// the closing of this List.of/assertEquals are missing; restore the original line from version control.
"see https:
tester.controller().notificationsDb().listNotifications(NotificationSource.from(app.application().id()), true));
}
// Verifies compileVersion() across version upgrades, confidence changes, deployed-version
// pinning, and the INCOMPATIBLE_VERSIONS flag. The statement order is significant: each
// phase mutates system state (versions, confidence, deployments) that later phases depend on.
@Test
void testCompileVersion() {
DeploymentContext context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build();
TenantAndApplicationId application = TenantAndApplicationId.from(context.instanceId());
// Phase 1: only 7.1 exists; asking for major 8 fails.
Version version0 = Version.fromString("7.1");
tester.controllerTester().upgradeSystem(version0);
tester.upgrader().overrideConfidence(version0, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
context.submit(applicationPackage).deploy();
// Phase 2: 7.2 arrives; compile version follows only after the application upgrades to it.
Version version1 = Version.fromString("7.2");
tester.controllerTester().upgradeSystem(version1);
tester.upgrader().overrideConfidence(version1, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
tester.upgrader().maintain();
context.deployPlatform(version1);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
// Phase 3: 8.0 with low confidence is not offered; with normal confidence it is offered
// to new applications, while existing deployments stay on their major.
DeploymentContext legacyApp = tester.newDeploymentContext("avoid", "gc", "default").submit().deploy();
TenantAndApplicationId newApp = TenantAndApplicationId.from("new", "app");
Version version2 = Version.fromString("8.0");
tester.controllerTester().upgradeSystem(version2);
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
tester.upgrader().overrideConfidence(version2, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));
// Phase 4: majors 7 and 8 declared incompatible; explicit major requests are honored per major.
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));
// Phase 5: confidence of 8.0 drops to low again — major 8 becomes unavailable.
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));
// Phase 6: the application itself deploys on 8; it then sticks to major 8 even when
// confidence drops, because it is already running that incompatible major.
tester.upgrader().overrideConfidence(version2, Confidence.normal);
tester.controllerTester().computeVersionStatus();
context.submit(new ApplicationPackageBuilder().region("us-west-1").compileVersion(version2).build()).deploy();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
// Phase 7: with 8.0 broken there is no usable compile version on 8 at all.
tester.upgrader().overrideConfidence(version2, Confidence.broken);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals("no suitable, released compile version exists",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.empty()))
.getMessage());
assertEquals("no suitable, released compile version exists for specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
// Phase 8: clearing the incompatibility allows falling back to 7.2 for all majors.
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of(), String.class);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
}
@Test
void testCloudAccount() {
    // Deploying into a tenant-owned cloud account requires both the feature flag and the
    // target zones being configured in that account.
    DeploymentContext ctx = tester.newDeploymentContext();
    ZoneId devZone = devUsEast1.zone();
    ZoneId prodZone = productionUsWest1.zone();
    String account = "012345678912";
    var pkg = new ApplicationPackageBuilder()
            .cloudAccount(account)
            .region(prodZone.region())
            .build();
    // Without the flag, neither submission nor dev deployment may use the account.
    IllegalArgumentException submitFailure =
            assertThrows(IllegalArgumentException.class, () -> ctx.submit(pkg));
    assertEquals("cloud accounts [012345678912] are not valid for tenant tenant", submitFailure.getMessage());
    IllegalArgumentException devFailure =
            assertThrows(IllegalArgumentException.class, () -> ctx.runJob(devUsEast1, pkg));
    assertEquals("cloud accounts [012345678912] are not valid for tenant tenant", devFailure.getMessage());
    // Flag enabled, but zones are not yet configured in the account: the system test fails.
    tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(account), String.class);
    ctx.submit(pkg)
       .runJobExpectingFailure(systemTest, "Zone test.us-east-1 is not configured in requested cloud account '012345678912'")
       .abortJob(stagingTest);
    // Configure test, staging and prod zones in the account: deployment succeeds.
    tester.controllerTester().zoneRegistry().configureCloudAccount(CloudAccount.from(account),
                                                                   systemTest.zone(),
                                                                   stagingTest.zone(),
                                                                   prodZone);
    ctx.submit(pkg).deploy();
    tester.controllerTester().zoneRegistry().configureCloudAccount(CloudAccount.from(account), devZone);
    ctx.runJob(devZone, pkg);
    // Every deployment ended up in the requested account.
    for (ZoneId zoneId : List.of(systemTest.zone(), stagingTest.zone(), devZone, prodZone)) {
        assertEquals(account, tester.controllerTester().configServer()
                                    .cloudAccount(ctx.deploymentIdIn(zoneId))
                                    .get().value());
    }
}
@Test
void testCloudAccountWithDefaultOverride() {
    // A region may override the package-level cloud account back to the system default.
    var ctx = tester.newDeploymentContext();
    var accountZone = productionUsEast3.zone();
    var defaultZone = productionUsWest1.zone();
    var account = "012345678912";
    var pkg = new ApplicationPackageBuilder()
            .cloudAccount(account)
            .region(accountZone.region())
            .region(defaultZone.region(), "default") // this region opts out of the tenant account
            .build();
    tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(account), String.class);
    tester.controllerTester().zoneRegistry().configureCloudAccount(CloudAccount.from(account),
                                                                   systemTest.zone(),
                                                                   stagingTest.zone(),
                                                                   accountZone);
    ctx.submit(pkg).deploy();
    // First region uses the tenant account, second stays in the default account.
    assertEquals(account, tester.controllerTester().configServer().cloudAccount(ctx.deploymentIdIn(accountZone)).get().value());
    assertEquals(Optional.empty(), tester.controllerTester().configServer().cloudAccount(ctx.deploymentIdIn(defaultZone)));
}
@Test
void testSubmitWithElementDeprecatedOnPreviousMajor() {
    // 'global-service-id' was deprecated on major 7; a package compiled against major 8
    // that still uses it must be rejected.
    DeploymentContext context = tester.newDeploymentContext();
    var applicationPackage = new ApplicationPackageBuilder()
            .compileVersion(Version.fromString("8.1"))
            .region("us-west-1")
            .globalServiceId("qrs")
            .build();
    // Use assertThrows for consistency with the rest of this class instead of try/fail/catch.
    IllegalArgumentException e = assertThrows(IllegalArgumentException.class,
                                              () -> context.submit(applicationPackage).deploy());
    assertTrue(e.getMessage().contains("Element 'prod' contains attribute 'global-service-id' deprecated since major version 7"));
}
} | class ControllerTest {
private final DeploymentTester tester = new DeploymentTester();
@Test
void testDeployment() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.explicitEnvironment(Environment.dev, Environment.perf)
.region("us-west-1")
.region("us-east-3")
.build();
Version version1 = tester.configServer().initialVersion();
var context = tester.newDeploymentContext();
context.submit(applicationPackage);
assertEquals(ApplicationVersion.from(RevisionId.forProduction(1), DeploymentContext.defaultSourceRevision, "a@b", new Version("6.1"), Instant.ofEpochSecond(1)),
context.application().revisions().get(context.instance().change().revision().get()),
"Application version is known from completion of initial job");
context.runJob(systemTest);
context.runJob(stagingTest);
RevisionId applicationVersion = context.instance().change().revision().get();
assertTrue(applicationVersion.isProduction(), "Application version has been set during deployment");
tester.triggerJobs();
tester.clock().advance(Duration.ofSeconds(1));
context.timeOutUpgrade(productionUsWest1);
assertEquals(4, context.instanceJobs().size());
tester.triggerJobs();
tester.controllerTester().createNewController();
assertNotNull(tester.controller().tenants().get(TenantName.from("tenant1")));
assertNotNull(tester.controller().applications().requireInstance(context.instanceId()));
context.submit(applicationPackage);
context.runJob(systemTest);
context.runJob(stagingTest);
context.triggerJobs().jobAborted(productionUsWest1);
context.runJob(productionUsWest1);
tester.triggerJobs();
context.runJob(productionUsEast3);
assertEquals(4, context.instanceJobs().size());
applicationPackage = new ApplicationPackageBuilder()
.instances("hellO")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception due to illegal deployment spec.");
}
catch (IllegalArgumentException e) {
assertEquals("Invalid id 'hellO'. Tenant, application and instance names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.", e.getMessage());
}
applicationPackage = new ApplicationPackageBuilder()
.region("deep-space-9")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception due to illegal deployment spec.");
}
catch (IllegalArgumentException e) {
assertEquals("Zone prod.deep-space-9 in deployment spec was not found in this system!", e.getMessage());
}
applicationPackage = new ApplicationPackageBuilder()
.region("us-east-3")
.build();
try {
assertTrue(context.instance().deployments().containsKey(ZoneId.from("prod", "us-west-1")));
context.submit(applicationPackage);
fail("Expected exception due to illegal production deployment removal");
}
catch (IllegalArgumentException e) {
assertEquals("deployment-removal: application instance 'tenant.application.default' is deployed in us-west-1, " +
"but this instance and region combination is removed from deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval),
e.getMessage());
}
assertNotNull(context.instance().deployments().get(productionUsWest1.zone()),
"Zone was not removed");
applicationPackage = new ApplicationPackageBuilder()
.allow(ValidationId.deploymentRemoval)
.upgradePolicy("default")
.region("us-east-3")
.build();
context.submit(applicationPackage);
assertNull(context.instance().deployments().get(productionUsWest1.zone()),
"Zone was removed");
assertNull(context.instanceJobs().get(productionUsWest1), "Deployment job was removed");
assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.instanceId())
.get(tester.clock().instant()));
tester.clock().advance(Duration.ofSeconds(1));
context.submit(ApplicationPackage.deploymentRemoval());
tester.clock().advance(Duration.ofSeconds(1));
context.submit(ApplicationPackage.deploymentRemoval());
tester.applications().deleteApplication(context.application().id(),
tester.controllerTester().credentialsFor(context.instanceId().tenant()));
assertArrayEquals(new byte[0],
tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.instanceId())
.get(tester.clock().instant()));
assertNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.deploymentIdIn(productionUsWest1.zone())));
}
@Test
void testGlobalRotationStatus() {
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.region(zone1.region())
.region(zone2.region())
.endpoint("default", "default", zone1.region().value(), zone2.region().value())
.build();
context.submit(applicationPackage).deploy();
var deployment1 = context.deploymentIdIn(zone1);
DeploymentRoutingContext routingContext = tester.controller().routing().of(deployment1);
RoutingStatus status1 = routingContext.routingStatus();
assertEquals(RoutingStatus.Value.in, status1.value());
routingContext.setRoutingStatus(RoutingStatus.Value.out, RoutingStatus.Agent.operator);
RoutingStatus status2 = routingContext.routingStatus();
assertEquals(RoutingStatus.Value.out, status2.value());
RoutingStatus status3 = tester.controller().routing().of(context.deploymentIdIn(zone2)).routingStatus();
assertEquals(RoutingStatus.Value.in, status3.value());
}
@Test
void testDnsUpdatesForGlobalEndpoint() {
var betaContext = tester.newDeploymentContext("tenant1", "app1", "beta");
var defaultContext = tester.newDeploymentContext("tenant1", "app1", "default");
ZoneId usWest = ZoneId.from("prod.us-west-1");
ZoneId usCentral = ZoneId.from("prod.us-central-1");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,default")
.endpoint("default", "foo")
.region(usWest.region())
.region(usCentral.region())
.build();
tester.controllerTester().zoneRegistry().setRoutingMethod(List.of(ZoneApiMock.from(usWest), ZoneApiMock.from(usCentral)),
RoutingMethod.sharedLayer4);
betaContext.submit(applicationPackage).deploy();
{
Collection<Deployment> betaDeployments = betaContext.instance().deployments().values();
assertFalse(betaDeployments.isEmpty());
Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
"global",
List.of("beta.app1.tenant1.global.vespa.oath.cloud",
"rotation-id-01"),
OptionalInt.empty(),
RoutingMethod.sharedLayer4));
for (Deployment deployment : betaDeployments) {
assertEquals(containerEndpoints,
tester.configServer().containerEndpoints()
.get(betaContext.deploymentIdIn(deployment.zone())));
}
betaContext.flushDnsUpdates();
}
{
Collection<Deployment> defaultDeployments = defaultContext.instance().deployments().values();
assertFalse(defaultDeployments.isEmpty());
Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
"global",
List.of("app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02"),
OptionalInt.empty(),
RoutingMethod.sharedLayer4));
for (Deployment deployment : defaultDeployments) {
assertEquals(containerEndpoints,
tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone())));
}
defaultContext.flushDnsUpdates();
}
Map<String, String> rotationCnames = Map.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.",
"app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02.");
rotationCnames.forEach((cname, data) -> {
var record = tester.controllerTester().findCname(cname);
assertTrue(record.isPresent());
assertEquals(cname, record.get().name().asString());
assertEquals(data, record.get().data().asString());
});
Map<ApplicationId, Set<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), Set.of("beta.app1.tenant1.global.vespa.oath.cloud"),
defaultContext.instanceId(), Set.of("app1.tenant1.global.vespa.oath.cloud"));
globalDnsNamesByInstance.forEach((instance, dnsNames) -> {
Set<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance)
.scope(Endpoint.Scope.global)
.asList().stream()
.map(Endpoint::dnsName)
.collect(Collectors.toSet());
assertEquals(dnsNames, actualDnsNames, "Global DNS names for " + instance);
});
}
@Test
void testDnsUpdatesForGlobalEndpointLegacySyntax() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.globalServiceId("foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
Collection<Deployment> deployments = context.instance().deployments().values();
assertFalse(deployments.isEmpty());
for (Deployment deployment : deployments) {
assertEquals(Set.of("rotation-id-01",
"app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
"Rotation names are passed to config server in " + deployment.zone());
}
context.flushDnsUpdates();
assertEquals(1, tester.controllerTester().nameService().records().size());
Optional<Record> record = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
assertTrue(record.isPresent());
assertEquals("app1.tenant1.global.vespa.oath.cloud", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
List<String> globalDnsNames = tester.controller().routing().readDeclaredEndpointsOf(context.instanceId())
.scope(Endpoint.Scope.global)
.sortedBy(comparing(Endpoint::dnsName))
.mapToList(Endpoint::dnsName);
assertEquals(List.of("app1.tenant1.global.vespa.oath.cloud"),
globalDnsNames);
}
@Test
void testDnsUpdatesForMultipleGlobalEndpoints() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("foobar", "qrs", "us-west-1", "us-central-1")
.endpoint("default", "qrs", "us-west-1", "us-central-1")
.endpoint("all", "qrs")
.endpoint("west", "qrs", "us-west-1")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
Collection<Deployment> deployments = context.instance().deployments().values();
assertFalse(deployments.isEmpty());
var notWest = Set.of(
"rotation-id-01", "foobar.app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02", "app1.tenant1.global.vespa.oath.cloud",
"rotation-id-03", "all.app1.tenant1.global.vespa.oath.cloud"
);
var west = Sets.union(notWest, Set.of("rotation-id-04", "west.app1.tenant1.global.vespa.oath.cloud"));
for (Deployment deployment : deployments) {
assertEquals(ZoneId.from("prod.us-west-1").equals(deployment.zone()) ? west : notWest,
tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
"Rotation names are passed to config server in " + deployment.zone());
}
context.flushDnsUpdates();
assertEquals(4, tester.controllerTester().nameService().records().size());
var record1 = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
assertTrue(record1.isPresent());
assertEquals("app1.tenant1.global.vespa.oath.cloud", record1.get().name().asString());
assertEquals("rotation-fqdn-02.", record1.get().data().asString());
var record2 = tester.controllerTester().findCname("foobar.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record2.isPresent());
assertEquals("foobar.app1.tenant1.global.vespa.oath.cloud", record2.get().name().asString());
assertEquals("rotation-fqdn-01.", record2.get().data().asString());
var record3 = tester.controllerTester().findCname("all.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record3.isPresent());
assertEquals("all.app1.tenant1.global.vespa.oath.cloud", record3.get().name().asString());
assertEquals("rotation-fqdn-03.", record3.get().data().asString());
var record4 = tester.controllerTester().findCname("west.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record4.isPresent());
assertEquals("west.app1.tenant1.global.vespa.oath.cloud", record4.get().name().asString());
assertEquals("rotation-fqdn-04.", record4.get().data().asString());
}
@Test
void testDnsUpdatesForGlobalEndpointChanges() {
// Exercises how changes to global endpoint (rotation) membership propagate to the
// config server (container endpoint names) across successive submissions, and that
// shrinking/removing endpoints requires the global-endpoint-change override.
var context = tester.newDeploymentContext("tenant1", "app1", "default");
var west = ZoneId.from("prod", "us-west-1");
var central = ZoneId.from("prod", "us-central-1");
var east = ZoneId.from("prod", "us-east-3");
// 1) One 'default' endpoint covering west + central (east deployed but not a member).
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage).deploy();
for (var zone : List.of(west, central)) {
assertEquals(
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
,
"Zone " + zone + " is a member of global endpoint");
}
// 2) Add a second endpoint covering east only: existing members unchanged,
// east gets its own rotation (rotation-id-02).
ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage2).deploy();
for (var zone : List.of(west, central)) {
assertEquals(
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
,
"Zone " + zone + " is a member of global endpoint");
}
assertEquals(
Set.of("rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(east))
,
"Zone " + east + " is a member of global endpoint");
// 3) Extend 'default' to also cover east: east is now a member of both endpoints.
ApplicationPackage applicationPackage3 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value(), east.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage3).deploy();
for (var zone : List.of(west, central, east)) {
assertEquals(
zone.equals(east)
? Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud")
: Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone))
,
"Zone " + zone + " is a member of global endpoint");
}
// 4) Shrinking 'default' back (removing east) without an override is rejected.
ApplicationPackage applicationPackage4 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage4);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
"[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
"but does not include all of these in deployment.xml. Deploying given deployment.xml " +
"will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1] " +
"and add [endpoint 'default' (cluster qrs) -> us-central-1, us-west-1]. " +
ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
}
// 5) Removing the 'default' endpoint entirely is likewise rejected without an override.
ApplicationPackage applicationPackage5 = new ApplicationPackageBuilder()
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage5);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
"[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
"but does not include all of these in deployment.xml. Deploying given deployment.xml " +
"will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1]. " +
ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
}
// 6) With the global-endpoint-change override the removal is accepted.
ApplicationPackage applicationPackage6 = new ApplicationPackageBuilder()
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage6);
}
@Test
void testUnassignRotations() {
    // Deploy with a global endpoint, then redeploy without it (override allowed):
    // the rotation must be unassigned and the container endpoints cleared.
    var app = tester.newDeploymentContext();
    var withEndpoint = new ApplicationPackageBuilder()
            .endpoint("default", "qrs", "us-west-1", "us-central-1")
            .region("us-west-1")
            .region("us-central-1")
            .build();
    app.submit(withEndpoint).deploy();
    var withoutEndpoint = new ApplicationPackageBuilder()
            .region("us-west-1")
            .region("us-central-1")
            .allow(ValidationId.globalEndpointChange)
            .build();
    app.submit(withoutEndpoint).deploy();
    assertEquals(List.of(), app.instance().rotations());
    assertEquals(Set.of(),
                 tester.configServer().containerEndpoints().get(app.deploymentIdIn(ZoneId.from("prod", "us-west-1"))));
}
@Test
void testDnsUpdatesWithChangeInRotationAssignment() {
// Verifies that a rotation freed by a deleted application is re-assigned to the next
// application, and that re-creating the first application yields a fresh rotation
// and an updated CNAME record.
String dnsName1 = "app1.tenant1.global.vespa.oath.cloud";
{
// app1 takes rotation-id-01, then is deleted, freeing the rotation and its CNAME.
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals(1, tester.controllerTester().nameService().records().size());
{
Optional<Record> record = tester.controllerTester().findCname(dnsName1);
assertTrue(record.isPresent());
assertEquals(dnsName1, record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
// Remove all deployments (overrides required) and delete the application.
applicationPackage = new ApplicationPackageBuilder()
.allow(ValidationId.deploymentRemoval)
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage);
tester.applications().deleteApplication(context.application().id(),
tester.controllerTester().credentialsFor(context.application().id().tenant()));
try (RotationLock lock = tester.controller().routing().rotations().lock()) {
assertTrue(tester.controller().routing().rotations().availableRotations(lock)
.containsKey(new RotationId("rotation-id-01")),
"Rotation is unassigned");
}
context.flushDnsUpdates();
Optional<Record> record = tester.controllerTester().findCname(dnsName1);
assertTrue(record.isEmpty(), dnsName1 + " is removed");
}
String dnsName2 = "app2.tenant2.global.vespa.oath.cloud";
{
// app2 now picks up the freed rotation-id-01.
var context = tester.newDeploymentContext("tenant2", "app2", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals(1, tester.controllerTester().nameService().records().size());
var record = tester.controllerTester().findCname(dnsName2);
assertTrue(record.isPresent());
assertEquals(dnsName2, record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
{
// Re-created app1 gets the next free rotation (id-02) and a new CNAME target.
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals("rotation-id-02", context.instance().rotations().get(0).rotationId().asString());
assertEquals(2, tester.controllerTester().nameService().records().size());
var record1 = tester.controllerTester().findCname(dnsName1);
assertTrue(record1.isPresent());
assertEquals("rotation-fqdn-02.", record1.get().data().asString());
var record2 = tester.controllerTester().findCname(dnsName2);
assertTrue(record2.isPresent());
assertEquals("rotation-fqdn-01.", record2.get().data().asString());
}
}
// Fix: the @Test annotation was duplicated; @Test is not a repeatable annotation,
// so the duplicate is a compile error. One occurrence removed.
@Test
void testDevDeployment() {
    // Dev deployments activate directly: no job status is recorded, no deployment spec
    // is stored, and application metadata is written (and retained after deactivation).
    ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]);
    var context = tester.newDeploymentContext();
    ZoneId zone = ZoneId.from("dev", "us-east-1");
    tester.controllerTester().zoneRegistry()
          .setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.sharedLayer4);
    context.runJob(zone, applicationPackage);
    assertTrue(tester.configServer().application(context.instanceId(), zone).get().activated(),
               "Application deployed and activated");
    assertTrue(context.instanceJobs().isEmpty(),
               "No job status added");
    assertEquals(DeploymentSpec.empty, context.application().deploymentSpec(), "DeploymentSpec is not stored");
    // The dev deployment's endpoints use only the zone's configured routing method.
    Set<RoutingMethod> routingMethods = tester.controller().routing().readEndpointsOf(context.deploymentIdIn(zone))
                                              .asList()
                                              .stream()
                                              .map(Endpoint::routingMethod)
                                              .collect(Collectors.toSet());
    assertEquals(routingMethods, Set.of(RoutingMethod.sharedLayer4));
    assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
                        .getMeta(new DeploymentId(context.instanceId(), zone))
                        .get(tester.clock().instant()));
    tester.clock().advance(Duration.ofSeconds(1));
    tester.controller().applications().deactivate(context.instanceId(), zone);
    // Metadata (the empty package) is also stored for the deactivation instant.
    assertArrayEquals(new byte[0],
                      tester.controllerTester().serviceRegistry().applicationStore()
                            .getMeta(new DeploymentId(context.instanceId(), zone))
                            .get(tester.clock().instant()));
}
@Test
void testDevDeploymentWithIncompatibleVersions() {
// Dev deployments must choose a platform compatible with the package's compile
// version, honoring the INCOMPATIBLE_VERSIONS flag (major 8 incompatible with 7 here).
Version version1 = new Version("7");
Version version2 = new Version("7.5");
Version version3 = new Version("8");
var context = tester.newDeploymentContext();
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
tester.controllerTester().upgradeSystem(version2);
// Keep an application on 7.5 so that version stays available as a target.
tester.newDeploymentContext("keep", "v2", "alive").submit().deploy();
ZoneId zone = ZoneId.from("dev", "us-east-1");
// Compiled against 7: deploys on the newest compatible platform, 7.5.
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).build());
assertEquals(version2, context.deployment(zone).version());
assertEquals(Optional.of(version1), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
// Major 8 requested, but no 8.x platform exists yet.
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
fail("Should fail when specifying a major that does not yet exist");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms were found for major version 8 specified in deployment.xml", e.getMessage());
}
// Compiled against 8, but no existing platform is compatible with 8.
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
fail("Should fail when compiled against a version which is only compatible with not-yet-existent versions");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms are compatible with compile version 8", e.getMessage());
}
tester.controllerTester().upgradeSystem(version3);
// 8 now exists, but compile version 7 is flagged incompatible with it.
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
fail("Should fail when specifying a major which is incompatible with compile version");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms on major version 8 specified in deployment.xml are compatible with compile version 7", e.getMessage());
}
// Compiled against 8 (with or without an explicit major pin): deploys on 8.
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).majorVersion(8).build());
assertEquals(version3, context.deployment(zone).version());
assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
assertEquals(version3, context.deployment(zone).version());
assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
}
@Test
void testSuspension() {
    // Suspending one deployment must not affect the suspension state of another.
    var context = tester.newDeploymentContext();
    var pkg = new ApplicationPackageBuilder()
            .region("us-west-1")
            .region("us-east-3")
            .build();
    context.submit(pkg).deploy();
    DeploymentId westDeployment = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
    DeploymentId eastDeployment = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-east-3")));
    assertFalse(tester.configServer().isSuspended(westDeployment));
    assertFalse(tester.configServer().isSuspended(eastDeployment));
    tester.configServer().setSuspension(westDeployment, true);
    assertTrue(tester.configServer().isSuspended(westDeployment));
    assertFalse(tester.configServer().isSuspended(eastDeployment));
}
@Test
void testDeletingApplicationThatHasAlreadyBeenDeleted() {
    // Deactivating the same deployment twice must be a no-op the second time, not an error.
    var context = tester.newDeploymentContext();
    var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
    var pkg = new ApplicationPackageBuilder()
            .region("us-west-1")
            .build();
    context.submit(pkg).runJob(zone, pkg);
    tester.controller().applications().deactivate(context.instanceId(), zone);
    tester.controller().applications().deactivate(context.instanceId(), zone);
}
@Test
void testDeployApplicationWithWarnings() {
    // Deployment warnings reported by the config server must surface in deployment metrics.
    var context = tester.newDeploymentContext();
    var zone = ZoneId.from("prod", "us-west-1");
    int expectedWarnings = 3;
    tester.configServer().generateWarnings(context.deploymentIdIn(zone), expectedWarnings);
    var pkg = new ApplicationPackageBuilder()
            .region("us-west-1")
            .build();
    context.submit(pkg).deploy();
    int reported = context.deployment(zone).metrics().warnings().get(DeploymentMetrics.Warning.all).intValue();
    assertEquals(expectedWarnings, reported);
}
@Test
void testDeploySelectivelyProvisionsCertificate() {
// Endpoint certificates are provisioned on first prod deployment (covering the
// test/staging names as well), re-used unchanged on resubmission, and also
// provisioned for dev deployments.
Function<Instance, Optional<EndpointCertificateMetadata>> certificate = (application) -> tester.controller().curator().readEndpointCertificateMetadata(application.id());
var context1 = tester.newDeploymentContext("tenant1", "app1", "default");
var prodZone = ZoneId.from("prod", "us-west-1");
var stagingZone = ZoneId.from("staging", "us-east-3");
var testZone = ZoneId.from("test", "us-east-1");
tester.controllerTester().zoneRegistry().exclusiveRoutingIn(ZoneApiMock.from(prodZone));
var applicationPackage = new ApplicationPackageBuilder().athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
.region(prodZone.region())
.build();
context1.submit(applicationPackage).deploy();
var cert = certificate.apply(context1.instance());
assertTrue(cert.isPresent(), "Provisions certificate in " + Environment.prod);
// Expected SAN set: the anonymized name, global names (bare + wildcard), and
// bare + wildcard names for each of the prod/test/staging zones.
assertEquals(Stream.concat(Stream.of("vznqtz7a5ygwjkbhhj7ymxvlrekgt4l6g.vespa.oath.cloud",
"app1.tenant1.global.vespa.oath.cloud",
"*.app1.tenant1.global.vespa.oath.cloud"),
Stream.of(prodZone, testZone, stagingZone)
.flatMap(zone -> Stream.of("", "*.")
.map(prefix -> prefix + "app1.tenant1." + zone.region().value() +
(zone.environment() == Environment.prod ? "" : "." + zone.environment().value()) +
".vespa.oath.cloud")))
.collect(Collectors.toUnmodifiableSet()),
Set.copyOf(tester.controllerTester().serviceRegistry().endpointCertificateMock().dnsNamesOf(context1.instanceId())));
// Resubmitting does not provision a new certificate.
context1.submit(applicationPackage).deploy();
assertEquals(cert, certificate.apply(context1.instance()));
// A dev deployment of another application also gets a certificate.
var context2 = tester.newDeploymentContext("tenant1", "app2", "default");
var devZone = ZoneId.from("dev", "us-east-1");
context2.runJob(devZone, applicationPackage);
assertTrue(tester.configServer().application(context2.instanceId(), devZone).get().activated(),
"Application deployed and activated");
assertTrue(certificate.apply(context2.instance()).isPresent(), "Provisions certificate also in zone with routing layer");
}
@Test
void testDeployWithGlobalEndpointsInMultipleClouds() {
    // A global endpoint may not span zones in different clouds; submission must be
    // rejected. Rewritten from try/fail/catch to assertThrows for consistency with
    // the other exception assertions in this class (see testCompileVersion).
    tester.controllerTester().zoneRegistry().setZones(
            ZoneApiMock.fromId("test.us-west-1"),
            ZoneApiMock.fromId("staging.us-west-1"),
            ZoneApiMock.fromId("prod.us-west-1"),
            ZoneApiMock.newBuilder().with(CloudName.AWS).withId("prod.aws-us-east-1").build());
    var context = tester.newDeploymentContext();
    // Implicit global endpoint (no explicit region list) covering both clouds.
    var applicationPackage = new ApplicationPackageBuilder()
            .region("aws-us-east-1")
            .region("us-west-1")
            .endpoint("default", "default")
            .build();
    var e = assertThrows(IllegalArgumentException.class, () -> context.submit(applicationPackage));
    assertEquals("Endpoint 'default' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage());
    // An explicit region list spanning clouds ('foo') is rejected too, even when a
    // single-cloud endpoint ('aws') is also declared.
    var applicationPackage2 = new ApplicationPackageBuilder()
            .region("aws-us-east-1")
            .region("us-west-1")
            .endpoint("aws", "default", "aws-us-east-1")
            .endpoint("foo", "default", "aws-us-east-1", "us-west-1")
            .build();
    e = assertThrows(IllegalArgumentException.class, () -> context.submit(applicationPackage2));
    assertEquals("Endpoint 'foo' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage());
}
@Test
void testDeployWithoutSourceRevision() {
    // Submitting without source-revision metadata must still produce a deployable build.
    var context = tester.newDeploymentContext();
    var pkg = new ApplicationPackageBuilder()
            .upgradePolicy("default")
            .region("us-west-1")
            .build();
    context.submit(pkg, Optional.empty()).deploy();
    assertEquals(1, context.instance().deployments().size(), "Deployed application");
}
@Test
void testDeployWithGlobalEndpointsAndMultipleRoutingMethods() {
// With zone1 on shared-layer-4 routing and zone2 on exclusive routing, deployment
// should create weighted + latency ALIAS records and a zone CNAME for the
// exclusive zone only.
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
.endpoint("default", "default", zone1.region().value(), zone2.region().value())
.endpoint("east", "default", zone2.region().value())
.region(zone1.region())
.region(zone2.region())
.build();
tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone1), RoutingMethod.sharedLayer4);
tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone2), RoutingMethod.exclusive);
context.submit(applicationPackage).deploy();
var expectedRecords = List.of(
// Weighted alias for the exclusive zone's load balancer.
new Record(Record.Type.ALIAS,
RecordName.from("application.tenant.us-east-3-w.vespa.oath.cloud"),
new WeightedAliasTarget(HostName.of("lb-0--tenant.application.default--prod.us-east-3"),
"dns-zone-1", ZoneId.from("prod.us-east-3"), 1).pack()),
// Latency-routed global alias for the 'east' endpoint.
new Record(Record.Type.ALIAS,
RecordName.from("east.application.tenant.global.vespa.oath.cloud"),
new LatencyAliasTarget(HostName.of("application.tenant.us-east-3-w.vespa.oath.cloud"),
"dns-zone-1", ZoneId.from("prod.us-east-3")).pack()),
// Zone endpoint CNAME for the exclusive zone.
new Record(Record.Type.CNAME,
RecordName.from("application.tenant.us-east-3.vespa.oath.cloud"),
RecordData.from("lb-0--tenant.application.default--prod.us-east-3.")));
assertEquals(expectedRecords, List.copyOf(tester.controllerTester().nameService().records()));
}
@Test
void testDeploymentDirectRouting() {
// All zones use exclusive (direct) routing and no rotations are configured.
// NOTE: deliberately shadows the class-level tester with one built without rotations.
DeploymentTester tester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build(), main));
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var zone3 = ZoneId.from("prod", "eu-west-1");
tester.controllerTester().zoneRegistry()
.exclusiveRoutingIn(ZoneApiMock.from(zone1), ZoneApiMock.from(zone2), ZoneApiMock.from(zone3));
var applicationPackageBuilder = new ApplicationPackageBuilder()
.region(zone1.region())
.region(zone2.region())
.region(zone3.region())
.endpoint("default", "default")
.endpoint("foo", "qrs")
.endpoint("us", "default", zone1.region().value(), zone2.region().value())
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"));
context.submit(applicationPackageBuilder.build()).deploy();
// zone1 and zone2 are members of all three endpoints; zone3 only of the two
// endpoints declared without an explicit region list.
for (var zone : List.of(zone1, zone2)) {
assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
"foo.application.tenant.global.vespa.oath.cloud",
"us.application.tenant.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
"Expected container endpoints in " + zone);
}
assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
"foo.application.tenant.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone3)),
"Expected container endpoints in " + zone3);
}
@Test
void testChangeEndpointCluster() {
var context = tester.newDeploymentContext();
var west = ZoneId.from("prod", "us-west-1");
var east = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region(west.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage).deploy();
assertEquals(ClusterSpec.Id.from("foo"), tester.applications().requireInstance(context.instanceId())
.rotations().get(0).clusterId());
applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "bar")
.region(west.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage).deploy();
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant.application' has endpoints [endpoint " +
"'default' (cluster foo) -> us-east-3, us-west-1], but does not include all of these in " +
"deployment.xml. Deploying given deployment.xml will remove " +
"[endpoint 'default' (cluster foo) -> us-east-3, us-west-1] and add " +
"[endpoint 'default' (cluster bar) -> us-east-3, us-west-1]. To allow this add " +
"<allow until='yyyy-mm-dd'>global-endpoint-change</allow> to validation-overrides.xml, see " +
"https:
}
applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "bar")
.region(west.region().value())
.region(east.region().value())
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage).deploy();
assertEquals(ClusterSpec.Id.from("bar"), tester.applications().requireInstance(context.instanceId())
.rotations().get(0).clusterId());
}
@Test
void testReadableApplications() {
// readable() must skip applications whose stored data fails to deserialize,
// while asList() still fails fast on corrupt data.
// NOTE: deliberately shadows the class-level tester with one backed by this curator db.
var db = new MockCuratorDb(tester.controller().system());
var tester = new DeploymentTester(new ControllerTester(db));
var app1 = tester.newDeploymentContext("t1", "a1", "default")
.submit()
.deploy();
var app2 = tester.newDeploymentContext("t2", "a2", "default")
.submit()
.deploy();
assertEquals(2, tester.applications().readable().size());
// Corrupt app2's serialized form directly in the curator store.
db.curator().set(Path.fromString("/controller/v1/applications/" + app2.application().id().serialized()),
new byte[]{(byte) 0xDE, (byte) 0xAD});
assertEquals(1, tester.applications().readable().size());
try {
tester.applications().asList();
fail("Expected exception");
} catch (Exception ignored) {
// Expected: asList() must not silently drop the corrupt application.
}
// The uncorrupted application can still be re-submitted and deployed.
app1.submit().deploy();
}
@Test
void testClashingEndpointIdAndInstanceName() {
String deploymentXml = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" +
" <instance id=\"default\">\n" +
" <prod>\n" +
" <region active=\"true\">us-west-1</region>\n" +
" </prod>\n" +
" <endpoints>\n" +
" <endpoint id=\"dev\" container-id=\"qrs\"/>\n" +
" </endpoints>\n" +
" </instance>\n" +
" <instance id=\"dev\">\n" +
" <prod>\n" +
" <region active=\"true\">us-west-1</region>\n" +
" </prod>\n" +
" <endpoints>\n" +
" <endpoint id=\"default\" container-id=\"qrs\"/>\n" +
" </endpoints>\n" +
" </instance>\n" +
"</deployment>\n";
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
try {
tester.newDeploymentContext().submit(applicationPackage);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint with ID 'default' in instance 'dev' clashes with endpoint 'dev' in instance 'default'",
e.getMessage());
}
}
@Test
void testTestPackageWarnings() {
String deploymentXml = "<deployment version='1.0'>\n" +
" <prod>\n" +
" <region>us-west-1</region>\n" +
" </prod>\n" +
"</deployment>\n";
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
byte[] testPackage = ApplicationPackage.filesZip(Map.of("tests/staging-test/foo.json", new byte[0]));
var app = tester.newDeploymentContext();
tester.jobs().submit(app.application().id(), Submission.basic(applicationPackage, testPackage), 1);
assertEquals(List.of(new Notification(tester.clock().instant(),
Type.testPackage,
Level.warning,
NotificationSource.from(app.application().id()),
List.of("test package has staging tests, so it should also include staging setup",
"see https:
tester.controller().notificationsDb().listNotifications(NotificationSource.from(app.application().id()), true));
}
@Test
void testCompileVersion() {
// Exercises compileVersion() across confidence changes, major-version pins,
// the INCOMPATIBLE_VERSIONS flag, and the versions the application already runs.
DeploymentContext context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build();
TenantAndApplicationId application = TenantAndApplicationId.from(context.instanceId());
// 7.1 at normal confidence: suggested for major 7 and unpinned; major 8 has no versions.
Version version0 = Version.fromString("7.1");
tester.controllerTester().upgradeSystem(version0);
tester.upgrader().overrideConfidence(version0, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
context.submit(applicationPackage).deploy();
// 7.2 released: suggested only after the application itself upgrades to it.
Version version1 = Version.fromString("7.2");
tester.controllerTester().upgradeSystem(version1);
tester.upgrader().overrideConfidence(version1, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
tester.upgrader().maintain();
context.deployPlatform(version1);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
// Another deployed app keeps 7.x in use (the name hints: avoid GC); the variable
// itself is intentionally unused.
DeploymentContext legacyApp = tester.newDeploymentContext("avoid", "gc", "default").submit().deploy();
TenantAndApplicationId newApp = TenantAndApplicationId.from("new", "app");
// 8.0 at low confidence: not suggested on major 8 yet.
Version version2 = Version.fromString("8.0");
tester.controllerTester().upgradeSystem(version2);
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
// 8.0 at normal confidence: the existing app stays on 7.2; a brand new app gets 8.0.
tester.upgrader().overrideConfidence(version2, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));
// Major 8 flagged incompatible: an explicit major-8 pin now yields 8.0.
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));
// 8.0 back at low confidence: major 8 unavailable again, new apps fall back to 7.2.
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));
// The app compiles against 8 and deploys on it: 8.0 becomes its compile version.
tester.upgrader().overrideConfidence(version2, Confidence.normal);
tester.controllerTester().computeVersionStatus();
context.submit(new ApplicationPackageBuilder().region("us-west-1").compileVersion(version2).build()).deploy();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
// Low confidence no longer demotes 8.0 once the app itself already runs it.
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
// Broken confidence: no released compile version qualifies.
tester.upgrader().overrideConfidence(version2, Confidence.broken);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals("no suitable, released compile version exists",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.empty()))
.getMessage());
assertEquals("no suitable, released compile version exists for specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
// Clearing the incompatibility flag makes 7.2 acceptable for any requested major.
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of(), String.class);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
}
@Test
void testCloudAccount() {
// Deploying into a tenant-owned cloud account requires the account to be allow-listed
// (CLOUD_ACCOUNTS flag) and every target zone to be configured in that account.
DeploymentContext context = tester.newDeploymentContext();
ZoneId devZone = devUsEast1.zone();
ZoneId prodZone = productionUsWest1.zone();
String cloudAccount = "012345678912";
var applicationPackage = new ApplicationPackageBuilder()
.cloudAccount(cloudAccount)
.region(prodZone.region())
.build();
// Not allow-listed yet: both submission and dev deployment are rejected.
assertEquals("cloud accounts [012345678912] are not valid for tenant tenant",
assertThrows(IllegalArgumentException.class,
() -> context.submit(applicationPackage))
.getMessage());
assertEquals("cloud accounts [012345678912] are not valid for tenant tenant",
assertThrows(IllegalArgumentException.class,
() -> context.runJob(devUsEast1, applicationPackage))
.getMessage());
// Allow-listed, but the test zone is not configured in the account: the job fails.
tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class);
context.submit(applicationPackage)
.runJobExpectingFailure(systemTest, "Zone test.us-east-1 is not configured in requested cloud account '012345678912'")
.abortJob(stagingTest);
// Configure all required zones in the account: deployment succeeds everywhere.
tester.controllerTester().zoneRegistry().configureCloudAccount(CloudAccount.from(cloudAccount),
systemTest.zone(),
stagingTest.zone(),
prodZone);
context.submit(applicationPackage).deploy();
tester.controllerTester().zoneRegistry().configureCloudAccount(CloudAccount.from(cloudAccount), devZone);
context.runJob(devZone, applicationPackage);
// Every deployment reports the requested cloud account.
for (var zoneId : List.of(systemTest.zone(), stagingTest.zone(), devZone, prodZone)) {
assertEquals(cloudAccount, tester.controllerTester().configServer()
.cloudAccount(context.deploymentIdIn(zoneId))
.get().value());
}
}
@Test
void testCloudAccountWithDefaultOverride() {
    // A region may override the declared cloud account with "default"; that deployment
    // then ends up with no explicit cloud account set.
    var context = tester.newDeploymentContext();
    var prodZone1 = productionUsEast3.zone();
    var prodZone2 = productionUsWest1.zone();
    var cloudAccount = "012345678912";
    var application = new ApplicationPackageBuilder()
            .cloudAccount(cloudAccount)
            .region(prodZone1.region())
            .region(prodZone2.region(), "default") // per-region override back to the default account
            .build();
    tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class);
    tester.controllerTester().zoneRegistry().configureCloudAccount(CloudAccount.from(cloudAccount),
                                                                   systemTest.zone(),
                                                                   stagingTest.zone(),
                                                                   prodZone1);
    context.submit(application).deploy();
    // Only the non-overridden zone uses the custom account
    assertEquals(cloudAccount, tester.controllerTester().configServer().cloudAccount(context.deploymentIdIn(prodZone1)).get().value());
    assertEquals(Optional.empty(), tester.controllerTester().configServer().cloudAccount(context.deploymentIdIn(prodZone2)));
}
@Test
void testSubmitWithElementDeprecatedOnPreviousMajor() {
    // Submitting a package compiled against major 8 that still uses an attribute
    // deprecated on major 7 ('global-service-id') must be rejected.
    DeploymentContext context = tester.newDeploymentContext();
    var applicationPackage = new ApplicationPackageBuilder()
            .compileVersion(Version.fromString("8.1"))
            .region("us-west-1")
            .globalServiceId("qrs") // deprecated since major 7
            .build();
    try {
        context.submit(applicationPackage).deploy();
        fail("Expected exception");
    } catch (IllegalArgumentException e) {
        assertTrue(e.getMessage().contains("Element 'prod' contains attribute 'global-service-id' deprecated since major version 7"));
    }
}
} |
Fix alignment? | private void removeApplicationDnsUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList deploymentPolicies, @SuppressWarnings("unused") Mutex lock) {
    // Removes DNS records (current and legacy regional names) for application-level
    // endpoints that are no longer referenced by any load balancer in this allocation.
    Map<RoutingId, List<RoutingPolicy>> routingTable = deploymentPolicies.asApplicationRoutingTable();
    Set<RoutingId> removalCandidates = new HashSet<>(routingTable.keySet());
    Set<RoutingId> activeRoutingIds = applicationRoutingIds(allocation);
    removalCandidates.removeAll(activeRoutingIds); // keep only endpoints that lost all references
    for (var id : removalCandidates) {
        TenantAndApplicationId application = TenantAndApplicationId.from(id.instance());
        EndpointList endpoints = controller.routing()
                                           .readDeclaredEndpointsOf(application)
                                           .named(id.endpointId());
        List<RoutingPolicy> policies = routingTable.get(id);
        for (var policy : policies) {
            if (!policy.appliesTo(allocation.deployment)) continue;
            NameServiceForwarder forwarder = nameServiceForwarderIn(policy.id().zone());
            for (Endpoint endpoint : endpoints) {
                // ALIAS records point at a load balancer hostname; DIRECT records at its IP
                if (policy.canonicalName().isPresent()) {
                    forwarder.removeRecords(Record.Type.ALIAS,
                                            RecordName.from(endpoint.dnsName()),
                                            RecordData.fqdn(policy.canonicalName().get().value()),
                                            Priority.normal);
                    // Fixed typo: legacyRegionalDsnName -> legacyRegionalDnsName.
                    // NOTE(review): accessor rename must match the declaration on Endpoint — confirm.
                    forwarder.removeRecords(Record.Type.ALIAS,
                                            RecordName.from(endpoint.legacyRegionalDnsName()),
                                            RecordData.fqdn(policy.canonicalName().get().value()),
                                            Priority.normal);
                } else {
                    forwarder.removeRecords(Record.Type.DIRECT,
                                            RecordName.from(endpoint.dnsName()),
                                            RecordData.from(policy.ipAddress().get()),
                                            Priority.normal);
                    forwarder.removeRecords(Record.Type.DIRECT,
                                            RecordName.from(endpoint.legacyRegionalDnsName()),
                                            RecordData.from(policy.ipAddress().get()),
                                            Priority.normal);
                }
            }
        }
    }
} | RecordName.from(endpoint.legacyRegionalDsnName()), | private void removeApplicationDnsUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList deploymentPolicies, @SuppressWarnings("unused") Mutex lock) {
    // (Corrected version from the review: uses endpoint.legacyRegionalDnsName().)
    // Removes DNS records for application-level endpoints no longer referenced by the allocation.
    Map<RoutingId, List<RoutingPolicy>> routingTable = deploymentPolicies.asApplicationRoutingTable();
    Set<RoutingId> removalCandidates = new HashSet<>(routingTable.keySet());
    Set<RoutingId> activeRoutingIds = applicationRoutingIds(allocation);
    removalCandidates.removeAll(activeRoutingIds); // keep only endpoints that lost all references
    for (var id : removalCandidates) {
        TenantAndApplicationId application = TenantAndApplicationId.from(id.instance());
        EndpointList endpoints = controller.routing()
                                           .readDeclaredEndpointsOf(application)
                                           .named(id.endpointId());
        List<RoutingPolicy> policies = routingTable.get(id);
        for (var policy : policies) {
            if (!policy.appliesTo(allocation.deployment)) continue;
            NameServiceForwarder forwarder = nameServiceForwarderIn(policy.id().zone());
            for (Endpoint endpoint : endpoints) {
                // ALIAS for hostname-based load balancers, DIRECT for IP-based ones
                if (policy.canonicalName().isPresent()) {
                    forwarder.removeRecords(Record.Type.ALIAS,
                                            RecordName.from(endpoint.dnsName()),
                                            RecordData.fqdn(policy.canonicalName().get().value()),
                                            Priority.normal);
                    forwarder.removeRecords(Record.Type.ALIAS,
                                            RecordName.from(endpoint.legacyRegionalDnsName()),
                                            RecordData.fqdn(policy.canonicalName().get().value()),
                                            Priority.normal);
                } else {
                    forwarder.removeRecords(Record.Type.DIRECT,
                                            RecordName.from(endpoint.dnsName()),
                                            RecordData.from(policy.ipAddress().get()),
                                            Priority.normal);
                    forwarder.removeRecords(Record.Type.DIRECT,
                                            RecordName.from(endpoint.legacyRegionalDnsName()),
                                            RecordData.from(policy.ipAddress().get()),
                                            Priority.normal);
                }
            }
        }
    }
} | class RoutingPolicies {
private final Controller controller;
private final CuratorDb db;

public RoutingPolicies(Controller controller) {
    this.controller = Objects.requireNonNull(controller, "controller must be non-null");
    this.db = controller.curator();
    // Reads and immediately rewrites all stored policies under the lock — presumably to
    // migrate persisted entries to the current serialization format. TODO confirm.
    try (var lock = db.lockRoutingPolicies()) {
        for (var policy : db.readRoutingPolicies().entrySet()) {
            db.writeRoutingPolicies(policy.getKey(), policy.getValue());
        }
    }
}
/** Returns all routing policies for the given deployment */
public RoutingPolicyList read(DeploymentId deployment) {
    RoutingPolicyList instancePolicies = read(deployment.applicationId());
    return instancePolicies.deployment(deployment);
}
/** Returns all routing policies for the given instance */
public RoutingPolicyList read(ApplicationId instance) {
    var storedPolicies = db.readRoutingPolicies(instance);
    return RoutingPolicyList.copyOf(storedPolicies);
}
/** Returns all routing policies belonging to any instance of the given application */
private RoutingPolicyList read(TenantAndApplicationId application) {
    List<RoutingPolicy> matching = new ArrayList<>();
    db.readRoutingPolicies(instance -> TenantAndApplicationId.from(instance).equals(application))
      .values()
      .forEach(matching::addAll);
    return RoutingPolicyList.copyOf(matching);
}
/** Returns every routing policy known to this controller */
private RoutingPolicyList readAll() {
    List<RoutingPolicy> all = new ArrayList<>();
    for (var policies : db.readRoutingPolicies().values()) {
        all.addAll(policies);
    }
    return RoutingPolicyList.copyOf(all);
}
/** Returns the zone-wide routing policy of the given zone */
public ZoneRoutingPolicy read(ZoneId zone) {
    ZoneRoutingPolicy zonePolicy = db.readZoneRoutingPolicy(zone);
    return zonePolicy;
}
/**
 * Refresh routing policies for instance in given zone. This is idempotent and changes will only be performed if
 * routing configuration affecting given deployment has changed.
 */
public void refresh(DeploymentId deployment, DeploymentSpec deploymentSpec) {
    ApplicationId instance = deployment.applicationId();
    List<LoadBalancer> loadBalancers = controller.serviceRegistry().configServer()
                                                 .getLoadBalancers(instance, deployment.zoneId());
    LoadBalancerAllocation allocation = new LoadBalancerAllocation(loadBalancers, deployment, deploymentSpec);
    Set<ZoneId> inactiveZones = inactiveZones(instance, deploymentSpec);
    try (var lock = db.lockRoutingPolicies()) {
        RoutingPolicyList applicationPolicies = read(TenantAndApplicationId.from(instance));
        RoutingPolicyList instancePolicies = applicationPolicies.instance(instance);
        RoutingPolicyList deploymentPolicies = applicationPolicies.deployment(allocation.deployment);

        // Order matters: first drop DNS for endpoints no longer referenced, then store the
        // policies for the current allocation, then (re)write the remaining DNS records.
        removeGlobalDnsUnreferencedBy(allocation, deploymentPolicies, lock);
        removeApplicationDnsUnreferencedBy(allocation, deploymentPolicies, lock);

        instancePolicies = storePoliciesOf(allocation, instancePolicies, lock);
        instancePolicies = removePoliciesUnreferencedBy(allocation, instancePolicies, lock);

        applicationPolicies = applicationPolicies.replace(instance, instancePolicies);
        updateGlobalDnsOf(instancePolicies, inactiveZones, lock);
        updateApplicationDnsOf(applicationPolicies, inactiveZones, lock);
    }
}
/** Set the status of all global endpoints in given zone */
public void setRoutingStatus(ZoneId zone, RoutingStatus.Value value) {
    try (var lock = db.lockRoutingPolicies()) {
        db.writeZoneRoutingPolicy(new ZoneRoutingPolicy(zone, RoutingStatus.create(value, RoutingStatus.Agent.operator,
                                                                                   controller.clock().instant())));
        // A zone-wide status change affects every instance, so refresh global DNS for all of them
        Map<ApplicationId, RoutingPolicyList> allPolicies = readAll().groupingBy(policy -> policy.id().owner());
        for (var instancePolicies : allPolicies.values()) {
            updateGlobalDnsOf(instancePolicies, Set.of(), lock);
        }
    }
}
/** Set the status of all global endpoints for given deployment */
public void setRoutingStatus(DeploymentId deployment, RoutingStatus.Value value, RoutingStatus.Agent agent) {
    ApplicationId instance = deployment.applicationId();
    try (var lock = db.lockRoutingPolicies()) {
        RoutingPolicyList applicationPolicies = read(TenantAndApplicationId.from(instance));
        RoutingPolicyList deploymentPolicies = applicationPolicies.deployment(deployment);
        // Update the routing status of every policy belonging to this deployment
        Map<RoutingPolicyId, RoutingPolicy> updatedPolicies = new LinkedHashMap<>(applicationPolicies.asMap());
        for (var policy : deploymentPolicies) {
            var newPolicy = policy.with(policy.status().with(RoutingStatus.create(value, agent,
                                                                                  controller.clock().instant())));
            updatedPolicies.put(policy.id(), newPolicy);
        }
        RoutingPolicyList effectivePolicies = RoutingPolicyList.copyOf(updatedPolicies.values());
        // Persist per instance, then rewrite both instance-level and application-level DNS
        Map<ApplicationId, RoutingPolicyList> policiesByInstance = effectivePolicies.groupingBy(policy -> policy.id().owner());
        policiesByInstance.forEach((owner, instancePolicies) -> db.writeRoutingPolicies(owner, instancePolicies.asList()));
        policiesByInstance.forEach((ignored, instancePolicies) -> updateGlobalDnsOf(instancePolicies, Set.of(), lock));
        updateApplicationDnsOf(effectivePolicies, Set.of(), lock);
    }
}
/** Updates global DNS records for the given instance-level policies */
private void updateGlobalDnsOf(RoutingPolicyList instancePolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Mutex lock) {
    Map<RoutingId, List<RoutingPolicy>> routingTable = instancePolicies.asInstanceRoutingTable();
    routingTable.forEach((routingId, policies) -> {
        // Only non-rotation endpoints matching this routing ID are managed here
        EndpointList globalEndpoints = controller.routing().readDeclaredEndpointsOf(routingId.instance())
                                                 .named(routingId.endpointId())
                                                 .not().requiresRotation();
        for (Endpoint endpoint : globalEndpoints) {
            updateGlobalDnsOf(endpoint, inactiveZones, policies);
        }
    });
}
/** Update global DNS records for given global endpoint */
private void updateGlobalDnsOf(Endpoint endpoint, Set<ZoneId> inactiveZones, List<RoutingPolicy> policies) {
    if (endpoint.scope() != Endpoint.Scope.global) throw new IllegalArgumentException("Endpoint " + endpoint + " is not global");
    Collection<RegionEndpoint> regionEndpoints = computeRegionEndpoints(policies, inactiveZones);
    // First write the per-region record sets (weighted zone targets behind each region name)
    regionEndpoints.forEach(regionEndpoint -> {
        if ( ! regionEndpoint.zoneAliasTargets().isEmpty()) {
            controller.nameServiceForwarder().createAlias(RecordName.from(regionEndpoint.target().name().value()),
                                                          regionEndpoint.zoneAliasTargets(),
                                                          Priority.normal);
        }
        if ( ! regionEndpoint.zoneDirectTargets().isEmpty()) {
            controller.nameServiceForwarder().createDirect(RecordName.from(regionEndpoint.target().name().value()),
                                                           regionEndpoint.zoneDirectTargets(),
                                                           Priority.normal);
        }
    });
    // Then partition regions into active/inactive for the global latency-based record
    Set<AliasTarget> latencyTargets = new LinkedHashSet<>();
    Set<AliasTarget> inactiveLatencyTargets = new LinkedHashSet<>();
    for (var regionEndpoint : regionEndpoints) {
        if (regionEndpoint.active()) {
            latencyTargets.add(regionEndpoint.target());
        } else {
            inactiveLatencyTargets.add(regionEndpoint.target());
        }
    }
    // Fail open: if every region is configured out, keep all of them as targets instead of
    // leaving the global endpoint without any targets at all
    if (latencyTargets.isEmpty() && !inactiveLatencyTargets.isEmpty()) {
        latencyTargets.addAll(inactiveLatencyTargets);
        inactiveLatencyTargets.clear();
    }
    controller.nameServiceForwarder().createAlias(RecordName.from(endpoint.dnsName()), latencyTargets, Priority.normal);
    inactiveLatencyTargets.forEach(t -> controller.nameServiceForwarder()
                                                  .removeRecords(Record.Type.ALIAS,
                                                                 RecordData.fqdn(t.name().value()),
                                                                 Priority.normal));
}
/** Compute region endpoints and their targets from given policies */
private Collection<RegionEndpoint> computeRegionEndpoints(List<RoutingPolicy> policies, Set<ZoneId> inactiveZones) {
    Map<Endpoint, RegionEndpoint> endpoints = new LinkedHashMap<>();
    for (var policy : policies) {
        // ALIAS targets (canonical name present) require a DNS zone; DIRECT (IP) targets do not
        if (policy.dnsZone().isEmpty() && policy.canonicalName().isPresent()) continue;
        if (controller.zoneRegistry().routingMethod(policy.id().zone()) != RoutingMethod.exclusive) continue;
        Endpoint endpoint = policy.regionEndpointIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry());
        var zonePolicy = db.readZoneRoutingPolicy(policy.id().zone());
        long weight = 1;
        if (isConfiguredOut(zonePolicy, policy, inactiveZones)) {
            weight = 0; // A zone that is configured out keeps its record, but with zero weight
        }
        RegionEndpoint regionEndpoint = endpoints.computeIfAbsent(endpoint, (k) -> new RegionEndpoint(
                new LatencyAliasTarget(DomainName.of(endpoint.dnsName()), policy.dnsZone().get(), policy.id().zone())));
        if (policy.canonicalName().isPresent()) {
            var weightedTarget = new WeightedAliasTarget(
                    policy.canonicalName().get(), policy.dnsZone().get(), policy.id().zone(), weight);
            regionEndpoint.add(weightedTarget);
        } else {
            var weightedTarget = new WeightedDirectTarget(
                    RecordData.from(policy.ipAddress().get()), policy.id().zone(), weight);
            regionEndpoint.add(weightedTarget);
        }
    }
    return endpoints.values();
}
/**
 * Updates DNS records for application-level endpoints (endpoints spanning instances).
 * Records are written under both the current endpoint name and the legacy regional name.
 * Targets in zones that are configured out are removed instead of written — unless every
 * target of an endpoint is out, in which case all are kept active (fail open).
 */
private void updateApplicationDnsOf(RoutingPolicyList routingPolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Mutex lock) {
    Map<RoutingId, List<RoutingPolicy>> routingTable = routingPolicies.asApplicationRoutingTable();
    if (routingTable.isEmpty()) return;
    Application application = controller.applications().requireApplication(routingTable.keySet().iterator().next().application());
    Map<Endpoint, Set<Target>> targetsByEndpoint = new LinkedHashMap<>();
    Map<Endpoint, Set<Target>> inactiveTargetsByEndpoint = new LinkedHashMap<>();
    for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) {
        RoutingId routingId = routeEntry.getKey();
        EndpointList endpoints = controller.routing().declaredEndpointsOf(application)
                                           .scope(Endpoint.Scope.application)
                                           .named(routingId.endpointId());
        for (Endpoint endpoint : endpoints) {
            for (var policy : routeEntry.getValue()) {
                for (var target : endpoint.targets()) {
                    if (!policy.appliesTo(target.deployment())) continue;
                    if (policy.dnsZone().isEmpty() && policy.canonicalName().isPresent())
                        continue; // ALIAS targets require a DNS zone
                    ZoneRoutingPolicy zonePolicy = db.readZoneRoutingPolicy(policy.id().zone());
                    // Both map entries are created together so each endpoint has both sets
                    Set<Target> activeTargets = targetsByEndpoint.computeIfAbsent(endpoint, (k) -> new LinkedHashSet<>());
                    Set<Target> inactiveTargets = inactiveTargetsByEndpoint.computeIfAbsent(endpoint, (k) -> new LinkedHashSet<>());
                    if (isConfiguredOut(zonePolicy, policy, inactiveZones)) {
                        inactiveTargets.add(Target.weighted(policy, target));
                    }
                    else {
                        activeTargets.add(Target.weighted(policy, target));
                    }
                }
            }
        }
    }
    // Fail open: an endpoint whose targets are all configured out keeps them all active
    for (var kv : targetsByEndpoint.entrySet()) {
        Endpoint endpoint = kv.getKey();
        Set<Target> activeTargets = kv.getValue();
        if (!activeTargets.isEmpty()) {
            continue;
        }
        Set<Target> inactiveTargets = inactiveTargetsByEndpoint.get(endpoint);
        activeTargets.addAll(inactiveTargets);
        inactiveTargets.clear();
    }
    targetsByEndpoint.forEach((applicationEndpoint, targets) -> {
        ZoneId targetZone = applicationEndpoint.targets().iterator().next().deployment().zoneId();
        Set<AliasTarget> aliasTargets = new LinkedHashSet<>();
        Set<DirectTarget> directTargets = new LinkedHashSet<>();
        for (Target target : targets) {
            if (target.aliasOrDirectTarget() instanceof AliasTarget at) aliasTargets.add(at);
            else directTargets.add((DirectTarget) target.aliasOrDirectTarget());
        }
        if ( ! aliasTargets.isEmpty()) {
            nameServiceForwarderIn(targetZone).createAlias(
                    RecordName.from(applicationEndpoint.dnsName()), aliasTargets, Priority.normal);
            // Fixed typo: legacyRegionalDsnName -> legacyRegionalDnsName.
            // NOTE(review): rename must match the accessor declared on Endpoint — confirm.
            nameServiceForwarderIn(targetZone).createAlias(
                    RecordName.from(applicationEndpoint.legacyRegionalDnsName()), aliasTargets, Priority.normal);
        }
        if ( ! directTargets.isEmpty()) {
            nameServiceForwarderIn(targetZone).createDirect(
                    RecordName.from(applicationEndpoint.dnsName()), directTargets, Priority.normal);
            nameServiceForwarderIn(targetZone).createDirect(
                    RecordName.from(applicationEndpoint.legacyRegionalDnsName()), directTargets, Priority.normal);
        }
    });
    inactiveTargetsByEndpoint.forEach((applicationEndpoint, targets) -> {
        ZoneId targetZone = applicationEndpoint.targets().iterator().next().deployment().zoneId();
        targets.forEach(target -> {
            nameServiceForwarderIn(targetZone).removeRecords(target.type(),
                                                             RecordName.from(applicationEndpoint.dnsName()),
                                                             target.data(),
                                                             Priority.normal);
            nameServiceForwarderIn(targetZone).removeRecords(target.type(),
                                                             RecordName.from(applicationEndpoint.legacyRegionalDnsName()),
                                                             target.data(),
                                                             Priority.normal);
        });
    });
}
/**
 * Store routing policies for given load balancers
 *
 * @return the updated policies
 */
private RoutingPolicyList storePoliciesOf(LoadBalancerAllocation allocation, RoutingPolicyList instancePolicies, @SuppressWarnings("unused") Mutex lock) {
    Map<RoutingPolicyId, RoutingPolicy> policies = new LinkedHashMap<>(instancePolicies.asMap());
    for (LoadBalancer loadBalancer : allocation.loadBalancers) {
        // A load balancer without a hostname or IP cannot be routed to; skip it
        if (loadBalancer.hostname().isEmpty() && loadBalancer.ipAddress().isEmpty()) continue;
        var policyId = new RoutingPolicyId(loadBalancer.application(), loadBalancer.cluster(), allocation.deployment.zoneId());
        var existingPolicy = policies.get(policyId);
        // IP-based load balancers have no real DNS zone; a placeholder value is stored
        var dnsZone = loadBalancer.ipAddress().isPresent() ? Optional.of("ignored") : loadBalancer.dnsZone();
        var newPolicy = new RoutingPolicy(policyId, loadBalancer.hostname(), loadBalancer.ipAddress(), dnsZone,
                                          allocation.instanceEndpointsOf(loadBalancer),
                                          allocation.applicationEndpointsOf(loadBalancer),
                                          new RoutingPolicy.Status(isActive(loadBalancer), RoutingStatus.DEFAULT));
        if (existingPolicy != null) {
            // Preserve the operator/user-set routing status of the existing policy
            newPolicy = newPolicy.with(newPolicy.status().with(existingPolicy.status().routingStatus()));
        }
        updateZoneDnsOf(newPolicy);
        policies.put(newPolicy.id(), newPolicy);
    }
    RoutingPolicyList updated = RoutingPolicyList.copyOf(policies.values());
    db.writeRoutingPolicies(allocation.deployment.applicationId(), updated.asList());
    return updated;
}
/** Creates or refreshes the zone-scoped DNS record(s) for the given policy */
private void updateZoneDnsOf(RoutingPolicy policy) {
    for (var endpoint : policy.zoneEndpointsIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry())) {
        RecordName name = RecordName.from(endpoint.dnsName());
        Record record;
        if (policy.canonicalName().isPresent()) {
            // Hostname-based load balancer: CNAME to its canonical name
            record = new Record(Record.Type.CNAME, name, RecordData.fqdn(policy.canonicalName().get().value()));
        } else {
            // IP-based load balancer: plain A record
            record = new Record(Record.Type.A, name, RecordData.from(policy.ipAddress().orElseThrow()));
        }
        nameServiceForwarderIn(policy.id().zone()).createRecord(record, Priority.normal);
    }
}
/**
 * Remove policies and zone DNS records unreferenced by given load balancers
 *
 * @return the updated policies
 */
private RoutingPolicyList removePoliciesUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList instancePolicies, @SuppressWarnings("unused") Mutex lock) {
    Map<RoutingPolicyId, RoutingPolicy> newPolicies = new LinkedHashMap<>(instancePolicies.asMap());
    Set<RoutingPolicyId> activeIds = allocation.asPolicyIds();
    // Policies of this deployment whose load balancer is gone are removal candidates
    RoutingPolicyList removable = instancePolicies.deployment(allocation.deployment)
                                                  .not().matching(policy -> activeIds.contains(policy.id()));
    for (var policy : removable) {
        for (var endpoint : policy.zoneEndpointsIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry())) {
            nameServiceForwarderIn(allocation.deployment.zoneId()).removeRecords(Record.Type.CNAME,
                                                                                 RecordName.from(endpoint.dnsName()),
                                                                                 Priority.normal);
        }
        newPolicies.remove(policy.id());
    }
    RoutingPolicyList updated = RoutingPolicyList.copyOf(newPolicies.values());
    db.writeRoutingPolicies(allocation.deployment.applicationId(), updated.asList());
    return updated;
}
/** Remove unreferenced instance endpoints from DNS */
private void removeGlobalDnsUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList deploymentPolicies, @SuppressWarnings("unused") Mutex lock) {
    // Endpoints present in the stored routing table but absent from the new allocation
    Set<RoutingId> removalCandidates = new HashSet<>(deploymentPolicies.asInstanceRoutingTable().keySet());
    Set<RoutingId> activeRoutingIds = instanceRoutingIds(allocation);
    removalCandidates.removeAll(activeRoutingIds);
    for (var id : removalCandidates) {
        EndpointList endpoints = controller.routing().readDeclaredEndpointsOf(id.instance())
                                           .not().requiresRotation()
                                           .named(id.endpointId());
        NameServiceForwarder forwarder = nameServiceForwarderIn(allocation.deployment.zoneId());
        endpoints.forEach(endpoint -> forwarder.removeRecords(Record.Type.ALIAS, RecordName.from(endpoint.dnsName()),
                                                              Priority.normal));
    }
}
/** Returns the instance-level routing IDs of the given allocation */
// (Previous comment here described removing application endpoints, which this method
//  does not do; it was apparently orphaned from a neighboring method.)
private Set<RoutingId> instanceRoutingIds(LoadBalancerAllocation allocation) {
    return routingIdsFrom(allocation, false);
}
/** Returns the application-level routing IDs of the given allocation */
private Set<RoutingId> applicationRoutingIds(LoadBalancerAllocation allocation) {
    return routingIdsFrom(allocation, true);
}
/** Computes the routing IDs covered by the load balancers of the given allocation */
private static Set<RoutingId> routingIdsFrom(LoadBalancerAllocation allocation, boolean applicationLevel) {
    Set<RoutingId> ids = new LinkedHashSet<>();
    for (var loadBalancer : allocation.loadBalancers) {
        Set<EndpointId> endpointIds = applicationLevel ? allocation.applicationEndpointsOf(loadBalancer)
                                                       : allocation.instanceEndpointsOf(loadBalancer);
        endpointIds.forEach(endpointId -> ids.add(RoutingId.of(loadBalancer.application(), endpointId)));
    }
    return Collections.unmodifiableSet(ids);
}
/** Returns whether the endpoints of given policy are configured {@link RoutingStatus.Value#out} */
private static boolean isConfiguredOut(ZoneRoutingPolicy zonePolicy, RoutingPolicy policy, Set<ZoneId> inactiveZones) {
    // Out if any of these say so: the zone-wide policy, the policy itself,
    // or the deployment spec declaring the zone inactive
    return zonePolicy.routingStatus().value() == RoutingStatus.Value.out ||
           policy.status().routingStatus().value() == RoutingStatus.Value.out ||
           inactiveZones.contains(policy.id().zone());
}
/** Returns whether the given load balancer is in a state where it may serve traffic */
private static boolean isActive(LoadBalancer loadBalancer) {
    boolean servesTraffic = switch (loadBalancer.state()) {
        case reserved, active -> true; // reserved load balancers are provisioned but not yet activated
        default -> false;
    };
    return servesTraffic;
}
/** Represents records for a region-wide endpoint */
private static class RegionEndpoint {

    // The latency-routed target for this region in the global record set
    private final LatencyAliasTarget target;
    // Weighted zone-level targets behind this region, one set per record type
    private final Set<WeightedAliasTarget> zoneAliasTargets = new LinkedHashSet<>();
    private final Set<WeightedDirectTarget> zoneDirectTargets = new LinkedHashSet<>();

    public RegionEndpoint(LatencyAliasTarget target) {
        this.target = Objects.requireNonNull(target);
    }

    public LatencyAliasTarget target() { return target; }
    public Set<AliasTarget> zoneAliasTargets() { return Collections.unmodifiableSet(zoneAliasTargets); }
    public Set<DirectTarget> zoneDirectTargets() { return Collections.unmodifiableSet(zoneDirectTargets); }

    public void add(WeightedAliasTarget target) { zoneAliasTargets.add(target); }
    public void add(WeightedDirectTarget target) { zoneDirectTargets.add(target); }

    /** Whether any zone target has a positive weight, i.e. this region can receive traffic */
    public boolean active() {
        return zoneAliasTargets.stream().anyMatch(target -> target.weight() > 0) ||
               zoneDirectTargets.stream().anyMatch(target -> target.weight() > 0);
    }

    // Identity is the target's record name only, so endpoints dedupe per DNS name
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        RegionEndpoint that = (RegionEndpoint) o;
        return target.name().equals(that.target.name());
    }

    @Override
    public int hashCode() {
        return Objects.hash(target.name());
    }

}
/** Load balancers allocated to a deployment */
private static class LoadBalancerAllocation {

    private final DeploymentId deployment;
    private final List<LoadBalancer> loadBalancers;
    private final DeploymentSpec deploymentSpec;

    private LoadBalancerAllocation(List<LoadBalancer> loadBalancers, DeploymentId deployment,
                                   DeploymentSpec deploymentSpec) {
        this.deployment = deployment;
        this.loadBalancers = List.copyOf(loadBalancers);
        this.deploymentSpec = deploymentSpec;
    }

    /** Returns the policy IDs of the load balancers contained in this */
    private Set<RoutingPolicyId> asPolicyIds() {
        return loadBalancers.stream()
                            .map(lb -> new RoutingPolicyId(lb.application(),
                                                           lb.cluster(),
                                                           deployment.zoneId()))
                            .collect(Collectors.toUnmodifiableSet());
    }

    /** Returns all instance endpoint IDs served by given load balancer */
    private Set<EndpointId> instanceEndpointsOf(LoadBalancer loadBalancer) {
        // Only production deployments have declared endpoints
        if (!deployment.zoneId().environment().isProduction()) {
            return Set.of();
        }
        var instanceSpec = deploymentSpec.instance(loadBalancer.application().instance());
        if (instanceSpec.isEmpty()) {
            return Set.of();
        }
        // Legacy 'global-service-id' attribute maps to the default endpoint ID
        if (instanceSpec.get().globalServiceId().filter(id -> id.equals(loadBalancer.cluster().value())).isPresent()) {
            return Set.of(EndpointId.defaultId());
        }
        // Otherwise: declared endpoints whose cluster and region match this load balancer
        return instanceSpec.get().endpoints().stream()
                           .filter(endpoint -> endpoint.containerId().equals(loadBalancer.cluster().value()))
                           .filter(endpoint -> endpoint.regions().contains(deployment.zoneId().region()))
                           .map(com.yahoo.config.application.api.Endpoint::endpointId)
                           .map(EndpointId::of)
                           .collect(Collectors.toUnmodifiableSet());
    }

    /** Returns all application endpoint IDs served by given load balancer */
    private Set<EndpointId> applicationEndpointsOf(LoadBalancer loadBalancer) {
        if (!deployment.zoneId().environment().isProduction()) {
            return Set.of();
        }
        // Application-level endpoints whose cluster matches and which target this
        // deployment's region and instance
        return deploymentSpec.endpoints().stream()
                             .filter(endpoint -> endpoint.containerId().equals(loadBalancer.cluster().value()))
                             .filter(endpoint -> endpoint.targets().stream()
                                                         .anyMatch(target -> target.region().equals(deployment.zoneId().region()) &&
                                                                             target.instance().equals(deployment.applicationId().instance())))
                             .map(com.yahoo.config.application.api.Endpoint::endpointId)
                             .map(EndpointId::of)
                             .collect(Collectors.toUnmodifiableSet());
    }

}
/** Returns zones where global routing is declared inactive for instance through deploymentSpec */
private static Set<ZoneId> inactiveZones(ApplicationId instance, DeploymentSpec deploymentSpec) {
    var instanceSpec = deploymentSpec.instance(instance.instance());
    if (instanceSpec.isEmpty()) return Set.of();
    Set<ZoneId> inactive = new HashSet<>();
    for (var zone : instanceSpec.get().zones()) {
        if (!zone.environment().isProduction()) continue; // only production zones can be inactive
        if (zone.active()) continue;
        inactive.add(ZoneId.from(zone.environment(), zone.region().get()));
    }
    return Set.copyOf(inactive);
}
/** Returns the name updater to use for given zone */
private NameServiceForwarder nameServiceForwarderIn(ZoneId zone) {
    RoutingMethod routingMethod = controller.zoneRegistry().routingMethod(zone);
    return switch (routingMethod) {
        // Exclusively routed zones have their DNS managed by the controller
        case exclusive -> controller.nameServiceForwarder();
        // Shared routing needs no DNS updates; discard the requests
        case sharedLayer4 -> new NameServiceDiscarder(controller.curator());
    };
}
/** Denotes record data (record rhs) of either an ALIAS or a DIRECT target */
private record Target(Record.Type type, RecordData data, Object aliasOrDirectTarget) {

    /** Creates a weighted target for the given policy: DIRECT if it has an IP, ALIAS otherwise */
    static Target weighted(RoutingPolicy policy, Endpoint.Target endpointTarget) {
        if (policy.ipAddress().isPresent()) {
            var wt = new WeightedDirectTarget(RecordData.from(policy.ipAddress().get()),
                                              endpointTarget.deployment().zoneId(), endpointTarget.weight());
            return new Target(Record.Type.DIRECT, wt.recordData(), wt);
        }
        var wt = new WeightedAliasTarget(policy.canonicalName().get(), policy.dnsZone().get(),
                                         endpointTarget.deployment().zoneId(), endpointTarget.weight());
        return new Target(Record.Type.ALIAS, RecordData.fqdn(wt.name().value()), wt);
    }

}
/** A {@link NameServiceForwarder} that does nothing. Used in zones where no explicit DNS updates are needed */
private static class NameServiceDiscarder extends NameServiceForwarder {

    public NameServiceDiscarder(CuratorDb db) {
        super(db);
    }

    @Override
    protected void forward(NameServiceRequest request, Priority priority) {
        // Intentionally a no-op: requests are discarded
    }

}
} | class RoutingPolicies {
private final Controller controller;
private final CuratorDb db;

public RoutingPolicies(Controller controller) {
    this.controller = Objects.requireNonNull(controller, "controller must be non-null");
    this.db = controller.curator();
    // Reads and rewrites all stored policies under the lock — presumably a serialization
    // format migration. TODO confirm.
    try (var lock = db.lockRoutingPolicies()) {
        for (var policy : db.readRoutingPolicies().entrySet()) {
            db.writeRoutingPolicies(policy.getKey(), policy.getValue());
        }
    }
}
/** Read all routing policies for given deployment */
public RoutingPolicyList read(DeploymentId deployment) {
    return read(deployment.applicationId()).deployment(deployment);
}

/** Read all routing policies for given instance */
public RoutingPolicyList read(ApplicationId instance) {
    return RoutingPolicyList.copyOf(db.readRoutingPolicies(instance));
}

/** Read all routing policies for given application */
private RoutingPolicyList read(TenantAndApplicationId application) {
    // Flatten the per-instance policy lists of the application into a single list
    return db.readRoutingPolicies((instance) -> TenantAndApplicationId.from(instance).equals(application))
             .values()
             .stream()
             .flatMap(Collection::stream)
             .collect(Collectors.collectingAndThen(Collectors.toList(), RoutingPolicyList::copyOf));
}

/** Read all routing policies */
private RoutingPolicyList readAll() {
    return db.readRoutingPolicies()
             .values()
             .stream()
             .flatMap(Collection::stream)
             .collect(Collectors.collectingAndThen(Collectors.toList(), RoutingPolicyList::copyOf));
}

/** Read routing policy for given zone */
public ZoneRoutingPolicy read(ZoneId zone) {
    return db.readZoneRoutingPolicy(zone);
}
/**
 * Refresh routing policies for instance in given zone. This is idempotent and changes will only be performed if
 * routing configuration affecting given deployment has changed.
 */
public void refresh(DeploymentId deployment, DeploymentSpec deploymentSpec) {
    ApplicationId instance = deployment.applicationId();
    List<LoadBalancer> loadBalancers = controller.serviceRegistry().configServer()
                                                 .getLoadBalancers(instance, deployment.zoneId());
    LoadBalancerAllocation allocation = new LoadBalancerAllocation(loadBalancers, deployment, deploymentSpec);
    Set<ZoneId> inactiveZones = inactiveZones(instance, deploymentSpec);
    try (var lock = db.lockRoutingPolicies()) {
        RoutingPolicyList applicationPolicies = read(TenantAndApplicationId.from(instance));
        RoutingPolicyList instancePolicies = applicationPolicies.instance(instance);
        RoutingPolicyList deploymentPolicies = applicationPolicies.deployment(allocation.deployment);

        // Drop stale DNS, then store/update policies, then (re)write remaining DNS records
        removeGlobalDnsUnreferencedBy(allocation, deploymentPolicies, lock);
        removeApplicationDnsUnreferencedBy(allocation, deploymentPolicies, lock);

        instancePolicies = storePoliciesOf(allocation, instancePolicies, lock);
        instancePolicies = removePoliciesUnreferencedBy(allocation, instancePolicies, lock);

        applicationPolicies = applicationPolicies.replace(instance, instancePolicies);
        updateGlobalDnsOf(instancePolicies, inactiveZones, lock);
        updateApplicationDnsOf(applicationPolicies, inactiveZones, lock);
    }
}
/** Set the status of all global endpoints in given zone */
public void setRoutingStatus(ZoneId zone, RoutingStatus.Value value) {
    try (var lock = db.lockRoutingPolicies()) {
        db.writeZoneRoutingPolicy(new ZoneRoutingPolicy(zone, RoutingStatus.create(value, RoutingStatus.Agent.operator,
                                                                                   controller.clock().instant())));
        // Zone-wide status affects all instances; refresh global DNS for each of them
        Map<ApplicationId, RoutingPolicyList> allPolicies = readAll().groupingBy(policy -> policy.id().owner());
        for (var instancePolicies : allPolicies.values()) {
            updateGlobalDnsOf(instancePolicies, Set.of(), lock);
        }
    }
}

/** Set the status of all global endpoints for given deployment */
public void setRoutingStatus(DeploymentId deployment, RoutingStatus.Value value, RoutingStatus.Agent agent) {
    ApplicationId instance = deployment.applicationId();
    try (var lock = db.lockRoutingPolicies()) {
        RoutingPolicyList applicationPolicies = read(TenantAndApplicationId.from(instance));
        RoutingPolicyList deploymentPolicies = applicationPolicies.deployment(deployment);
        // Update the routing status of every policy of this deployment
        Map<RoutingPolicyId, RoutingPolicy> updatedPolicies = new LinkedHashMap<>(applicationPolicies.asMap());
        for (var policy : deploymentPolicies) {
            var newPolicy = policy.with(policy.status().with(RoutingStatus.create(value, agent,
                                                                                  controller.clock().instant())));
            updatedPolicies.put(policy.id(), newPolicy);
        }
        RoutingPolicyList effectivePolicies = RoutingPolicyList.copyOf(updatedPolicies.values());
        // Persist per instance, then rewrite instance-level and application-level DNS
        Map<ApplicationId, RoutingPolicyList> policiesByInstance = effectivePolicies.groupingBy(policy -> policy.id().owner());
        policiesByInstance.forEach((owner, instancePolicies) -> db.writeRoutingPolicies(owner, instancePolicies.asList()));
        policiesByInstance.forEach((ignored, instancePolicies) -> updateGlobalDnsOf(instancePolicies, Set.of(), lock));
        updateApplicationDnsOf(effectivePolicies, Set.of(), lock);
    }
}
/** Update global DNS records for given policies */
private void updateGlobalDnsOf(RoutingPolicyList instancePolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Mutex lock) {
    Map<RoutingId, List<RoutingPolicy>> routingTable = instancePolicies.asInstanceRoutingTable();
    for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) {
        RoutingId routingId = routeEntry.getKey();
        // Only non-rotation endpoints matching this routing ID are managed here
        controller.routing().readDeclaredEndpointsOf(routingId.instance())
                  .named(routingId.endpointId())
                  .not().requiresRotation()
                  .forEach(endpoint -> updateGlobalDnsOf(endpoint, inactiveZones, routeEntry.getValue()));
    }
}
/** Updates global DNS records for the given global endpoint. */
private void updateGlobalDnsOf(Endpoint endpoint, Set<ZoneId> inactiveZones, List<RoutingPolicy> policies) {
    if (endpoint.scope() != Endpoint.Scope.global) throw new IllegalArgumentException("Endpoint " + endpoint + " is not global");
    Collection<RegionEndpoint> regionEndpoints = computeRegionEndpoints(policies, inactiveZones);
    // First write the per-region records, which carry the weighted zone targets
    for (RegionEndpoint regionEndpoint : regionEndpoints) {
        RecordName regionName = RecordName.from(regionEndpoint.target().name().value());
        if ( ! regionEndpoint.zoneAliasTargets().isEmpty()) {
            controller.nameServiceForwarder().createAlias(regionName, regionEndpoint.zoneAliasTargets(), Priority.normal);
        }
        if ( ! regionEndpoint.zoneDirectTargets().isEmpty()) {
            controller.nameServiceForwarder().createDirect(regionName, regionEndpoint.zoneDirectTargets(), Priority.normal);
        }
    }
    // Partition regions into active and inactive latency targets for the global record
    Set<AliasTarget> latencyTargets = new LinkedHashSet<>();
    Set<AliasTarget> inactiveLatencyTargets = new LinkedHashSet<>();
    for (RegionEndpoint regionEndpoint : regionEndpoints) {
        (regionEndpoint.active() ? latencyTargets : inactiveLatencyTargets).add(regionEndpoint.target());
    }
    // If every region is configured out, keep them all in rotation rather than removing the endpoint entirely
    if (latencyTargets.isEmpty() && !inactiveLatencyTargets.isEmpty()) {
        latencyTargets.addAll(inactiveLatencyTargets);
        inactiveLatencyTargets.clear();
    }
    controller.nameServiceForwarder().createAlias(RecordName.from(endpoint.dnsName()), latencyTargets, Priority.normal);
    for (AliasTarget target : inactiveLatencyTargets) {
        controller.nameServiceForwarder().removeRecords(Record.Type.ALIAS,
                                                        RecordData.fqdn(target.name().value()),
                                                        Priority.normal);
    }
}
/** Computes region endpoints and their weighted targets from the given policies. */
private Collection<RegionEndpoint> computeRegionEndpoints(List<RoutingPolicy> policies, Set<ZoneId> inactiveZones) {
    Map<Endpoint, RegionEndpoint> endpoints = new LinkedHashMap<>();
    for (RoutingPolicy policy : policies) {
        // A CNAME target without a DNS zone cannot be aliased; skip it
        if (policy.dnsZone().isEmpty() && policy.canonicalName().isPresent()) continue;
        if (controller.zoneRegistry().routingMethod(policy.id().zone()) != RoutingMethod.exclusive) continue;
        Endpoint endpoint = policy.regionEndpointIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry());
        ZoneRoutingPolicy zonePolicy = db.readZoneRoutingPolicy(policy.id().zone());
        // Weight 0 when the zone is configured out, 1 otherwise
        long weight = isConfiguredOut(zonePolicy, policy, inactiveZones) ? 0 : 1;
        RegionEndpoint regionEndpoint = endpoints.computeIfAbsent(endpoint, (k) -> new RegionEndpoint(
                new LatencyAliasTarget(DomainName.of(endpoint.dnsName()), policy.dnsZone().get(), policy.id().zone())));
        if (policy.canonicalName().isPresent()) {
            regionEndpoint.add(new WeightedAliasTarget(policy.canonicalName().get(), policy.dnsZone().get(),
                                                       policy.id().zone(), weight));
        } else {
            regionEndpoint.add(new WeightedDirectTarget(RecordData.from(policy.ipAddress().get()),
                                                        policy.id().zone(), weight));
        }
    }
    return endpoints.values();
}
/**
 * Updates DNS records for application-scoped endpoints covered by the given policies.
 * For each endpoint, active targets are written as ALIAS/DIRECT records (both to the endpoint name and its
 * legacy regional name), while inactive targets have their records removed.
 */
private void updateApplicationDnsOf(RoutingPolicyList routingPolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Mutex lock) {
    Map<RoutingId, List<RoutingPolicy>> routingTable = routingPolicies.asApplicationRoutingTable();
    if (routingTable.isEmpty()) return;
    // All routing IDs in an application-level routing table belong to the same application
    Application application = controller.applications().requireApplication(routingTable.keySet().iterator().next().application());
    Map<Endpoint, Set<Target>> targetsByEndpoint = new LinkedHashMap<>();
    Map<Endpoint, Set<Target>> inactiveTargetsByEndpoint = new LinkedHashMap<>();
    for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) {
        RoutingId routingId = routeEntry.getKey();
        EndpointList endpoints = controller.routing().declaredEndpointsOf(application)
                                           .scope(Endpoint.Scope.application)
                                           .named(routingId.endpointId());
        for (Endpoint endpoint : endpoints) {
            for (var policy : routeEntry.getValue()) {
                for (var target : endpoint.targets()) {
                    if (!policy.appliesTo(target.deployment())) continue;
                    // A CNAME policy without a DNS zone cannot be aliased; skip it
                    if (policy.dnsZone().isEmpty() && policy.canonicalName().isPresent())
                        continue;
                    ZoneRoutingPolicy zonePolicy = db.readZoneRoutingPolicy(policy.id().zone());
                    Set<Target> activeTargets = targetsByEndpoint.computeIfAbsent(endpoint, (k) -> new LinkedHashSet<>());
                    Set<Target> inactiveTargets = inactiveTargetsByEndpoint.computeIfAbsent(endpoint, (k) -> new LinkedHashSet<>());
                    if (isConfiguredOut(zonePolicy, policy, inactiveZones)) {
                        inactiveTargets.add(Target.weighted(policy, target));
                    }
                    else {
                        activeTargets.add(Target.weighted(policy, target));
                    }
                }
            }
        }
    }
    // If every target of an endpoint is configured out, keep all of them in rotation instead of
    // removing the endpoint's records entirely
    for (var kv : targetsByEndpoint.entrySet()) {
        Endpoint endpoint = kv.getKey();
        Set<Target> activeTargets = kv.getValue();
        if (!activeTargets.isEmpty()) {
            continue;
        }
        Set<Target> inactiveTargets = inactiveTargetsByEndpoint.get(endpoint);
        activeTargets.addAll(inactiveTargets);
        inactiveTargets.clear();
    }
    targetsByEndpoint.forEach((applicationEndpoint, targets) -> {
        // Records are managed through the forwarder of the first declared target's zone
        ZoneId targetZone = applicationEndpoint.targets().iterator().next().deployment().zoneId();
        Set<AliasTarget> aliasTargets = new LinkedHashSet<>();
        Set<DirectTarget> directTargets = new LinkedHashSet<>();
        for (Target target : targets) {
            if (target.aliasOrDirectTarget() instanceof AliasTarget at) aliasTargets.add(at);
            else directTargets.add((DirectTarget) target.aliasOrDirectTarget());
        }
        if ( ! aliasTargets.isEmpty()) {
            nameServiceForwarderIn(targetZone).createAlias(
                    RecordName.from(applicationEndpoint.dnsName()), aliasTargets, Priority.normal);
            nameServiceForwarderIn(targetZone).createAlias(
                    RecordName.from(applicationEndpoint.legacyRegionalDnsName()), aliasTargets, Priority.normal);
        }
        if ( ! directTargets.isEmpty()) {
            nameServiceForwarderIn(targetZone).createDirect(
                    RecordName.from(applicationEndpoint.dnsName()), directTargets, Priority.normal);
            nameServiceForwarderIn(targetZone).createDirect(
                    RecordName.from(applicationEndpoint.legacyRegionalDnsName()), directTargets, Priority.normal);
        }
    });
    // Remove records for targets that are configured out
    inactiveTargetsByEndpoint.forEach((applicationEndpoint, targets) -> {
        ZoneId targetZone = applicationEndpoint.targets().iterator().next().deployment().zoneId();
        targets.forEach(target -> {
            nameServiceForwarderIn(targetZone).removeRecords(target.type(),
                                                             RecordName.from(applicationEndpoint.dnsName()),
                                                             target.data(),
                                                             Priority.normal);
            nameServiceForwarderIn(targetZone).removeRecords(target.type(),
                                                             RecordName.from(applicationEndpoint.legacyRegionalDnsName()),
                                                             target.data(),
                                                             Priority.normal);
        });
    });
}
/**
 * Store routing policies for given load balancers.
 *
 * Existing policies keep the routing status previously set on them; zone DNS records are refreshed
 * for each stored policy.
 *
 * @return the updated policies
 */
private RoutingPolicyList storePoliciesOf(LoadBalancerAllocation allocation, RoutingPolicyList instancePolicies, @SuppressWarnings("unused") Mutex lock) {
    Map<RoutingPolicyId, RoutingPolicy> policies = new LinkedHashMap<>(instancePolicies.asMap());
    for (LoadBalancer loadBalancer : allocation.loadBalancers) {
        if (loadBalancer.hostname().isEmpty() && loadBalancer.ipAddress().isEmpty()) continue; // Nothing to route to
        var policyId = new RoutingPolicyId(loadBalancer.application(), loadBalancer.cluster(), allocation.deployment.zoneId());
        var existingPolicy = policies.get(policyId);
        // NOTE(review): "ignored" is a sentinel — computeRegionEndpoints() unconditionally reads
        // policy.dnsZone().get(), so a present value is required even for IP-based (DIRECT) targets
        // where the DNS zone is irrelevant. Confirm no consumer interprets the literal value.
        var dnsZone = loadBalancer.ipAddress().isPresent() ? Optional.of("ignored") : loadBalancer.dnsZone();
        var newPolicy = new RoutingPolicy(policyId, loadBalancer.hostname(), loadBalancer.ipAddress(), dnsZone,
                                          allocation.instanceEndpointsOf(loadBalancer),
                                          allocation.applicationEndpointsOf(loadBalancer),
                                          new RoutingPolicy.Status(isActive(loadBalancer), RoutingStatus.DEFAULT));
        if (existingPolicy != null) {
            // Carry over the routing status from the previous version of this policy
            newPolicy = newPolicy.with(newPolicy.status().with(existingPolicy.status().routingStatus()));
        }
        updateZoneDnsOf(newPolicy);
        policies.put(newPolicy.id(), newPolicy);
    }
    RoutingPolicyList updated = RoutingPolicyList.copyOf(policies.values());
    db.writeRoutingPolicies(allocation.deployment.applicationId(), updated.asList());
    return updated;
}
/** Creates or updates the zone-scoped DNS record(s) for the given policy. */
private void updateZoneDnsOf(RoutingPolicy policy) {
    for (var endpoint : policy.zoneEndpointsIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry())) {
        RecordName name = RecordName.from(endpoint.dnsName());
        // Hostname-backed policies get a CNAME; IP-backed policies get an A record
        Record record;
        if (policy.canonicalName().isPresent()) {
            record = new Record(Record.Type.CNAME, name, RecordData.fqdn(policy.canonicalName().get().value()));
        } else {
            record = new Record(Record.Type.A, name, RecordData.from(policy.ipAddress().orElseThrow()));
        }
        nameServiceForwarderIn(policy.id().zone()).createRecord(record, Priority.normal);
    }
}
/**
 * Removes policies and zone DNS records that are no longer referenced by the given load balancers.
 *
 * @return the updated policies
 */
private RoutingPolicyList removePoliciesUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList instancePolicies, @SuppressWarnings("unused") Mutex lock) {
    Map<RoutingPolicyId, RoutingPolicy> remainingPolicies = new LinkedHashMap<>(instancePolicies.asMap());
    Set<RoutingPolicyId> activeIds = allocation.asPolicyIds();
    RoutingPolicyList obsolete = instancePolicies.deployment(allocation.deployment)
                                                 .not().matching(policy -> activeIds.contains(policy.id()));
    for (var policy : obsolete) {
        // NOTE(review): only CNAME records are removed here, while updateZoneDnsOf also creates A records
        // for IP-based policies — those appear to be left behind on removal; verify intended
        for (var endpoint : policy.zoneEndpointsIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry())) {
            nameServiceForwarderIn(allocation.deployment.zoneId()).removeRecords(Record.Type.CNAME,
                                                                                RecordName.from(endpoint.dnsName()),
                                                                                Priority.normal);
        }
        remainingPolicies.remove(policy.id());
    }
    RoutingPolicyList updated = RoutingPolicyList.copyOf(remainingPolicies.values());
    db.writeRoutingPolicies(allocation.deployment.applicationId(), updated.asList());
    return updated;
}
/** Removes instance-level endpoints from DNS that the given allocation no longer references. */
private void removeGlobalDnsUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList deploymentPolicies, @SuppressWarnings("unused") Mutex lock) {
    // Candidates are all routing IDs currently known for the deployment, minus the ones still allocated
    Set<RoutingId> removalCandidates = new HashSet<>(deploymentPolicies.asInstanceRoutingTable().keySet());
    removalCandidates.removeAll(instanceRoutingIds(allocation));
    NameServiceForwarder forwarder = nameServiceForwarderIn(allocation.deployment.zoneId());
    for (RoutingId id : removalCandidates) {
        controller.routing().readDeclaredEndpointsOf(id.instance())
                  .not().requiresRotation()
                  .named(id.endpointId())
                  .forEach(endpoint -> forwarder.removeRecords(Record.Type.ALIAS,
                                                               RecordName.from(endpoint.dnsName()),
                                                               Priority.normal));
    }
}
/** Returns instance-level routing IDs for the load balancers in given allocation */
private Set<RoutingId> instanceRoutingIds(LoadBalancerAllocation allocation) {
    return routingIdsFrom(allocation, false);
}
/** Returns application-level routing IDs for the load balancers in given allocation */
private Set<RoutingId> applicationRoutingIds(LoadBalancerAllocation allocation) {
    return routingIdsFrom(allocation, true);
}
/** Computes the routing IDs covered by the load balancers of the given allocation. */
private static Set<RoutingId> routingIdsFrom(LoadBalancerAllocation allocation, boolean applicationLevel) {
    Set<RoutingId> routingIds = new LinkedHashSet<>();
    for (LoadBalancer loadBalancer : allocation.loadBalancers) {
        Set<EndpointId> endpointIds = applicationLevel ? allocation.applicationEndpointsOf(loadBalancer)
                                                       : allocation.instanceEndpointsOf(loadBalancer);
        endpointIds.forEach(endpointId -> routingIds.add(RoutingId.of(loadBalancer.application(), endpointId)));
    }
    return Collections.unmodifiableSet(routingIds);
}
/** Returns whether the endpoints of given policy are configured {@link RoutingStatus.Value#out} */
private static boolean isConfiguredOut(ZoneRoutingPolicy zonePolicy, RoutingPolicy policy, Set<ZoneId> inactiveZones) {
    // Out if the whole zone is set out, the policy itself is set out, or the zone is in the
    // inactive set computed from the deployment spec (see inactiveZones())
    return zonePolicy.routingStatus().value() == RoutingStatus.Value.out ||
           policy.status().routingStatus().value() == RoutingStatus.Value.out ||
           inactiveZones.contains(policy.id().zone());
}
/** Returns whether given load balancer is in a state that should be routed to. */
private static boolean isActive(LoadBalancer loadBalancer) {
    // reserved is treated as active — presumably to keep DNS stable while a deployment
    // (re)activates; confirm against the load balancer state machine
    switch (loadBalancer.state()) {
        case reserved, active: return true;
        default: return false;
    }
}
/** Holds the DNS targets that make up a region-wide endpoint. Identity is the target's record name. */
private static class RegionEndpoint {

    private final LatencyAliasTarget target;
    private final Set<WeightedAliasTarget> zoneAliasTargets = new LinkedHashSet<>();
    private final Set<WeightedDirectTarget> zoneDirectTargets = new LinkedHashSet<>();

    public RegionEndpoint(LatencyAliasTarget target) {
        this.target = Objects.requireNonNull(target);
    }

    public LatencyAliasTarget target() { return target; }

    public Set<AliasTarget> zoneAliasTargets() { return Collections.unmodifiableSet(zoneAliasTargets); }

    public Set<DirectTarget> zoneDirectTargets() { return Collections.unmodifiableSet(zoneDirectTargets); }

    public void add(WeightedAliasTarget target) { zoneAliasTargets.add(target); }

    public void add(WeightedDirectTarget target) { zoneDirectTargets.add(target); }

    /** Returns whether any target in this region carries a positive weight */
    public boolean active() {
        for (WeightedAliasTarget t : zoneAliasTargets) {
            if (t.weight() > 0) return true;
        }
        for (WeightedDirectTarget t : zoneDirectTargets) {
            if (t.weight() > 0) return true;
        }
        return false;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        RegionEndpoint other = (RegionEndpoint) o;
        return target.name().equals(other.target.name());
    }

    @Override
    public int hashCode() {
        return Objects.hash(target.name());
    }

}
/** Load balancers allocated to a deployment, together with the deployment spec that declares their endpoints */
private static class LoadBalancerAllocation {

    private final DeploymentId deployment;
    private final List<LoadBalancer> loadBalancers;
    private final DeploymentSpec deploymentSpec;

    private LoadBalancerAllocation(List<LoadBalancer> loadBalancers, DeploymentId deployment,
                                   DeploymentSpec deploymentSpec) {
        this.deployment = deployment;
        this.loadBalancers = List.copyOf(loadBalancers);
        this.deploymentSpec = deploymentSpec;
    }

    /** Returns the policy IDs of the load balancers contained in this */
    private Set<RoutingPolicyId> asPolicyIds() {
        return loadBalancers.stream()
                            .map(lb -> new RoutingPolicyId(lb.application(),
                                                           lb.cluster(),
                                                           deployment.zoneId()))
                            .collect(Collectors.toUnmodifiableSet());
    }

    /** Returns all instance endpoint IDs served by given load balancer */
    private Set<EndpointId> instanceEndpointsOf(LoadBalancer loadBalancer) {
        // Instance endpoints only exist for production deployments
        if (!deployment.zoneId().environment().isProduction()) {
            return Set.of();
        }
        var instanceSpec = deploymentSpec.instance(loadBalancer.application().instance());
        if (instanceSpec.isEmpty()) {
            return Set.of();
        }
        // A cluster named by global-service-id maps to the default endpoint ID
        if (instanceSpec.get().globalServiceId().filter(id -> id.equals(loadBalancer.cluster().value())).isPresent()) {
            return Set.of(EndpointId.defaultId());
        }
        // Otherwise: declared endpoints that point at this cluster and include this region
        return instanceSpec.get().endpoints().stream()
                           .filter(endpoint -> endpoint.containerId().equals(loadBalancer.cluster().value()))
                           .filter(endpoint -> endpoint.regions().contains(deployment.zoneId().region()))
                           .map(com.yahoo.config.application.api.Endpoint::endpointId)
                           .map(EndpointId::of)
                           .collect(Collectors.toUnmodifiableSet());
    }

    /** Returns all application endpoint IDs served by given load balancer */
    private Set<EndpointId> applicationEndpointsOf(LoadBalancer loadBalancer) {
        // Application endpoints only exist for production deployments
        if (!deployment.zoneId().environment().isProduction()) {
            return Set.of();
        }
        // Declared application endpoints whose cluster matches, and which target this region + instance
        return deploymentSpec.endpoints().stream()
                             .filter(endpoint -> endpoint.containerId().equals(loadBalancer.cluster().value()))
                             .filter(endpoint -> endpoint.targets().stream()
                                                         .anyMatch(target -> target.region().equals(deployment.zoneId().region()) &&
                                                                             target.instance().equals(deployment.applicationId().instance())))
                             .map(com.yahoo.config.application.api.Endpoint::endpointId)
                             .map(EndpointId::of)
                             .collect(Collectors.toUnmodifiableSet());
    }

}
/** Returns the zones where global routing is declared inactive for the instance in its deployment spec */
private static Set<ZoneId> inactiveZones(ApplicationId instance, DeploymentSpec deploymentSpec) {
    return deploymentSpec.instance(instance.instance())
                         .map(spec -> spec.zones().stream()
                                          .filter(zone -> zone.environment().isProduction())
                                          .filter(zone -> !zone.active())
                                          .map(zone -> ZoneId.from(zone.environment(), zone.region().get()))
                                          .collect(Collectors.toUnmodifiableSet()))
                         .orElse(Set.of());
}
/** Returns the name service forwarder to use for given zone */
private NameServiceForwarder nameServiceForwarderIn(ZoneId zone) {
    RoutingMethod routingMethod = controller.zoneRegistry().routingMethod(zone);
    return switch (routingMethod) {
        // Shared layer-4 routing needs no explicit DNS updates from the controller
        case sharedLayer4 -> new NameServiceDiscarder(controller.curator());
        case exclusive -> controller.nameServiceForwarder();
    };
}
/** Denotes record data (record rhs) of either an ALIAS or a DIRECT target */
private record Target(Record.Type type, RecordData data, Object aliasOrDirectTarget) {

    /**
     * Creates the weighted target for given policy and endpoint target: DIRECT when the policy has an
     * IP address, otherwise ALIAS based on the policy's canonical name.
     */
    static Target weighted(RoutingPolicy policy, Endpoint.Target endpointTarget) {
        if (policy.ipAddress().isPresent()) {
            var wt = new WeightedDirectTarget(RecordData.from(policy.ipAddress().get()),
                                              endpointTarget.deployment().zoneId(), endpointTarget.weight());
            return new Target(Record.Type.DIRECT, wt.recordData(), wt);
        }
        // NOTE(review): assumes canonicalName and dnsZone are present whenever ipAddress is absent —
        // callers must guarantee this or the get() calls below throw; confirm
        var wt = new WeightedAliasTarget(policy.canonicalName().get(), policy.dnsZone().get(),
                                         endpointTarget.deployment().zoneId(), endpointTarget.weight());
        return new Target(Record.Type.ALIAS, RecordData.fqdn(wt.name().value()), wt);
    }
}
/** A {@link NameServiceForwarder} that does nothing. Used in zones where no explicit DNS updates are needed */
private static class NameServiceDiscarder extends NameServiceForwarder {

    public NameServiceDiscarder(CuratorDb db) {
        super(db);
    }

    @Override
    protected void forward(NameServiceRequest request, Priority priority) {
        // Intentionally a no-op: requests are silently dropped
    }
}
} |
Does this mean that `requestedDnsSans` can contain duplicates now? Instead of doing this, how about having the `EndpointCertificateMetadata` constructor ensure there are no duplicates since we never want that in the certificate, e.g. `this.requestedDnsSans = requestedDnsSans.stream().distinct().toList()` and remove the set wrapping here. | void includes_application_endpoint_when_declared() {
Instance instance = new Instance(ApplicationId.from("t1", "a1", "default"), Tags.empty());
ZoneId zone1 = ZoneId.from(Environment.prod, RegionName.from("aws-us-east-1c"));
ZoneId zone2 = ZoneId.from(Environment.prod, RegionName.from("aws-us-west-2a"));
ControllerTester tester = publicTester();
tester.zoneRegistry().addZones(ZoneApiMock.newBuilder().with(CloudName.DEFAULT).with(zone1).build(),
ZoneApiMock.newBuilder().with(CloudName.AWS).with(zone2).build());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,main")
.region(zone1.region())
.region(zone2.region())
.applicationEndpoint("a", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 2))
.applicationEndpoint("b", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 1))
.applicationEndpoint("c", "qrs",
Map.of(zone1.region().value(), Map.of(InstanceName.from("beta"), 4,
InstanceName.from("main"), 6),
zone2.region().value(), Map.of(InstanceName.from("main"), 2)))
.build();
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
Set<String> expectedSans = new TreeSet<>(List.of(
"vlfms2wpoa4nyrka2s5lktucypjtxkqhv.internal.vespa-app.cloud",
"a1.t1.g.vespa-app.cloud",
"*.a1.t1.g.vespa-app.cloud",
"a1.t1.a.vespa-app.cloud",
"a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"*.a1.t1.a.vespa-app.cloud",
"*.a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"a1.t1.us-east-1.test.z.vespa-app.cloud",
"*.a1.t1.us-east-1.test.z.vespa-app.cloud",
"a1.t1.us-east-3.staging.z.vespa-app.cloud",
"*.a1.t1.us-east-3.staging.z.vespa-app.cloud"
));
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone1, applicationPackage.deploymentSpec());
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.t1.a1.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.t1.a1.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, new TreeSet<>(endpointCertificateMetadata.get().requestedDnsSans()));
} | assertEquals(expectedSans, new TreeSet<>(endpointCertificateMetadata.get().requestedDnsSans())); | void includes_application_endpoint_when_declared() {
Instance instance = new Instance(ApplicationId.from("t1", "a1", "default"), Tags.empty());
ZoneId zone1 = ZoneId.from(Environment.prod, RegionName.from("aws-us-east-1c"));
ZoneId zone2 = ZoneId.from(Environment.prod, RegionName.from("aws-us-west-2a"));
ControllerTester tester = publicTester();
tester.zoneRegistry().addZones(ZoneApiMock.newBuilder().with(CloudName.DEFAULT).with(zone1).build(),
ZoneApiMock.newBuilder().with(CloudName.AWS).with(zone2).build());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,main")
.region(zone1.region())
.region(zone2.region())
.applicationEndpoint("a", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 2))
.applicationEndpoint("b", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 1))
.applicationEndpoint("c", "qrs",
Map.of(zone1.region().value(), Map.of(InstanceName.from("beta"), 4,
InstanceName.from("main"), 6),
zone2.region().value(), Map.of(InstanceName.from("main"), 2)))
.build();
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
List<String> expectedSans = Stream.of(
"vlfms2wpoa4nyrka2s5lktucypjtxkqhv.internal.vespa-app.cloud",
"a1.t1.g.vespa-app.cloud",
"*.a1.t1.g.vespa-app.cloud",
"a1.t1.a.vespa-app.cloud",
"a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"*.a1.t1.a.vespa-app.cloud",
"*.a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"a1.t1.us-east-1.test.z.vespa-app.cloud",
"*.a1.t1.us-east-1.test.z.vespa-app.cloud",
"a1.t1.us-east-3.staging.z.vespa-app.cloud",
"*.a1.t1.us-east-3.staging.z.vespa-app.cloud"
).sorted().toList();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone1, applicationPackage.deploymentSpec());
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.t1.a1.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.t1.a1.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans().stream().sorted().toList());
} | class EndpointCertificatesTest {
// Shared fixtures: a controller tester wired with mock secret store, curator, manual clock and a mock
// certificate provider, feeding the EndpointCertificates component under test.
private final ControllerTester tester = new ControllerTester();
private final SecretStoreMock secretStore = new SecretStoreMock();
private final CuratorDb mockCuratorDb = tester.curator();
private final ManualClock clock = tester.clock();
private final EndpointCertificateMock endpointCertificateMock = new EndpointCertificateMock(new ManualClock());
private final EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
private final EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
private final KeyPair testKeyPair = KeyUtils.generateKeypair(KeyAlgorithm.EC, 192);
// Assigned in setUp() from the expected SAN lists below
private X509Certificate testCertificate;
private X509Certificate testCertificate2;
// SANs expected for the default instance in the non-public system
private static final List<String> expectedSans = List.of(
        "vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
        "default.default.global.vespa.oath.cloud",
        "*.default.default.global.vespa.oath.cloud",
        "default.default.aws-us-east-1a.vespa.oath.cloud",
        "*.default.default.aws-us-east-1a.vespa.oath.cloud",
        "default.default.us-east-1.test.vespa.oath.cloud",
        "*.default.default.us-east-1.test.vespa.oath.cloud",
        "default.default.us-east-3.staging.vespa.oath.cloud",
        "*.default.default.us-east-3.staging.vespa.oath.cloud"
);
// Extra SANs expected once the application also deploys to ap-northeast-1
private static final List<String> expectedAdditionalSans = List.of(
        "default.default.ap-northeast-1.vespa.oath.cloud",
        "*.default.default.ap-northeast-1.vespa.oath.cloud"
);
private static final List<String> expectedCombinedSans = new ArrayList<>() {{
addAll(expectedSans);
addAll(expectedAdditionalSans);
}};
// SANs expected for a dev deployment: only the dev zone names, no global/test/staging entries
private static final List<String> expectedDevSans = List.of(
        "vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
        "default.default.us-east-1.dev.vespa.oath.cloud",
        "*.default.default.us-east-1.dev.vespa.oath.cloud"
);
/** Builds a short-lived (5 minute) self-signed certificate whose SANs are exactly the given names. */
private X509Certificate makeTestCert(List<String> sans) {
    X509CertificateBuilder x509CertificateBuilder = X509CertificateBuilder
            .fromKeypair(
                    testKeyPair,
                    new X500Principal("CN=test"),
                    clock.instant(), clock.instant().plus(5, ChronoUnit.MINUTES),
                    SignatureAlgorithm.SHA256_WITH_ECDSA,
                    X509CertificateBuilder.generateRandomSerialNumber());
    for (String san : sans) x509CertificateBuilder = x509CertificateBuilder.addSubjectAlternativeName(san);
    return x509CertificateBuilder.build();
}
// Instance and secret names used by tests that pre-populate stored certificate metadata
private final Instance testInstance = new Instance(ApplicationId.defaultId(), Tags.empty());
private final String testKeyName = "testKeyName";
private final String testCertName = "testCertName";
// A prod zone with exclusive routing, resolved in setUp()
private ZoneId testZone;
@BeforeEach
public void setUp() {
    // All zones use exclusive routing so that endpoint certificates are provisioned
    tester.zoneRegistry().exclusiveRoutingIn(tester.zoneRegistry().zones().all().zones());
    testZone = tester.zoneRegistry().zones().all().routingMethod(RoutingMethod.exclusive).in(Environment.prod).zones().stream().findFirst().orElseThrow().getId();
    clock.setInstant(Instant.EPOCH);
    // Certificates matching the base SAN list and the combined (extra-zone) SAN list
    testCertificate = makeTestCert(expectedSans);
    testCertificate2 = makeTestCert(expectedCombinedSans);
}
@Test
void provisions_new_certificate_in_dev() {
    // A dev deployment with no stored metadata gets a fresh certificate covering only dev-zone names
    ZoneId testZone = tester.zoneRegistry().zones().all().routingMethod(RoutingMethod.exclusive).in(Environment.dev).zones().stream().findFirst().orElseThrow().getId();
    Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
    assertTrue(endpointCertificateMetadata.isPresent());
    assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
    assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
    assertEquals(0, endpointCertificateMetadata.get().version());
    assertEquals(expectedDevSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
void provisions_new_certificate_in_prod() {
    // A prod deployment with no stored metadata gets a fresh certificate with the full SAN set
    Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
    assertTrue(endpointCertificateMetadata.isPresent());
    assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
    assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
    assertEquals(0, endpointCertificateMetadata.get().version());
    assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
/** Returns a tester for the public system, mirroring this tester's zone configuration. */
private ControllerTester publicTester() {
    ControllerTester publicTester = new ControllerTester(SystemName.Public);
    publicTester.zoneRegistry().setZones(tester.zoneRegistry().zones().all().zones());
    return publicTester;
}
@Test
void provisions_new_certificate_in_public_prod() {
    // In the public system, SANs use the vespa-app.cloud scheme instead of vespa.oath.cloud
    ControllerTester tester = publicTester();
    EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
    EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
    List<String> expectedSans = List.of(
            "vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.internal.vespa-app.cloud",
            "default.default.g.vespa-app.cloud",
            "*.default.default.g.vespa-app.cloud",
            "default.default.aws-us-east-1a.z.vespa-app.cloud",
            "*.default.default.aws-us-east-1a.z.vespa-app.cloud",
            "default.default.us-east-1.test.z.vespa-app.cloud",
            "*.default.default.us-east-1.test.z.vespa-app.cloud",
            "default.default.us-east-3.staging.z.vespa-app.cloud",
            "*.default.default.us-east-3.staging.z.vespa-app.cloud"
    );
    Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
    assertTrue(endpointCertificateMetadata.isPresent());
    assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
    assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
    assertEquals(0, endpointCertificateMetadata.get().version());
    assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
void reuses_stored_certificate_metadata() {
    // When valid metadata and matching secrets already exist, no new certificate is provisioned
    mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, 7, 0, "request_id", Optional.of("leaf-request-uuid"),
            List.of("vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
                    "default.default.global.vespa.oath.cloud",
                    "*.default.default.global.vespa.oath.cloud",
                    "default.default.aws-us-east-1a.vespa.oath.cloud",
                    "*.default.default.aws-us-east-1a.vespa.oath.cloud"),
            "", Optional.empty(), Optional.empty()));
    secretStore.setSecret(testKeyName, KeyUtils.toPem(testKeyPair.getPrivate()), 7);
    secretStore.setSecret(testCertName, X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 7);
    Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
    assertTrue(endpointCertificateMetadata.isPresent());
    assertEquals(testKeyName, endpointCertificateMetadata.get().keyName());
    assertEquals(testCertName, endpointCertificateMetadata.get().certName());
    assertEquals(7, endpointCertificateMetadata.get().version());
}
@Test
void reprovisions_certificate_when_necessary() {
    // Stored metadata with version -1 (invalid) triggers reprovisioning, and the result is persisted
    mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "root-request-uuid", Optional.of("leaf-request-uuid"), List.of(), "issuer", Optional.empty(), Optional.empty()));
    secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
    secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 0);
    Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
    assertTrue(endpointCertificateMetadata.isPresent());
    assertEquals(0, endpointCertificateMetadata.get().version());
    assertEquals(endpointCertificateMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
}
@Test
void reprovisions_certificate_with_added_sans_when_deploying_to_new_zone() {
    // Deploying to a zone not covered by the stored certificate triggers a new leaf request that keeps
    // the original root request id but extends the requested SANs
    ZoneId testZone = ZoneId.from("prod.ap-northeast-1");
    mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "original-request-uuid", Optional.of("leaf-request-uuid"), expectedSans, "mockCa", Optional.empty(), Optional.empty()));
    secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), -1);
    secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), -1);
    secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
    secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate2) + X509CertificateUtils.toPem(testCertificate2), 0);
    Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
    assertTrue(endpointCertificateMetadata.isPresent());
    assertEquals(0, endpointCertificateMetadata.get().version());
    assertEquals(endpointCertificateMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
    assertEquals("original-request-uuid", endpointCertificateMetadata.get().rootRequestId());
    assertNotEquals(Optional.of("leaf-request-uuid"), endpointCertificateMetadata.get().leafRequestId());
    assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
}
@Test
void includes_zones_in_deployment_spec_when_deploying_to_staging() {
DeploymentSpec deploymentSpec = new DeploymentSpecXmlReader(true).read(
"""
<deployment version="1.0">
<instance id="default">
<prod>
<region active="true">aws-us-east-1a</region>
<region active="true">ap-northeast-1</region>
</prod>
</instance>
</deployment>
"""
);
ZoneId testZone = tester.zoneRegistry().zones().all().in(Environment.staging).zones().stream().findFirst().orElseThrow().getId();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, deploymentSpec);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
}
@Test
} | class EndpointCertificatesTest {
private final ControllerTester tester = new ControllerTester();
private final SecretStoreMock secretStore = new SecretStoreMock();
private final CuratorDb mockCuratorDb = tester.curator();
private final ManualClock clock = tester.clock();
private final EndpointCertificateMock endpointCertificateMock = new EndpointCertificateMock(new ManualClock());
private final EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
private final EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
private final KeyPair testKeyPair = KeyUtils.generateKeypair(KeyAlgorithm.EC, 192);
private X509Certificate testCertificate;
private X509Certificate testCertificate2;
private static final List<String> expectedSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.global.vespa.oath.cloud",
"*.default.default.global.vespa.oath.cloud",
"default.default.aws-us-east-1a.vespa.oath.cloud",
"*.default.default.aws-us-east-1a.vespa.oath.cloud",
"default.default.us-east-1.test.vespa.oath.cloud",
"*.default.default.us-east-1.test.vespa.oath.cloud",
"default.default.us-east-3.staging.vespa.oath.cloud",
"*.default.default.us-east-3.staging.vespa.oath.cloud"
);
private static final List<String> expectedAdditionalSans = List.of(
"default.default.ap-northeast-1.vespa.oath.cloud",
"*.default.default.ap-northeast-1.vespa.oath.cloud"
);
private static final List<String> expectedCombinedSans = new ArrayList<>() {{
addAll(expectedSans);
addAll(expectedAdditionalSans);
}};
private static final List<String> expectedDevSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.us-east-1.dev.vespa.oath.cloud",
"*.default.default.us-east-1.dev.vespa.oath.cloud"
);
private X509Certificate makeTestCert(List<String> sans) {
X509CertificateBuilder x509CertificateBuilder = X509CertificateBuilder
.fromKeypair(
testKeyPair,
new X500Principal("CN=test"),
clock.instant(), clock.instant().plus(5, ChronoUnit.MINUTES),
SignatureAlgorithm.SHA256_WITH_ECDSA,
X509CertificateBuilder.generateRandomSerialNumber());
for (String san : sans) x509CertificateBuilder = x509CertificateBuilder.addSubjectAlternativeName(san);
return x509CertificateBuilder.build();
}
private final Instance testInstance = new Instance(ApplicationId.defaultId(), Tags.empty());
private final String testKeyName = "testKeyName";
private final String testCertName = "testCertName";
private ZoneId testZone;
@BeforeEach
public void setUp() {
tester.zoneRegistry().exclusiveRoutingIn(tester.zoneRegistry().zones().all().zones());
testZone = tester.zoneRegistry().zones().all().routingMethod(RoutingMethod.exclusive).in(Environment.prod).zones().stream().findFirst().orElseThrow().getId();
clock.setInstant(Instant.EPOCH);
testCertificate = makeTestCert(expectedSans);
testCertificate2 = makeTestCert(expectedCombinedSans);
}
@Test
void provisions_new_certificate_in_dev() {
ZoneId testZone = tester.zoneRegistry().zones().all().routingMethod(RoutingMethod.exclusive).in(Environment.dev).zones().stream().findFirst().orElseThrow().getId();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedDevSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
void provisions_new_certificate_in_prod() {
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
private ControllerTester publicTester() {
ControllerTester publicTester = new ControllerTester(SystemName.Public);
publicTester.zoneRegistry().setZones(tester.zoneRegistry().zones().all().zones());
return publicTester;
}
@Test
void provisions_new_certificate_in_public_prod() {
ControllerTester tester = publicTester();
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
List<String> expectedSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.internal.vespa-app.cloud",
"default.default.g.vespa-app.cloud",
"*.default.default.g.vespa-app.cloud",
"default.default.aws-us-east-1a.z.vespa-app.cloud",
"*.default.default.aws-us-east-1a.z.vespa-app.cloud",
"default.default.us-east-1.test.z.vespa-app.cloud",
"*.default.default.us-east-1.test.z.vespa-app.cloud",
"default.default.us-east-3.staging.z.vespa-app.cloud",
"*.default.default.us-east-3.staging.z.vespa-app.cloud"
);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
void reuses_stored_certificate_metadata() {
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, 7, 0, "request_id", Optional.of("leaf-request-uuid"),
List.of("vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.global.vespa.oath.cloud",
"*.default.default.global.vespa.oath.cloud",
"default.default.aws-us-east-1a.vespa.oath.cloud",
"*.default.default.aws-us-east-1a.vespa.oath.cloud"),
"", Optional.empty(), Optional.empty()));
secretStore.setSecret(testKeyName, KeyUtils.toPem(testKeyPair.getPrivate()), 7);
secretStore.setSecret(testCertName, X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 7);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(testKeyName, endpointCertificateMetadata.get().keyName());
assertEquals(testCertName, endpointCertificateMetadata.get().certName());
assertEquals(7, endpointCertificateMetadata.get().version());
}
@Test
void reprovisions_certificate_when_necessary() {
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "root-request-uuid", Optional.of("leaf-request-uuid"), List.of(), "issuer", Optional.empty(), Optional.empty()));
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 0);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(endpointCertificateMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
}
@Test
void reprovisions_certificate_with_added_sans_when_deploying_to_new_zone() {
ZoneId testZone = ZoneId.from("prod.ap-northeast-1");
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "original-request-uuid", Optional.of("leaf-request-uuid"), expectedSans, "mockCa", Optional.empty(), Optional.empty()));
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), -1);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), -1);
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate2) + X509CertificateUtils.toPem(testCertificate2), 0);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(endpointCertificateMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
assertEquals("original-request-uuid", endpointCertificateMetadata.get().rootRequestId());
assertNotEquals(Optional.of("leaf-request-uuid"), endpointCertificateMetadata.get().leafRequestId());
assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
}
@Test
void includes_zones_in_deployment_spec_when_deploying_to_staging() {
DeploymentSpec deploymentSpec = new DeploymentSpecXmlReader(true).read(
"""
<deployment version="1.0">
<instance id="default">
<prod>
<region active="true">aws-us-east-1a</region>
<region active="true">ap-northeast-1</region>
</prod>
</instance>
</deployment>
"""
);
ZoneId testZone = tester.zoneRegistry().zones().all().in(Environment.staging).zones().stream().findFirst().orElseThrow().getId();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, deploymentSpec);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
}
@Test
} |
No, that was just because I got tired of reading a super-long line, trying to decipher the actual order of arguments that was required >_< I can sort the lists instead ... | void includes_application_endpoint_when_declared() {
Instance instance = new Instance(ApplicationId.from("t1", "a1", "default"), Tags.empty());
ZoneId zone1 = ZoneId.from(Environment.prod, RegionName.from("aws-us-east-1c"));
ZoneId zone2 = ZoneId.from(Environment.prod, RegionName.from("aws-us-west-2a"));
ControllerTester tester = publicTester();
tester.zoneRegistry().addZones(ZoneApiMock.newBuilder().with(CloudName.DEFAULT).with(zone1).build(),
ZoneApiMock.newBuilder().with(CloudName.AWS).with(zone2).build());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,main")
.region(zone1.region())
.region(zone2.region())
.applicationEndpoint("a", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 2))
.applicationEndpoint("b", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 1))
.applicationEndpoint("c", "qrs",
Map.of(zone1.region().value(), Map.of(InstanceName.from("beta"), 4,
InstanceName.from("main"), 6),
zone2.region().value(), Map.of(InstanceName.from("main"), 2)))
.build();
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
Set<String> expectedSans = new TreeSet<>(List.of(
"vlfms2wpoa4nyrka2s5lktucypjtxkqhv.internal.vespa-app.cloud",
"a1.t1.g.vespa-app.cloud",
"*.a1.t1.g.vespa-app.cloud",
"a1.t1.a.vespa-app.cloud",
"a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"*.a1.t1.a.vespa-app.cloud",
"*.a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"a1.t1.us-east-1.test.z.vespa-app.cloud",
"*.a1.t1.us-east-1.test.z.vespa-app.cloud",
"a1.t1.us-east-3.staging.z.vespa-app.cloud",
"*.a1.t1.us-east-3.staging.z.vespa-app.cloud"
));
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone1, applicationPackage.deploymentSpec());
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.t1.a1.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.t1.a1.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, new TreeSet<>(endpointCertificateMetadata.get().requestedDnsSans()));
} | assertEquals(expectedSans, new TreeSet<>(endpointCertificateMetadata.get().requestedDnsSans())); | void includes_application_endpoint_when_declared() {
Instance instance = new Instance(ApplicationId.from("t1", "a1", "default"), Tags.empty());
ZoneId zone1 = ZoneId.from(Environment.prod, RegionName.from("aws-us-east-1c"));
ZoneId zone2 = ZoneId.from(Environment.prod, RegionName.from("aws-us-west-2a"));
ControllerTester tester = publicTester();
tester.zoneRegistry().addZones(ZoneApiMock.newBuilder().with(CloudName.DEFAULT).with(zone1).build(),
ZoneApiMock.newBuilder().with(CloudName.AWS).with(zone2).build());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,main")
.region(zone1.region())
.region(zone2.region())
.applicationEndpoint("a", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 2))
.applicationEndpoint("b", "qrs", zone2.region().value(),
Map.of(InstanceName.from("beta"), 1))
.applicationEndpoint("c", "qrs",
Map.of(zone1.region().value(), Map.of(InstanceName.from("beta"), 4,
InstanceName.from("main"), 6),
zone2.region().value(), Map.of(InstanceName.from("main"), 2)))
.build();
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
List<String> expectedSans = Stream.of(
"vlfms2wpoa4nyrka2s5lktucypjtxkqhv.internal.vespa-app.cloud",
"a1.t1.g.vespa-app.cloud",
"*.a1.t1.g.vespa-app.cloud",
"a1.t1.a.vespa-app.cloud",
"a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"*.a1.t1.a.vespa-app.cloud",
"*.a1.t1.aws-us-west-2a.r.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.r.vespa-app.cloud",
"a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"*.a1.t1.aws-us-east-1c.z.vespa-app.cloud",
"a1.t1.us-east-1.test.z.vespa-app.cloud",
"*.a1.t1.us-east-1.test.z.vespa-app.cloud",
"a1.t1.us-east-3.staging.z.vespa-app.cloud",
"*.a1.t1.us-east-3.staging.z.vespa-app.cloud"
).sorted().toList();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone1, applicationPackage.deploymentSpec());
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.t1.a1.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.t1.a1.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans().stream().sorted().toList());
} | class EndpointCertificatesTest {
private final ControllerTester tester = new ControllerTester();
private final SecretStoreMock secretStore = new SecretStoreMock();
private final CuratorDb mockCuratorDb = tester.curator();
private final ManualClock clock = tester.clock();
private final EndpointCertificateMock endpointCertificateMock = new EndpointCertificateMock(new ManualClock());
private final EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
private final EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
private final KeyPair testKeyPair = KeyUtils.generateKeypair(KeyAlgorithm.EC, 192);
private X509Certificate testCertificate;
private X509Certificate testCertificate2;
private static final List<String> expectedSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.global.vespa.oath.cloud",
"*.default.default.global.vespa.oath.cloud",
"default.default.aws-us-east-1a.vespa.oath.cloud",
"*.default.default.aws-us-east-1a.vespa.oath.cloud",
"default.default.us-east-1.test.vespa.oath.cloud",
"*.default.default.us-east-1.test.vespa.oath.cloud",
"default.default.us-east-3.staging.vespa.oath.cloud",
"*.default.default.us-east-3.staging.vespa.oath.cloud"
);
private static final List<String> expectedAdditionalSans = List.of(
"default.default.ap-northeast-1.vespa.oath.cloud",
"*.default.default.ap-northeast-1.vespa.oath.cloud"
);
private static final List<String> expectedCombinedSans = new ArrayList<>() {{
addAll(expectedSans);
addAll(expectedAdditionalSans);
}};
private static final List<String> expectedDevSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.us-east-1.dev.vespa.oath.cloud",
"*.default.default.us-east-1.dev.vespa.oath.cloud"
);
private X509Certificate makeTestCert(List<String> sans) {
X509CertificateBuilder x509CertificateBuilder = X509CertificateBuilder
.fromKeypair(
testKeyPair,
new X500Principal("CN=test"),
clock.instant(), clock.instant().plus(5, ChronoUnit.MINUTES),
SignatureAlgorithm.SHA256_WITH_ECDSA,
X509CertificateBuilder.generateRandomSerialNumber());
for (String san : sans) x509CertificateBuilder = x509CertificateBuilder.addSubjectAlternativeName(san);
return x509CertificateBuilder.build();
}
private final Instance testInstance = new Instance(ApplicationId.defaultId(), Tags.empty());
private final String testKeyName = "testKeyName";
private final String testCertName = "testCertName";
private ZoneId testZone;
@BeforeEach
public void setUp() {
tester.zoneRegistry().exclusiveRoutingIn(tester.zoneRegistry().zones().all().zones());
testZone = tester.zoneRegistry().zones().all().routingMethod(RoutingMethod.exclusive).in(Environment.prod).zones().stream().findFirst().orElseThrow().getId();
clock.setInstant(Instant.EPOCH);
testCertificate = makeTestCert(expectedSans);
testCertificate2 = makeTestCert(expectedCombinedSans);
}
@Test
void provisions_new_certificate_in_dev() {
ZoneId testZone = tester.zoneRegistry().zones().all().routingMethod(RoutingMethod.exclusive).in(Environment.dev).zones().stream().findFirst().orElseThrow().getId();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedDevSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
void provisions_new_certificate_in_prod() {
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
private ControllerTester publicTester() {
ControllerTester publicTester = new ControllerTester(SystemName.Public);
publicTester.zoneRegistry().setZones(tester.zoneRegistry().zones().all().zones());
return publicTester;
}
@Test
void provisions_new_certificate_in_public_prod() {
ControllerTester tester = publicTester();
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
List<String> expectedSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.internal.vespa-app.cloud",
"default.default.g.vespa-app.cloud",
"*.default.default.g.vespa-app.cloud",
"default.default.aws-us-east-1a.z.vespa-app.cloud",
"*.default.default.aws-us-east-1a.z.vespa-app.cloud",
"default.default.us-east-1.test.z.vespa-app.cloud",
"*.default.default.us-east-1.test.z.vespa-app.cloud",
"default.default.us-east-3.staging.z.vespa-app.cloud",
"*.default.default.us-east-3.staging.z.vespa-app.cloud"
);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
void reuses_stored_certificate_metadata() {
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, 7, 0, "request_id", Optional.of("leaf-request-uuid"),
List.of("vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.global.vespa.oath.cloud",
"*.default.default.global.vespa.oath.cloud",
"default.default.aws-us-east-1a.vespa.oath.cloud",
"*.default.default.aws-us-east-1a.vespa.oath.cloud"),
"", Optional.empty(), Optional.empty()));
secretStore.setSecret(testKeyName, KeyUtils.toPem(testKeyPair.getPrivate()), 7);
secretStore.setSecret(testCertName, X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 7);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(testKeyName, endpointCertificateMetadata.get().keyName());
assertEquals(testCertName, endpointCertificateMetadata.get().certName());
assertEquals(7, endpointCertificateMetadata.get().version());
}
@Test
void reprovisions_certificate_when_necessary() {
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "root-request-uuid", Optional.of("leaf-request-uuid"), List.of(), "issuer", Optional.empty(), Optional.empty()));
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 0);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(endpointCertificateMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
}
@Test
void reprovisions_certificate_with_added_sans_when_deploying_to_new_zone() {
ZoneId testZone = ZoneId.from("prod.ap-northeast-1");
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "original-request-uuid", Optional.of("leaf-request-uuid"), expectedSans, "mockCa", Optional.empty(), Optional.empty()));
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), -1);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), -1);
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate2) + X509CertificateUtils.toPem(testCertificate2), 0);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(endpointCertificateMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
assertEquals("original-request-uuid", endpointCertificateMetadata.get().rootRequestId());
assertNotEquals(Optional.of("leaf-request-uuid"), endpointCertificateMetadata.get().leafRequestId());
assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
}
@Test
void includes_zones_in_deployment_spec_when_deploying_to_staging() {
DeploymentSpec deploymentSpec = new DeploymentSpecXmlReader(true).read(
"""
<deployment version="1.0">
<instance id="default">
<prod>
<region active="true">aws-us-east-1a</region>
<region active="true">ap-northeast-1</region>
</prod>
</instance>
</deployment>
"""
);
ZoneId testZone = tester.zoneRegistry().zones().all().in(Environment.staging).zones().stream().findFirst().orElseThrow().getId();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, deploymentSpec);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
}
@Test
} | class EndpointCertificatesTest {
private final ControllerTester tester = new ControllerTester();
private final SecretStoreMock secretStore = new SecretStoreMock();
private final CuratorDb mockCuratorDb = tester.curator();
private final ManualClock clock = tester.clock();
private final EndpointCertificateMock endpointCertificateMock = new EndpointCertificateMock(new ManualClock());
private final EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
private final EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
private final KeyPair testKeyPair = KeyUtils.generateKeypair(KeyAlgorithm.EC, 192);
private X509Certificate testCertificate;
private X509Certificate testCertificate2;
private static final List<String> expectedSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.global.vespa.oath.cloud",
"*.default.default.global.vespa.oath.cloud",
"default.default.aws-us-east-1a.vespa.oath.cloud",
"*.default.default.aws-us-east-1a.vespa.oath.cloud",
"default.default.us-east-1.test.vespa.oath.cloud",
"*.default.default.us-east-1.test.vespa.oath.cloud",
"default.default.us-east-3.staging.vespa.oath.cloud",
"*.default.default.us-east-3.staging.vespa.oath.cloud"
);
private static final List<String> expectedAdditionalSans = List.of(
"default.default.ap-northeast-1.vespa.oath.cloud",
"*.default.default.ap-northeast-1.vespa.oath.cloud"
);
private static final List<String> expectedCombinedSans = new ArrayList<>() {{
addAll(expectedSans);
addAll(expectedAdditionalSans);
}};
private static final List<String> expectedDevSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.us-east-1.dev.vespa.oath.cloud",
"*.default.default.us-east-1.dev.vespa.oath.cloud"
);
private X509Certificate makeTestCert(List<String> sans) {
X509CertificateBuilder x509CertificateBuilder = X509CertificateBuilder
.fromKeypair(
testKeyPair,
new X500Principal("CN=test"),
clock.instant(), clock.instant().plus(5, ChronoUnit.MINUTES),
SignatureAlgorithm.SHA256_WITH_ECDSA,
X509CertificateBuilder.generateRandomSerialNumber());
for (String san : sans) x509CertificateBuilder = x509CertificateBuilder.addSubjectAlternativeName(san);
return x509CertificateBuilder.build();
}
// Default application instance and the secret-store entry names used by the fixtures below.
private final Instance testInstance = new Instance(ApplicationId.defaultId(), Tags.empty());
private final String testKeyName = "testKeyName";
private final String testCertName = "testCertName";
// A prod zone with exclusive routing; assigned in setUp().
private ZoneId testZone;
@BeforeEach
public void setUp() {
// Enable exclusive routing in all zones, pick a prod zone, pin the clock, and mint the test certs.
tester.zoneRegistry().exclusiveRoutingIn(tester.zoneRegistry().zones().all().zones());
testZone = tester.zoneRegistry().zones().all().routingMethod(RoutingMethod.exclusive).in(Environment.prod).zones().stream().findFirst().orElseThrow().getId();
clock.setInstant(Instant.EPOCH);
testCertificate = makeTestCert(expectedSans);
testCertificate2 = makeTestCert(expectedCombinedSans);
}
@Test
void provisions_new_certificate_in_dev() {
// A dev deployment gets a fresh certificate (version 0) whose SANs cover only the dev zone.
ZoneId testZone = tester.zoneRegistry().zones().all().routingMethod(RoutingMethod.exclusive).in(Environment.dev).zones().stream().findFirst().orElseThrow().getId();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedDevSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
void provisions_new_certificate_in_prod() {
// A prod deployment gets a fresh certificate (version 0) covering global, prod, test and staging SANs.
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
// Builds a tester for the Public system, mirroring this test's zone setup.
private ControllerTester publicTester() {
ControllerTester publicTester = new ControllerTester(SystemName.Public);
publicTester.zoneRegistry().setZones(tester.zoneRegistry().zones().all().zones());
return publicTester;
}
@Test
void provisions_new_certificate_in_public_prod() {
// In the Public system, the same deployment gets SANs under the vespa-app.cloud domain instead.
ControllerTester tester = publicTester();
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
List<String> expectedSans = List.of(
"vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.internal.vespa-app.cloud",
"default.default.g.vespa-app.cloud",
"*.default.default.g.vespa-app.cloud",
"default.default.aws-us-east-1a.z.vespa-app.cloud",
"*.default.default.aws-us-east-1a.z.vespa-app.cloud",
"default.default.us-east-1.test.z.vespa-app.cloud",
"*.default.default.us-east-1.test.z.vespa-app.cloud",
"default.default.us-east-3.staging.z.vespa-app.cloud",
"*.default.default.us-east-3.staging.z.vespa-app.cloud"
);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(expectedSans, endpointCertificateMetadata.get().requestedDnsSans());
}
@Test
void reuses_stored_certificate_metadata() {
// When metadata is already stored and the backing secrets exist, the stored entry (version 7)
// is returned as-is instead of provisioning a new certificate.
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, 7, 0, "request_id", Optional.of("leaf-request-uuid"),
List.of("vt2ktgkqme5zlnp4tj4ttyor7fj3v7q5o.vespa.oath.cloud",
"default.default.global.vespa.oath.cloud",
"*.default.default.global.vespa.oath.cloud",
"default.default.aws-us-east-1a.vespa.oath.cloud",
"*.default.default.aws-us-east-1a.vespa.oath.cloud"),
"", Optional.empty(), Optional.empty()));
secretStore.setSecret(testKeyName, KeyUtils.toPem(testKeyPair.getPrivate()), 7);
secretStore.setSecret(testCertName, X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 7);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(testKeyName, endpointCertificateMetadata.get().keyName());
assertEquals(testCertName, endpointCertificateMetadata.get().certName());
assertEquals(7, endpointCertificateMetadata.get().version());
}
@Test
void reprovisions_certificate_when_necessary() {
// Stored metadata with version -1 is replaced: a new entry at version 0 is produced and persisted.
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "root-request-uuid", Optional.of("leaf-request-uuid"), List.of(), "issuer", Optional.empty(), Optional.empty()));
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), 0);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(endpointCertificateMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
}
@Test
void reprovisions_certificate_with_added_sans_when_deploying_to_new_zone() {
// Deploying to a zone not covered by the stored SANs triggers a re-request: the root request id
// is kept, the leaf request id changes, and the SAN set is extended with the new zone's names.
ZoneId testZone = ZoneId.from("prod.ap-northeast-1");
mockCuratorDb.writeEndpointCertificateMetadata(testInstance.id(), new EndpointCertificateMetadata(testKeyName, testCertName, -1, 0, "original-request-uuid", Optional.of("leaf-request-uuid"), expectedSans, "mockCa", Optional.empty(), Optional.empty()));
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), -1);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate) + X509CertificateUtils.toPem(testCertificate), -1);
secretStore.setSecret("vespa.tls.default.default.default-key", KeyUtils.toPem(testKeyPair.getPrivate()), 0);
secretStore.setSecret("vespa.tls.default.default.default-cert", X509CertificateUtils.toPem(testCertificate2) + X509CertificateUtils.toPem(testCertificate2), 0);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, DeploymentSpec.empty);
assertTrue(endpointCertificateMetadata.isPresent());
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(endpointCertificateMetadata, mockCuratorDb.readEndpointCertificateMetadata(testInstance.id()));
assertEquals("original-request-uuid", endpointCertificateMetadata.get().rootRequestId());
assertNotEquals(Optional.of("leaf-request-uuid"), endpointCertificateMetadata.get().leafRequestId());
assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
}
@Test
void includes_zones_in_deployment_spec_when_deploying_to_staging() {
// A staging deployment requests SANs for every prod region declared in the deployment spec,
// so the certificate is already valid when the application later reaches prod.
DeploymentSpec deploymentSpec = new DeploymentSpecXmlReader(true).read(
"""
<deployment version="1.0">
  <instance id="default">
    <prod>
      <region active="true">aws-us-east-1a</region>
      <region active="true">ap-northeast-1</region>
    </prod>
  </instance>
</deployment>
"""
);
ZoneId testZone = tester.zoneRegistry().zones().all().in(Environment.staging).zones().stream().findFirst().orElseThrow().getId();
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = endpointCertificates.getMetadata(testInstance, testZone, deploymentSpec);
assertTrue(endpointCertificateMetadata.isPresent());
assertTrue(endpointCertificateMetadata.get().keyName().matches("vespa.tls.default.default.*-key"));
assertTrue(endpointCertificateMetadata.get().certName().matches("vespa.tls.default.default.*-cert"));
assertEquals(0, endpointCertificateMetadata.get().version());
assertEquals(Set.copyOf(expectedCombinedSans), Set.copyOf(endpointCertificateMetadata.get().requestedDnsSans()));
}
@Test
} |
Harigato. | private void removeApplicationDnsUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList deploymentPolicies, @SuppressWarnings("unused") Mutex lock) {
Map<RoutingId, List<RoutingPolicy>> routingTable = deploymentPolicies.asApplicationRoutingTable();
Set<RoutingId> removalCandidates = new HashSet<>(routingTable.keySet());
Set<RoutingId> activeRoutingIds = applicationRoutingIds(allocation);
removalCandidates.removeAll(activeRoutingIds);
for (var id : removalCandidates) {
TenantAndApplicationId application = TenantAndApplicationId.from(id.instance());
EndpointList endpoints = controller.routing()
.readDeclaredEndpointsOf(application)
.named(id.endpointId());
List<RoutingPolicy> policies = routingTable.get(id);
for (var policy : policies) {
if (!policy.appliesTo(allocation.deployment)) continue;
NameServiceForwarder forwarder = nameServiceForwarderIn(policy.id().zone());
for (Endpoint endpoint : endpoints) {
if (policy.canonicalName().isPresent()) {
forwarder.removeRecords(Record.Type.ALIAS,
RecordName.from(endpoint.dnsName()),
RecordData.fqdn(policy.canonicalName().get().value()),
Priority.normal);
forwarder.removeRecords(Record.Type.ALIAS,
RecordName.from(endpoint.legacyRegionalDsnName()),
RecordData.fqdn(policy.canonicalName().get().value()),
Priority.normal);
} else {
forwarder.removeRecords(Record.Type.DIRECT,
RecordName.from(endpoint.dnsName()),
RecordData.from(policy.ipAddress().get()),
Priority.normal);
forwarder.removeRecords(Record.Type.DIRECT,
RecordName.from(endpoint.legacyRegionalDsnName()),
RecordData.from(policy.ipAddress().get()),
Priority.normal);
}
}
}
}
} | RecordName.from(endpoint.legacyRegionalDsnName()), | private void removeApplicationDnsUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList deploymentPolicies, @SuppressWarnings("unused") Mutex lock) {
// NOTE(review): duplicate copy of removeApplicationDnsUnreferencedBy (signature on the previous
// line, fused with extraction residue); this copy already uses the corrected
// Endpoint.legacyRegionalDnsName() accessor.
Map<RoutingId, List<RoutingPolicy>> routingTable = deploymentPolicies.asApplicationRoutingTable();
Set<RoutingId> removalCandidates = new HashSet<>(routingTable.keySet());
Set<RoutingId> activeRoutingIds = applicationRoutingIds(allocation);
// Endpoints still referenced by the allocation are kept.
removalCandidates.removeAll(activeRoutingIds);
for (var id : removalCandidates) {
TenantAndApplicationId application = TenantAndApplicationId.from(id.instance());
EndpointList endpoints = controller.routing()
.readDeclaredEndpointsOf(application)
.named(id.endpointId());
List<RoutingPolicy> policies = routingTable.get(id);
for (var policy : policies) {
if (!policy.appliesTo(allocation.deployment)) continue;
NameServiceForwarder forwarder = nameServiceForwarderIn(policy.id().zone());
for (Endpoint endpoint : endpoints) {
if (policy.canonicalName().isPresent()) {
forwarder.removeRecords(Record.Type.ALIAS,
RecordName.from(endpoint.dnsName()),
RecordData.fqdn(policy.canonicalName().get().value()),
Priority.normal);
forwarder.removeRecords(Record.Type.ALIAS,
RecordName.from(endpoint.legacyRegionalDnsName()),
RecordData.fqdn(policy.canonicalName().get().value()),
Priority.normal);
} else {
forwarder.removeRecords(Record.Type.DIRECT,
RecordName.from(endpoint.dnsName()),
RecordData.from(policy.ipAddress().get()),
Priority.normal);
forwarder.removeRecords(Record.Type.DIRECT,
RecordName.from(endpoint.legacyRegionalDnsName()),
RecordData.from(policy.ipAddress().get()),
Priority.normal);
}
}
}
}
} | class RoutingPolicies {
private final Controller controller;
// Curator-backed store for policies; also provides the routing-policy lock.
private final CuratorDb db;
public RoutingPolicies(Controller controller) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
this.db = controller.curator();
// Read and re-write every stored policy under the lock — presumably a one-time
// (re)serialization migration to the current format; TODO confirm and remove when done.
try (var lock = db.lockRoutingPolicies()) {
for (var policy : db.readRoutingPolicies().entrySet()) {
db.writeRoutingPolicies(policy.getKey(), policy.getValue());
}
}
}
/** Returns all routing policies belonging to the given deployment. */
public RoutingPolicyList read(DeploymentId deployment) {
    RoutingPolicyList instancePolicies = read(deployment.applicationId());
    return instancePolicies.deployment(deployment);
}
/** Returns all routing policies belonging to the given instance. */
public RoutingPolicyList read(ApplicationId instance) {
    var stored = db.readRoutingPolicies(instance);
    return RoutingPolicyList.copyOf(stored);
}
/** Returns the routing policies of every instance belonging to the given application. */
private RoutingPolicyList read(TenantAndApplicationId application) {
    var policiesByInstance = db.readRoutingPolicies((instance) -> TenantAndApplicationId.from(instance).equals(application));
    List<RoutingPolicy> merged = policiesByInstance.values().stream()
                                                   .flatMap(Collection::stream)
                                                   .toList();
    return RoutingPolicyList.copyOf(merged);
}
/** Returns every stored routing policy, across all instances. */
private RoutingPolicyList readAll() {
    List<RoutingPolicy> all = db.readRoutingPolicies().values().stream()
                                .flatMap(Collection::stream)
                                .toList();
    return RoutingPolicyList.copyOf(all);
}
/** Returns the zone-wide routing policy for the given zone. */
public ZoneRoutingPolicy read(ZoneId zone) {
return db.readZoneRoutingPolicy(zone);
}
/**
* Refresh routing policies for instance in given zone. This is idempotent and changes will only be performed if
* routing configuration affecting given deployment has changed.
*/
public void refresh(DeploymentId deployment, DeploymentSpec deploymentSpec) {
ApplicationId instance = deployment.applicationId();
List<LoadBalancer> loadBalancers = controller.serviceRegistry().configServer()
.getLoadBalancers(instance, deployment.zoneId());
LoadBalancerAllocation allocation = new LoadBalancerAllocation(loadBalancers, deployment, deploymentSpec);
Set<ZoneId> inactiveZones = inactiveZones(instance, deploymentSpec);
try (var lock = db.lockRoutingPolicies()) {
RoutingPolicyList applicationPolicies = read(TenantAndApplicationId.from(instance));
RoutingPolicyList instancePolicies = applicationPolicies.instance(instance);
RoutingPolicyList deploymentPolicies = applicationPolicies.deployment(allocation.deployment);
// Order matters: stale records are removed before policies are (re)stored and DNS re-created.
removeGlobalDnsUnreferencedBy(allocation, deploymentPolicies, lock);
removeApplicationDnsUnreferencedBy(allocation, deploymentPolicies, lock);
instancePolicies = storePoliciesOf(allocation, instancePolicies, lock);
instancePolicies = removePoliciesUnreferencedBy(allocation, instancePolicies, lock);
applicationPolicies = applicationPolicies.replace(instance, instancePolicies);
updateGlobalDnsOf(instancePolicies, inactiveZones, lock);
updateApplicationDnsOf(applicationPolicies, inactiveZones, lock);
}
}
/** Set the status of all global endpoints in given zone */
public void setRoutingStatus(ZoneId zone, RoutingStatus.Value value) {
try (var lock = db.lockRoutingPolicies()) {
db.writeZoneRoutingPolicy(new ZoneRoutingPolicy(zone, RoutingStatus.create(value, RoutingStatus.Agent.operator,
controller.clock().instant())));
// Refresh global DNS for every instance, since the zone-wide status affects them all.
Map<ApplicationId, RoutingPolicyList> allPolicies = readAll().groupingBy(policy -> policy.id().owner());
for (var instancePolicies : allPolicies.values()) {
updateGlobalDnsOf(instancePolicies, Set.of(), lock);
}
}
}
/** Set the status of all global endpoints for given deployment */
public void setRoutingStatus(DeploymentId deployment, RoutingStatus.Value value, RoutingStatus.Agent agent) {
ApplicationId instance = deployment.applicationId();
try (var lock = db.lockRoutingPolicies()) {
RoutingPolicyList applicationPolicies = read(TenantAndApplicationId.from(instance));
RoutingPolicyList deploymentPolicies = applicationPolicies.deployment(deployment);
// Update the routing status of each policy of this deployment, keeping all other policies.
Map<RoutingPolicyId, RoutingPolicy> updatedPolicies = new LinkedHashMap<>(applicationPolicies.asMap());
for (var policy : deploymentPolicies) {
var newPolicy = policy.with(policy.status().with(RoutingStatus.create(value, agent,
controller.clock().instant())));
updatedPolicies.put(policy.id(), newPolicy);
}
// Persist per instance, then refresh instance-level and application-level DNS.
RoutingPolicyList effectivePolicies = RoutingPolicyList.copyOf(updatedPolicies.values());
Map<ApplicationId, RoutingPolicyList> policiesByInstance = effectivePolicies.groupingBy(policy -> policy.id().owner());
policiesByInstance.forEach((owner, instancePolicies) -> db.writeRoutingPolicies(owner, instancePolicies.asList()));
policiesByInstance.forEach((ignored, instancePolicies) -> updateGlobalDnsOf(instancePolicies, Set.of(), lock));
updateApplicationDnsOf(effectivePolicies, Set.of(), lock);
}
}
/** Refreshes global DNS records for every global endpoint covered by the given instance policies. */
private void updateGlobalDnsOf(RoutingPolicyList instancePolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Mutex lock) {
    Map<RoutingId, List<RoutingPolicy>> routingTable = instancePolicies.asInstanceRoutingTable();
    routingTable.forEach((routingId, policies) -> {
        EndpointList globalEndpoints = controller.routing().readDeclaredEndpointsOf(routingId.instance())
                                                 .named(routingId.endpointId())
                                                 .not().requiresRotation();
        globalEndpoints.forEach(endpoint -> updateGlobalDnsOf(endpoint, inactiveZones, policies));
    });
}
/** Update global DNS records for given global endpoint */
private void updateGlobalDnsOf(Endpoint endpoint, Set<ZoneId> inactiveZones, List<RoutingPolicy> policies) {
if (endpoint.scope() != Endpoint.Scope.global) throw new IllegalArgumentException("Endpoint " + endpoint + " is not global");
Collection<RegionEndpoint> regionEndpoints = computeRegionEndpoints(policies, inactiveZones);
// Create a weighted ALIAS/DIRECT record set per region endpoint.
regionEndpoints.forEach(regionEndpoint -> {
if ( ! regionEndpoint.zoneAliasTargets().isEmpty()) {
controller.nameServiceForwarder().createAlias(RecordName.from(regionEndpoint.target().name().value()),
regionEndpoint.zoneAliasTargets(),
Priority.normal);
}
if ( ! regionEndpoint.zoneDirectTargets().isEmpty()) {
controller.nameServiceForwarder().createDirect(RecordName.from(regionEndpoint.target().name().value()),
regionEndpoint.zoneDirectTargets(),
Priority.normal);
}
});
// Partition regions into active and inactive latency targets for the global record.
Set<AliasTarget> latencyTargets = new LinkedHashSet<>();
Set<AliasTarget> inactiveLatencyTargets = new LinkedHashSet<>();
for (var regionEndpoint : regionEndpoints) {
if (regionEndpoint.active()) {
latencyTargets.add(regionEndpoint.target());
} else {
inactiveLatencyTargets.add(regionEndpoint.target());
}
}
// If no region is active, route to all inactive regions rather than leaving the endpoint empty.
if (latencyTargets.isEmpty() && !inactiveLatencyTargets.isEmpty()) {
latencyTargets.addAll(inactiveLatencyTargets);
inactiveLatencyTargets.clear();
}
controller.nameServiceForwarder().createAlias(RecordName.from(endpoint.dnsName()), latencyTargets, Priority.normal);
inactiveLatencyTargets.forEach(t -> controller.nameServiceForwarder()
.removeRecords(Record.Type.ALIAS,
RecordData.fqdn(t.name().value()),
Priority.normal));
}
/** Compute region endpoints and their targets from given policies */
private Collection<RegionEndpoint> computeRegionEndpoints(List<RoutingPolicy> policies, Set<ZoneId> inactiveZones) {
Map<Endpoint, RegionEndpoint> endpoints = new LinkedHashMap<>();
for (var policy : policies) {
// A CNAME-backed policy without a DNS zone cannot be targeted.
if (policy.dnsZone().isEmpty() && policy.canonicalName().isPresent()) continue;
if (controller.zoneRegistry().routingMethod(policy.id().zone()) != RoutingMethod.exclusive) continue;
Endpoint endpoint = policy.regionEndpointIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry());
var zonePolicy = db.readZoneRoutingPolicy(policy.id().zone());
// Zones configured out get weight 0, which keeps the record but directs no traffic to it.
long weight = 1;
if (isConfiguredOut(zonePolicy, policy, inactiveZones)) {
weight = 0;
}
RegionEndpoint regionEndpoint = endpoints.computeIfAbsent(endpoint, (k) -> new RegionEndpoint(
new LatencyAliasTarget(DomainName.of(endpoint.dnsName()), policy.dnsZone().get(), policy.id().zone())));
if (policy.canonicalName().isPresent()) {
var weightedTarget = new WeightedAliasTarget(
policy.canonicalName().get(), policy.dnsZone().get(), policy.id().zone(), weight);
regionEndpoint.add(weightedTarget);
} else {
var weightedTarget = new WeightedDirectTarget(
RecordData.from(policy.ipAddress().get()), policy.id().zone(), weight);
regionEndpoint.add(weightedTarget);
}
}
return endpoints.values();
}
/**
 * Creates, updates or removes DNS records for application-scoped endpoints (endpoints spanning
 * several instances of an application), based on the routing status of each policy. Records are
 * maintained for both the current endpoint name and its legacy regional DNS name.
 * Caller must hold the routing-policy lock (witness parameter).
 */
private void updateApplicationDnsOf(RoutingPolicyList routingPolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Mutex lock) {
    Map<RoutingId, List<RoutingPolicy>> routingTable = routingPolicies.asApplicationRoutingTable();
    if (routingTable.isEmpty()) return;
    Application application = controller.applications().requireApplication(routingTable.keySet().iterator().next().application());
    // Partition each endpoint's targets into active and inactive, by routing status.
    Map<Endpoint, Set<Target>> targetsByEndpoint = new LinkedHashMap<>();
    Map<Endpoint, Set<Target>> inactiveTargetsByEndpoint = new LinkedHashMap<>();
    for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) {
        RoutingId routingId = routeEntry.getKey();
        EndpointList endpoints = controller.routing().declaredEndpointsOf(application)
                                           .scope(Endpoint.Scope.application)
                                           .named(routingId.endpointId());
        for (Endpoint endpoint : endpoints) {
            for (var policy : routeEntry.getValue()) {
                for (var target : endpoint.targets()) {
                    if (!policy.appliesTo(target.deployment())) continue;
                    if (policy.dnsZone().isEmpty() && policy.canonicalName().isPresent())
                        continue; // a CNAME-backed policy without a DNS zone cannot be targeted
                    ZoneRoutingPolicy zonePolicy = db.readZoneRoutingPolicy(policy.id().zone());
                    Set<Target> activeTargets = targetsByEndpoint.computeIfAbsent(endpoint, (k) -> new LinkedHashSet<>());
                    Set<Target> inactiveTargets = inactiveTargetsByEndpoint.computeIfAbsent(endpoint, (k) -> new LinkedHashSet<>());
                    if (isConfiguredOut(zonePolicy, policy, inactiveZones)) {
                        inactiveTargets.add(Target.weighted(policy, target));
                    }
                    else {
                        activeTargets.add(Target.weighted(policy, target));
                    }
                }
            }
        }
    }
    // If an endpoint has no active targets, promote all its inactive targets to active instead of
    // removing the endpoint entirely.
    for (var kv : targetsByEndpoint.entrySet()) {
        Endpoint endpoint = kv.getKey();
        Set<Target> activeTargets = kv.getValue();
        if (!activeTargets.isEmpty()) {
            continue;
        }
        Set<Target> inactiveTargets = inactiveTargetsByEndpoint.get(endpoint);
        activeTargets.addAll(inactiveTargets);
        inactiveTargets.clear();
    }
    // Create records for active targets, under both the current and the legacy regional name.
    targetsByEndpoint.forEach((applicationEndpoint, targets) -> {
        ZoneId targetZone = applicationEndpoint.targets().iterator().next().deployment().zoneId();
        Set<AliasTarget> aliasTargets = new LinkedHashSet<>();
        Set<DirectTarget> directTargets = new LinkedHashSet<>();
        for (Target target : targets) {
            if (target.aliasOrDirectTarget() instanceof AliasTarget at) aliasTargets.add(at);
            else directTargets.add((DirectTarget) target.aliasOrDirectTarget());
        }
        if ( ! aliasTargets.isEmpty()) {
            nameServiceForwarderIn(targetZone).createAlias(
                    RecordName.from(applicationEndpoint.dnsName()), aliasTargets, Priority.normal);
            nameServiceForwarderIn(targetZone).createAlias(
                    RecordName.from(applicationEndpoint.legacyRegionalDnsName()), aliasTargets, Priority.normal); // fixed: was legacyRegionalDsnName (typo)
        }
        if ( ! directTargets.isEmpty()) {
            nameServiceForwarderIn(targetZone).createDirect(
                    RecordName.from(applicationEndpoint.dnsName()), directTargets, Priority.normal);
            nameServiceForwarderIn(targetZone).createDirect(
                    RecordName.from(applicationEndpoint.legacyRegionalDnsName()), directTargets, Priority.normal); // fixed: was legacyRegionalDsnName (typo)
        }
    });
    // Remove records for targets that remain inactive.
    inactiveTargetsByEndpoint.forEach((applicationEndpoint, targets) -> {
        ZoneId targetZone = applicationEndpoint.targets().iterator().next().deployment().zoneId();
        targets.forEach(target -> {
            nameServiceForwarderIn(targetZone).removeRecords(target.type(),
                                                            RecordName.from(applicationEndpoint.dnsName()),
                                                            target.data(),
                                                            Priority.normal);
            nameServiceForwarderIn(targetZone).removeRecords(target.type(),
                                                            RecordName.from(applicationEndpoint.legacyRegionalDnsName()), // fixed: was legacyRegionalDsnName (typo)
                                                            target.data(),
                                                            Priority.normal);
        });
    });
}
/**
* Store routing policies for given load balancers
*
* @return the updated policies
*/
private RoutingPolicyList storePoliciesOf(LoadBalancerAllocation allocation, RoutingPolicyList instancePolicies, @SuppressWarnings("unused") Mutex lock) {
Map<RoutingPolicyId, RoutingPolicy> policies = new LinkedHashMap<>(instancePolicies.asMap());
for (LoadBalancer loadBalancer : allocation.loadBalancers) {
// A load balancer without any address cannot back a DNS record yet.
if (loadBalancer.hostname().isEmpty() && loadBalancer.ipAddress().isEmpty()) continue;
var policyId = new RoutingPolicyId(loadBalancer.application(), loadBalancer.cluster(), allocation.deployment.zoneId());
var existingPolicy = policies.get(policyId);
var dnsZone = loadBalancer.ipAddress().isPresent() ? Optional.of("ignored") : loadBalancer.dnsZone();
var newPolicy = new RoutingPolicy(policyId, loadBalancer.hostname(), loadBalancer.ipAddress(), dnsZone,
allocation.instanceEndpointsOf(loadBalancer),
allocation.applicationEndpointsOf(loadBalancer),
new RoutingPolicy.Status(isActive(loadBalancer), RoutingStatus.DEFAULT));
// Preserve the routing status previously set on an existing policy.
if (existingPolicy != null) {
newPolicy = newPolicy.with(newPolicy.status().with(existingPolicy.status().routingStatus()));
}
updateZoneDnsOf(newPolicy);
policies.put(newPolicy.id(), newPolicy);
}
RoutingPolicyList updated = RoutingPolicyList.copyOf(policies.values());
db.writeRoutingPolicies(allocation.deployment.applicationId(), updated.asList());
return updated;
}
/** Creates or updates the zone-level DNS record backing the given policy. */
private void updateZoneDnsOf(RoutingPolicy policy) {
    for (var endpoint : policy.zoneEndpointsIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry())) {
        RecordName name = RecordName.from(endpoint.dnsName());
        Record record;
        if (policy.canonicalName().isPresent()) {
            // Hostname-backed load balancer: point a CNAME at it
            record = new Record(Record.Type.CNAME, name, RecordData.fqdn(policy.canonicalName().get().value()));
        } else {
            // Address-backed load balancer: publish an A record
            record = new Record(Record.Type.A, name, RecordData.from(policy.ipAddress().orElseThrow()));
        }
        nameServiceForwarderIn(policy.id().zone()).createRecord(record, Priority.normal);
    }
}
/**
* Remove policies and zone DNS records unreferenced by given load balancers
*
* @return the updated policies
*/
private RoutingPolicyList removePoliciesUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList instancePolicies, @SuppressWarnings("unused") Mutex lock) {
Map<RoutingPolicyId, RoutingPolicy> newPolicies = new LinkedHashMap<>(instancePolicies.asMap());
Set<RoutingPolicyId> activeIds = allocation.asPolicyIds();
// Policies of this deployment not matching any current load balancer are removable.
RoutingPolicyList removable = instancePolicies.deployment(allocation.deployment)
.not().matching(policy -> activeIds.contains(policy.id()));
for (var policy : removable) {
for (var endpoint : policy.zoneEndpointsIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry())) {
nameServiceForwarderIn(allocation.deployment.zoneId()).removeRecords(Record.Type.CNAME,
RecordName.from(endpoint.dnsName()),
Priority.normal);
}
newPolicies.remove(policy.id());
}
RoutingPolicyList updated = RoutingPolicyList.copyOf(newPolicies.values());
db.writeRoutingPolicies(allocation.deployment.applicationId(), updated.asList());
return updated;
}
/** Remove unreferenced instance endpoints from DNS */
private void removeGlobalDnsUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList deploymentPolicies, @SuppressWarnings("unused") Mutex lock) {
Set<RoutingId> removalCandidates = new HashSet<>(deploymentPolicies.asInstanceRoutingTable().keySet());
Set<RoutingId> activeRoutingIds = instanceRoutingIds(allocation);
// Endpoints still referenced by the allocation keep their records.
removalCandidates.removeAll(activeRoutingIds);
for (var id : removalCandidates) {
EndpointList endpoints = controller.routing().readDeclaredEndpointsOf(id.instance())
.not().requiresRotation()
.named(id.endpointId());
NameServiceForwarder forwarder = nameServiceForwarderIn(allocation.deployment.zoneId());
endpoints.forEach(endpoint -> forwarder.removeRecords(Record.Type.ALIAS, RecordName.from(endpoint.dnsName()),
Priority.normal));
}
}
/** Returns the instance-level routing IDs served by the given allocation. */
private Set<RoutingId> instanceRoutingIds(LoadBalancerAllocation allocation) {
return routingIdsFrom(allocation, false);
}
/** Returns the application-level routing IDs served by the given allocation. */
private Set<RoutingId> applicationRoutingIds(LoadBalancerAllocation allocation) {
return routingIdsFrom(allocation, true);
}
/** Computes the routing IDs (application × endpoint id) served by the given load balancers. */
private static Set<RoutingId> routingIdsFrom(LoadBalancerAllocation allocation, boolean applicationLevel) {
    Set<RoutingId> routingIds = new LinkedHashSet<>();
    for (var loadBalancer : allocation.loadBalancers) {
        var endpointIds = applicationLevel ? allocation.applicationEndpointsOf(loadBalancer)
                                           : allocation.instanceEndpointsOf(loadBalancer);
        endpointIds.forEach(endpointId -> routingIds.add(RoutingId.of(loadBalancer.application(), endpointId)));
    }
    return Collections.unmodifiableSet(routingIds);
}
/** Returns whether the endpoints of given policy are configured {@link RoutingStatus.Value#out}. */
private static boolean isConfiguredOut(ZoneRoutingPolicy zonePolicy, RoutingPolicy policy, Set<ZoneId> inactiveZones) {
// A policy is out if set out zone-wide, set out individually, or deployed to an inactive zone.
return zonePolicy.routingStatus().value() == RoutingStatus.Value.out ||
policy.status().routingStatus().value() == RoutingStatus.Value.out ||
inactiveZones.contains(policy.id().zone());
}
/** Returns whether the given load balancer is in a state that should receive traffic. */
private static boolean isActive(LoadBalancer loadBalancer) {
    switch (loadBalancer.state()) {
        case reserved:
        case active:
            return true;
        default:
            return false;
    }
}
/** Represents records for a region-wide endpoint */
private static class RegionEndpoint {
// The latency-based alias record of the region, and its weighted per-zone targets.
private final LatencyAliasTarget target;
private final Set<WeightedAliasTarget> zoneAliasTargets = new LinkedHashSet<>();
private final Set<WeightedDirectTarget> zoneDirectTargets = new LinkedHashSet<>();
public RegionEndpoint(LatencyAliasTarget target) {
this.target = Objects.requireNonNull(target);
}
public LatencyAliasTarget target() { return target; }
public Set<AliasTarget> zoneAliasTargets() { return Collections.unmodifiableSet(zoneAliasTargets); }
public Set<DirectTarget> zoneDirectTargets() { return Collections.unmodifiableSet(zoneDirectTargets); }
public void add(WeightedAliasTarget target) { zoneAliasTargets.add(target); }
public void add(WeightedDirectTarget target) { zoneDirectTargets.add(target); }
// A region is active if at least one of its zone targets carries a non-zero weight.
public boolean active() {
return zoneAliasTargets.stream().anyMatch(target -> target.weight() > 0) ||
zoneDirectTargets.stream().anyMatch(target -> target.weight() > 0);
}
// Identity is the record name only, so one RegionEndpoint accumulates all targets of a region.
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
RegionEndpoint that = (RegionEndpoint) o;
return target.name().equals(that.target.name());
}
@Override
public int hashCode() {
return Objects.hash(target.name());
}
}
/** Load balancers allocated to a deployment */
private static class LoadBalancerAllocation {
private final DeploymentId deployment;
private final List<LoadBalancer> loadBalancers;
private final DeploymentSpec deploymentSpec;
private LoadBalancerAllocation(List<LoadBalancer> loadBalancers, DeploymentId deployment,
DeploymentSpec deploymentSpec) {
this.deployment = deployment;
this.loadBalancers = List.copyOf(loadBalancers);
this.deploymentSpec = deploymentSpec;
}
/** Returns the policy IDs of the load balancers contained in this */
private Set<RoutingPolicyId> asPolicyIds() {
return loadBalancers.stream()
.map(lb -> new RoutingPolicyId(lb.application(),
lb.cluster(),
deployment.zoneId()))
.collect(Collectors.toUnmodifiableSet());
}
/** Returns all instance endpoint IDs served by given load balancer */
private Set<EndpointId> instanceEndpointsOf(LoadBalancer loadBalancer) {
// Global endpoints only exist for production deployments with a matching instance spec.
if (!deployment.zoneId().environment().isProduction()) {
return Set.of();
}
var instanceSpec = deploymentSpec.instance(loadBalancer.application().instance());
if (instanceSpec.isEmpty()) {
return Set.of();
}
// Legacy global-service-id declaration maps to the default endpoint.
if (instanceSpec.get().globalServiceId().filter(id -> id.equals(loadBalancer.cluster().value())).isPresent()) {
return Set.of(EndpointId.defaultId());
}
// Otherwise: declared endpoints targeting this cluster and region.
return instanceSpec.get().endpoints().stream()
.filter(endpoint -> endpoint.containerId().equals(loadBalancer.cluster().value()))
.filter(endpoint -> endpoint.regions().contains(deployment.zoneId().region()))
.map(com.yahoo.config.application.api.Endpoint::endpointId)
.map(EndpointId::of)
.collect(Collectors.toUnmodifiableSet());
}
/** Returns all application endpoint IDs served by given load balancer */
private Set<EndpointId> applicationEndpointsOf(LoadBalancer loadBalancer) {
if (!deployment.zoneId().environment().isProduction()) {
return Set.of();
}
// Application-level endpoints targeting this cluster, region and instance.
return deploymentSpec.endpoints().stream()
.filter(endpoint -> endpoint.containerId().equals(loadBalancer.cluster().value()))
.filter(endpoint -> endpoint.targets().stream()
.anyMatch(target -> target.region().equals(deployment.zoneId().region()) &&
target.instance().equals(deployment.applicationId().instance())))
.map(com.yahoo.config.application.api.Endpoint::endpointId)
.map(EndpointId::of)
.collect(Collectors.toUnmodifiableSet());
}
}
/** Returns the zones where global routing is declared inactive for the instance, per its deployment spec. */
private static Set<ZoneId> inactiveZones(ApplicationId instance, DeploymentSpec deploymentSpec) {
    var instanceSpec = deploymentSpec.instance(instance.instance());
    if (instanceSpec.isEmpty()) return Set.of();
    return instanceSpec.get().zones().stream()
                       .filter(zone -> zone.environment().isProduction() && !zone.active())
                       .map(zone -> ZoneId.from(zone.environment(), zone.region().get()))
                       .collect(Collectors.toUnmodifiableSet());
}
/** Returns the name updater to use for given zone */
private NameServiceForwarder nameServiceForwarderIn(ZoneId zone) {
return switch (controller.zoneRegistry().routingMethod(zone)) {
case exclusive -> controller.nameServiceForwarder();
case sharedLayer4 -> new NameServiceDiscarder(controller.curator());
};
}
/** Denotes record data (record rhs) of either an ALIAS or a DIRECT target */
private record Target(Record.Type type, RecordData data, Object aliasOrDirectTarget) {
static Target weighted(RoutingPolicy policy, Endpoint.Target endpointTarget) {
if (policy.ipAddress().isPresent()) {
var wt = new WeightedDirectTarget(RecordData.from(policy.ipAddress().get()),
endpointTarget.deployment().zoneId(), endpointTarget.weight());
return new Target(Record.Type.DIRECT, wt.recordData(), wt);
}
var wt = new WeightedAliasTarget(policy.canonicalName().get(), policy.dnsZone().get(),
endpointTarget.deployment().zoneId(), endpointTarget.weight());
return new Target(Record.Type.ALIAS, RecordData.fqdn(wt.name().value()), wt);
}
}
/** A {@link NameServiceForwarder} that does nothing. Used in zones where no explicit DNS updates are needed */
private static class NameServiceDiscarder extends NameServiceForwarder {
public NameServiceDiscarder(CuratorDb db) {
super(db);
}
@Override
protected void forward(NameServiceRequest request, Priority priority) {
}
}
} | class RoutingPolicies {
private final Controller controller;
private final CuratorDb db;
// NOTE(review): this constructor reads every stored routing policy and immediately writes it
// back under the routing lock. That looks like a one-time data-migration/re-serialization
// step — confirm it is still needed and remove once all entries are rewritten.
public RoutingPolicies(Controller controller) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
this.db = controller.curator();
try (var lock = db.lockRoutingPolicies()) {
for (var policy : db.readRoutingPolicies().entrySet()) {
db.writeRoutingPolicies(policy.getKey(), policy.getValue());
}
}
}
/** Returns all routing policies for the given deployment. */
public RoutingPolicyList read(DeploymentId deployment) {
var instancePolicies = read(deployment.applicationId());
return instancePolicies.deployment(deployment);
}
/** Returns all routing policies for the given instance. */
public RoutingPolicyList read(ApplicationId instance) {
var stored = db.readRoutingPolicies(instance);
return RoutingPolicyList.copyOf(stored);
}
/** Returns all routing policies for the given application, across all its instances. */
private RoutingPolicyList read(TenantAndApplicationId application) {
var matching = db.readRoutingPolicies((instance) -> TenantAndApplicationId.from(instance).equals(application))
.values()
.stream()
.flatMap(Collection::stream)
.toList();
return RoutingPolicyList.copyOf(matching);
}
/** Returns every routing policy known to this controller. */
private RoutingPolicyList readAll() {
var all = db.readRoutingPolicies()
.values()
.stream()
.flatMap(Collection::stream)
.toList();
return RoutingPolicyList.copyOf(all);
}
/** Returns the zone-wide routing policy for the given zone. */
public ZoneRoutingPolicy read(ZoneId zone) {
return db.readZoneRoutingPolicy(zone);
}
/**
 * Refresh routing policies for instance in given zone. This is idempotent and changes will only be performed if
 * routing configuration affecting given deployment has changed.
 */
public void refresh(DeploymentId deployment, DeploymentSpec deploymentSpec) {
ApplicationId instance = deployment.applicationId();
// Load balancers are fetched outside the lock; only policy reads/writes are guarded.
List<LoadBalancer> loadBalancers = controller.serviceRegistry().configServer()
.getLoadBalancers(instance, deployment.zoneId());
LoadBalancerAllocation allocation = new LoadBalancerAllocation(loadBalancers, deployment, deploymentSpec);
Set<ZoneId> inactiveZones = inactiveZones(instance, deploymentSpec);
try (var lock = db.lockRoutingPolicies()) {
RoutingPolicyList applicationPolicies = read(TenantAndApplicationId.from(instance));
RoutingPolicyList instancePolicies = applicationPolicies.instance(instance);
RoutingPolicyList deploymentPolicies = applicationPolicies.deployment(allocation.deployment);
// Order matters: first remove DNS for endpoints no longer referenced, then store new
// policies and drop stale ones, and finally rewrite global/application records from
// the updated policy set.
removeGlobalDnsUnreferencedBy(allocation, deploymentPolicies, lock);
removeApplicationDnsUnreferencedBy(allocation, deploymentPolicies, lock);
instancePolicies = storePoliciesOf(allocation, instancePolicies, lock);
instancePolicies = removePoliciesUnreferencedBy(allocation, instancePolicies, lock);
applicationPolicies = applicationPolicies.replace(instance, instancePolicies);
updateGlobalDnsOf(instancePolicies, inactiveZones, lock);
updateApplicationDnsOf(applicationPolicies, inactiveZones, lock);
}
}
/** Set the status of all global endpoints in given zone */
public void setRoutingStatus(ZoneId zone, RoutingStatus.Value value) {
try (var lock = db.lockRoutingPolicies()) {
db.writeZoneRoutingPolicy(new ZoneRoutingPolicy(zone, RoutingStatus.create(value, RoutingStatus.Agent.operator,
controller.clock().instant())));
// Re-evaluate global DNS for every instance, since the zone-wide status affects them all.
Map<ApplicationId, RoutingPolicyList> allPolicies = readAll().groupingBy(policy -> policy.id().owner());
for (var instancePolicies : allPolicies.values()) {
updateGlobalDnsOf(instancePolicies, Set.of(), lock);
}
}
}
/** Set the status of all global endpoints for given deployment */
public void setRoutingStatus(DeploymentId deployment, RoutingStatus.Value value, RoutingStatus.Agent agent) {
ApplicationId instance = deployment.applicationId();
try (var lock = db.lockRoutingPolicies()) {
RoutingPolicyList applicationPolicies = read(TenantAndApplicationId.from(instance));
RoutingPolicyList deploymentPolicies = applicationPolicies.deployment(deployment);
// Stamp the new routing status onto every policy of this deployment only.
Map<RoutingPolicyId, RoutingPolicy> updatedPolicies = new LinkedHashMap<>(applicationPolicies.asMap());
for (var policy : deploymentPolicies) {
var newPolicy = policy.with(policy.status().with(RoutingStatus.create(value, agent,
controller.clock().instant())));
updatedPolicies.put(policy.id(), newPolicy);
}
RoutingPolicyList effectivePolicies = RoutingPolicyList.copyOf(updatedPolicies.values());
// Persist per instance, then refresh instance-level (global) and application-level DNS.
Map<ApplicationId, RoutingPolicyList> policiesByInstance = effectivePolicies.groupingBy(policy -> policy.id().owner());
policiesByInstance.forEach((owner, instancePolicies) -> db.writeRoutingPolicies(owner, instancePolicies.asList()));
policiesByInstance.forEach((ignored, instancePolicies) -> updateGlobalDnsOf(instancePolicies, Set.of(), lock));
updateApplicationDnsOf(effectivePolicies, Set.of(), lock);
}
}
/** Update global DNS records for given policies. Caller must hold the routing lock (witnessed by {@code lock}). */
private void updateGlobalDnsOf(RoutingPolicyList instancePolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Mutex lock) {
Map<RoutingId, List<RoutingPolicy>> routingTable = instancePolicies.asInstanceRoutingTable();
for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) {
RoutingId routingId = routeEntry.getKey();
// Rotation-based endpoints are managed elsewhere; only handle non-rotation endpoints here.
controller.routing().readDeclaredEndpointsOf(routingId.instance())
.named(routingId.endpointId())
.not().requiresRotation()
.forEach(endpoint -> updateGlobalDnsOf(endpoint, inactiveZones, routeEntry.getValue()));
}
}
/** Update global DNS records for given global endpoint */
private void updateGlobalDnsOf(Endpoint endpoint, Set<ZoneId> inactiveZones, List<RoutingPolicy> policies) {
if (endpoint.scope() != Endpoint.Scope.global) throw new IllegalArgumentException("Endpoint " + endpoint + " is not global");
Collection<RegionEndpoint> regionEndpoints = computeRegionEndpoints(policies, inactiveZones);
// First (re)write the per-region weighted records (zone -> region level).
regionEndpoints.forEach(regionEndpoint -> {
if ( ! regionEndpoint.zoneAliasTargets().isEmpty()) {
controller.nameServiceForwarder().createAlias(RecordName.from(regionEndpoint.target().name().value()),
regionEndpoint.zoneAliasTargets(),
Priority.normal);
}
if ( ! regionEndpoint.zoneDirectTargets().isEmpty()) {
controller.nameServiceForwarder().createDirect(RecordName.from(regionEndpoint.target().name().value()),
regionEndpoint.zoneDirectTargets(),
Priority.normal);
}
});
// Then partition regions into active/inactive for the global latency record.
Set<AliasTarget> latencyTargets = new LinkedHashSet<>();
Set<AliasTarget> inactiveLatencyTargets = new LinkedHashSet<>();
for (var regionEndpoint : regionEndpoints) {
if (regionEndpoint.active()) {
latencyTargets.add(regionEndpoint.target());
} else {
inactiveLatencyTargets.add(regionEndpoint.target());
}
}
// If every region is configured out, keep them all in rotation: removing every target
// would make the endpoint unresolvable, which is worse than serving from "out" regions.
if (latencyTargets.isEmpty() && !inactiveLatencyTargets.isEmpty()) {
latencyTargets.addAll(inactiveLatencyTargets);
inactiveLatencyTargets.clear();
}
controller.nameServiceForwarder().createAlias(RecordName.from(endpoint.dnsName()), latencyTargets, Priority.normal);
inactiveLatencyTargets.forEach(t -> controller.nameServiceForwarder()
.removeRecords(Record.Type.ALIAS,
RecordData.fqdn(t.name().value()),
Priority.normal));
}
/** Compute region endpoints and their targets from given policies */
private Collection<RegionEndpoint> computeRegionEndpoints(List<RoutingPolicy> policies, Set<ZoneId> inactiveZones) {
Map<Endpoint, RegionEndpoint> endpoints = new LinkedHashMap<>();
for (var policy : policies) {
// A hostname-based policy without a DNS zone cannot be targeted; skip it.
// (IP-based policies always get a placeholder dnsZone in storePoliciesOf.)
if (policy.dnsZone().isEmpty() && policy.canonicalName().isPresent()) continue;
if (controller.zoneRegistry().routingMethod(policy.id().zone()) != RoutingMethod.exclusive) continue;
Endpoint endpoint = policy.regionEndpointIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry());
var zonePolicy = db.readZoneRoutingPolicy(policy.id().zone());
// Configured-out zones stay in the record set with weight 0 instead of being removed.
long weight = 1;
if (isConfiguredOut(zonePolicy, policy, inactiveZones)) {
weight = 0;
}
RegionEndpoint regionEndpoint = endpoints.computeIfAbsent(endpoint, (k) -> new RegionEndpoint(
new LatencyAliasTarget(DomainName.of(endpoint.dnsName()), policy.dnsZone().get(), policy.id().zone())));
if (policy.canonicalName().isPresent()) {
var weightedTarget = new WeightedAliasTarget(
policy.canonicalName().get(), policy.dnsZone().get(), policy.id().zone(), weight);
regionEndpoint.add(weightedTarget);
} else {
var weightedTarget = new WeightedDirectTarget(
RecordData.from(policy.ipAddress().get()), policy.id().zone(), weight);
regionEndpoint.add(weightedTarget);
}
}
return endpoints.values();
}
/** Update application-scoped DNS records for given policies. Caller must hold the routing lock. */
private void updateApplicationDnsOf(RoutingPolicyList routingPolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Mutex lock) {
Map<RoutingId, List<RoutingPolicy>> routingTable = routingPolicies.asApplicationRoutingTable();
if (routingTable.isEmpty()) return;
// All routing IDs in an application routing table belong to the same application.
Application application = controller.applications().requireApplication(routingTable.keySet().iterator().next().application());
Map<Endpoint, Set<Target>> targetsByEndpoint = new LinkedHashMap<>();
Map<Endpoint, Set<Target>> inactiveTargetsByEndpoint = new LinkedHashMap<>();
for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) {
RoutingId routingId = routeEntry.getKey();
EndpointList endpoints = controller.routing().declaredEndpointsOf(application)
.scope(Endpoint.Scope.application)
.named(routingId.endpointId());
for (Endpoint endpoint : endpoints) {
for (var policy : routeEntry.getValue()) {
for (var target : endpoint.targets()) {
if (!policy.appliesTo(target.deployment())) continue;
// Hostname-based policy without a DNS zone cannot be targeted; skip.
if (policy.dnsZone().isEmpty() && policy.canonicalName().isPresent())
continue;
ZoneRoutingPolicy zonePolicy = db.readZoneRoutingPolicy(policy.id().zone());
// Both sets are created together, so each endpoint key exists in both maps.
Set<Target> activeTargets = targetsByEndpoint.computeIfAbsent(endpoint, (k) -> new LinkedHashSet<>());
Set<Target> inactiveTargets = inactiveTargetsByEndpoint.computeIfAbsent(endpoint, (k) -> new LinkedHashSet<>());
if (isConfiguredOut(zonePolicy, policy, inactiveZones)) {
inactiveTargets.add(Target.weighted(policy, target));
}
else {
activeTargets.add(Target.weighted(policy, target));
}
}
}
}
}
// If an endpoint has no active targets, promote the inactive ones: an endpoint with no
// targets at all would stop resolving entirely.
for (var kv : targetsByEndpoint.entrySet()) {
Endpoint endpoint = kv.getKey();
Set<Target> activeTargets = kv.getValue();
if (!activeTargets.isEmpty()) {
continue;
}
Set<Target> inactiveTargets = inactiveTargetsByEndpoint.get(endpoint);
activeTargets.addAll(inactiveTargets);
inactiveTargets.clear();
}
// Write active targets, splitting by record family (ALIAS vs DIRECT).
targetsByEndpoint.forEach((applicationEndpoint, targets) -> {
ZoneId targetZone = applicationEndpoint.targets().iterator().next().deployment().zoneId();
Set<AliasTarget> aliasTargets = new LinkedHashSet<>();
Set<DirectTarget> directTargets = new LinkedHashSet<>();
for (Target target : targets) {
if (target.aliasOrDirectTarget() instanceof AliasTarget at) aliasTargets.add(at);
else directTargets.add((DirectTarget) target.aliasOrDirectTarget());
}
if ( ! aliasTargets.isEmpty()) {
nameServiceForwarderIn(targetZone).createAlias(
RecordName.from(applicationEndpoint.dnsName()), aliasTargets, Priority.normal);
nameServiceForwarderIn(targetZone).createAlias(
RecordName.from(applicationEndpoint.legacyRegionalDnsName()), aliasTargets, Priority.normal);
}
if ( ! directTargets.isEmpty()) {
nameServiceForwarderIn(targetZone).createDirect(
RecordName.from(applicationEndpoint.dnsName()), directTargets, Priority.normal);
nameServiceForwarderIn(targetZone).createDirect(
RecordName.from(applicationEndpoint.legacyRegionalDnsName()), directTargets, Priority.normal);
}
});
// Remove records for targets that remained inactive (both current and legacy names).
inactiveTargetsByEndpoint.forEach((applicationEndpoint, targets) -> {
ZoneId targetZone = applicationEndpoint.targets().iterator().next().deployment().zoneId();
targets.forEach(target -> {
nameServiceForwarderIn(targetZone).removeRecords(target.type(),
RecordName.from(applicationEndpoint.dnsName()),
target.data(),
Priority.normal);
nameServiceForwarderIn(targetZone).removeRecords(target.type(),
RecordName.from(applicationEndpoint.legacyRegionalDnsName()),
target.data(),
Priority.normal);
});
});
}
/**
 * Store routing policies for given load balancers
 *
 * @return the updated policies
 */
private RoutingPolicyList storePoliciesOf(LoadBalancerAllocation allocation, RoutingPolicyList instancePolicies, @SuppressWarnings("unused") Mutex lock) {
Map<RoutingPolicyId, RoutingPolicy> policies = new LinkedHashMap<>(instancePolicies.asMap());
for (LoadBalancer loadBalancer : allocation.loadBalancers) {
// A load balancer with neither hostname nor IP cannot be routed to; skip it.
if (loadBalancer.hostname().isEmpty() && loadBalancer.ipAddress().isEmpty()) continue;
var policyId = new RoutingPolicyId(loadBalancer.application(), loadBalancer.cluster(), allocation.deployment.zoneId());
var existingPolicy = policies.get(policyId);
// IP-based load balancers need no real DNS zone; use a placeholder so downstream
// checks (dnsZone().isEmpty()) treat the policy as targetable.
var dnsZone = loadBalancer.ipAddress().isPresent() ? Optional.of("ignored") : loadBalancer.dnsZone();
var newPolicy = new RoutingPolicy(policyId, loadBalancer.hostname(), loadBalancer.ipAddress(), dnsZone,
allocation.instanceEndpointsOf(loadBalancer),
allocation.applicationEndpointsOf(loadBalancer),
new RoutingPolicy.Status(isActive(loadBalancer), RoutingStatus.DEFAULT));
// Preserve any operator/user-set routing status across load balancer reallocation.
if (existingPolicy != null) {
newPolicy = newPolicy.with(newPolicy.status().with(existingPolicy.status().routingStatus()));
}
updateZoneDnsOf(newPolicy);
policies.put(newPolicy.id(), newPolicy);
}
RoutingPolicyList updated = RoutingPolicyList.copyOf(policies.values());
db.writeRoutingPolicies(allocation.deployment.applicationId(), updated.asList());
return updated;
}
/** Update zone DNS record for given policy: CNAME to the LB hostname, or an A record to its IP. */
private void updateZoneDnsOf(RoutingPolicy policy) {
for (var endpoint : policy.zoneEndpointsIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry())) {
var name = RecordName.from(endpoint.dnsName());
var record = policy.canonicalName().isPresent() ?
new Record(Record.Type.CNAME, name, RecordData.fqdn(policy.canonicalName().get().value())) :
new Record(Record.Type.A, name, RecordData.from(policy.ipAddress().orElseThrow()));
nameServiceForwarderIn(policy.id().zone()).createRecord(record, Priority.normal);
}
}
/**
 * Remove policies and zone DNS records unreferenced by given load balancers
 *
 * @return the updated policies
 */
private RoutingPolicyList removePoliciesUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList instancePolicies, @SuppressWarnings("unused") Mutex lock) {
Map<RoutingPolicyId, RoutingPolicy> newPolicies = new LinkedHashMap<>(instancePolicies.asMap());
Set<RoutingPolicyId> activeIds = allocation.asPolicyIds();
// A policy for this deployment that no current load balancer maps to is stale.
RoutingPolicyList removable = instancePolicies.deployment(allocation.deployment)
.not().matching(policy -> activeIds.contains(policy.id()));
for (var policy : removable) {
for (var endpoint : policy.zoneEndpointsIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry())) {
nameServiceForwarderIn(allocation.deployment.zoneId()).removeRecords(Record.Type.CNAME,
RecordName.from(endpoint.dnsName()),
Priority.normal);
}
newPolicies.remove(policy.id());
}
RoutingPolicyList updated = RoutingPolicyList.copyOf(newPolicies.values());
db.writeRoutingPolicies(allocation.deployment.applicationId(), updated.asList());
return updated;
}
/** Remove unreferenced instance endpoints from DNS */
private void removeGlobalDnsUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList deploymentPolicies, @SuppressWarnings("unused") Mutex lock) {
// Candidates: routing IDs previously known for this deployment, minus those still active.
Set<RoutingId> removalCandidates = new HashSet<>(deploymentPolicies.asInstanceRoutingTable().keySet());
Set<RoutingId> activeRoutingIds = instanceRoutingIds(allocation);
removalCandidates.removeAll(activeRoutingIds);
for (var id : removalCandidates) {
EndpointList endpoints = controller.routing().readDeclaredEndpointsOf(id.instance())
.not().requiresRotation()
.named(id.endpointId());
NameServiceForwarder forwarder = nameServiceForwarderIn(allocation.deployment.zoneId());
endpoints.forEach(endpoint -> forwarder.removeRecords(Record.Type.ALIAS, RecordName.from(endpoint.dnsName()),
Priority.normal));
}
}
/** Returns routing IDs for instance-level (global) endpoints in given allocation */
// NOTE(review): the previous comment here ("Remove unreferenced application endpoints in
// given allocation from DNS") described removeApplicationDnsUnreferencedBy (called from
// refresh), not this helper — it appears to have been misplaced.
private Set<RoutingId> instanceRoutingIds(LoadBalancerAllocation allocation) {
return routingIdsFrom(allocation, false);
}
/** Returns routing IDs for application-level endpoints in given allocation */
private Set<RoutingId> applicationRoutingIds(LoadBalancerAllocation allocation) {
return routingIdsFrom(allocation, true);
}
/** Compute routing IDs from given load balancers */
private static Set<RoutingId> routingIdsFrom(LoadBalancerAllocation allocation, boolean applicationLevel) {
Set<RoutingId> routingIds = new LinkedHashSet<>();
for (var loadBalancer : allocation.loadBalancers) {
Set<EndpointId> endpoints = applicationLevel
? allocation.applicationEndpointsOf(loadBalancer)
: allocation.instanceEndpointsOf(loadBalancer);
for (var endpointId : endpoints) {
routingIds.add(RoutingId.of(loadBalancer.application(), endpointId));
}
}
return Collections.unmodifiableSet(routingIds);
}
/** Returns whether the endpoints of given policy are configured {@link RoutingStatus.Value#out}, at zone, policy or deployment-spec level */
private static boolean isConfiguredOut(ZoneRoutingPolicy zonePolicy, RoutingPolicy policy, Set<ZoneId> inactiveZones) {
return zonePolicy.routingStatus().value() == RoutingStatus.Value.out ||
policy.status().routingStatus().value() == RoutingStatus.Value.out ||
inactiveZones.contains(policy.id().zone());
}
/** Returns whether given load balancer is in a state that should receive traffic */
private static boolean isActive(LoadBalancer loadBalancer) {
return switch (loadBalancer.state()) {
// 'reserved' counts as active to avoid flapping DNS while a load balancer is being provisioned.
case reserved, active -> true;
default -> false;
};
}
/** Represents records for a region-wide endpoint */
private static class RegionEndpoint {
private final LatencyAliasTarget target;
private final Set<WeightedAliasTarget> zoneAliasTargets = new LinkedHashSet<>();
private final Set<WeightedDirectTarget> zoneDirectTargets = new LinkedHashSet<>();
public RegionEndpoint(LatencyAliasTarget target) {
this.target = Objects.requireNonNull(target);
}
public LatencyAliasTarget target() { return target; }
public Set<AliasTarget> zoneAliasTargets() { return Collections.unmodifiableSet(zoneAliasTargets); }
public Set<DirectTarget> zoneDirectTargets() { return Collections.unmodifiableSet(zoneDirectTargets); }
public void add(WeightedAliasTarget target) { zoneAliasTargets.add(target); }
public void add(WeightedDirectTarget target) { zoneDirectTargets.add(target); }
/** Returns whether any zone target carries traffic (a positive weight) */
public boolean active() {
return zoneAliasTargets.stream().anyMatch(target -> target.weight() > 0) ||
zoneDirectTargets.stream().anyMatch(target -> target.weight() > 0);
}
// Identity is the target record name only, so endpoints from different policies that
// share a name merge into one RegionEndpoint in computeRegionEndpoints.
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
RegionEndpoint that = (RegionEndpoint) o;
return target.name().equals(that.target.name());
}
@Override
public int hashCode() {
return Objects.hash(target.name());
}
}
/** Load balancers allocated to a deployment */
private static class LoadBalancerAllocation {
private final DeploymentId deployment;
private final List<LoadBalancer> loadBalancers;
private final DeploymentSpec deploymentSpec;
private LoadBalancerAllocation(List<LoadBalancer> loadBalancers, DeploymentId deployment,
DeploymentSpec deploymentSpec) {
this.deployment = deployment;
this.loadBalancers = List.copyOf(loadBalancers);
this.deploymentSpec = deploymentSpec;
}
/** Returns the policy IDs of the load balancers contained in this */
private Set<RoutingPolicyId> asPolicyIds() {
return loadBalancers.stream()
.map(lb -> new RoutingPolicyId(lb.application(),
lb.cluster(),
deployment.zoneId()))
.collect(Collectors.toUnmodifiableSet());
}
/** Returns all instance endpoint IDs served by given load balancer. Empty outside production. */
private Set<EndpointId> instanceEndpointsOf(LoadBalancer loadBalancer) {
if (!deployment.zoneId().environment().isProduction()) {
return Set.of();
}
var instanceSpec = deploymentSpec.instance(loadBalancer.application().instance());
if (instanceSpec.isEmpty()) {
return Set.of();
}
// Legacy global-service-id syntax maps the whole cluster to the "default" endpoint.
if (instanceSpec.get().globalServiceId().filter(id -> id.equals(loadBalancer.cluster().value())).isPresent()) {
return Set.of(EndpointId.defaultId());
}
// Otherwise match declared endpoints by cluster and region.
return instanceSpec.get().endpoints().stream()
.filter(endpoint -> endpoint.containerId().equals(loadBalancer.cluster().value()))
.filter(endpoint -> endpoint.regions().contains(deployment.zoneId().region()))
.map(com.yahoo.config.application.api.Endpoint::endpointId)
.map(EndpointId::of)
.collect(Collectors.toUnmodifiableSet());
}
/** Returns all application endpoint IDs served by given load balancer. Empty outside production. */
private Set<EndpointId> applicationEndpointsOf(LoadBalancer loadBalancer) {
if (!deployment.zoneId().environment().isProduction()) {
return Set.of();
}
// Match application-level endpoints whose targets include this deployment's region and instance.
return deploymentSpec.endpoints().stream()
.filter(endpoint -> endpoint.containerId().equals(loadBalancer.cluster().value()))
.filter(endpoint -> endpoint.targets().stream()
.anyMatch(target -> target.region().equals(deployment.zoneId().region()) &&
target.instance().equals(deployment.applicationId().instance())))
.map(com.yahoo.config.application.api.Endpoint::endpointId)
.map(EndpointId::of)
.collect(Collectors.toUnmodifiableSet());
}
}
/** Returns zones where global routing is declared inactive for instance through deploymentSpec */
private static Set<ZoneId> inactiveZones(ApplicationId instance, DeploymentSpec deploymentSpec) {
var instanceSpec = deploymentSpec.instance(instance.instance());
if (instanceSpec.isEmpty()) return Set.of();
return instanceSpec.get().zones().stream()
.filter(zone -> zone.environment().isProduction())
.filter(zone -> !zone.active())
.map(zone -> ZoneId.from(zone.environment(), zone.region().get()))
.collect(Collectors.toUnmodifiableSet());
}
/** Returns the name updater to use for given zone: real forwarder for exclusively routed zones, a discarder otherwise */
private NameServiceForwarder nameServiceForwarderIn(ZoneId zone) {
return switch (controller.zoneRegistry().routingMethod(zone)) {
case exclusive -> controller.nameServiceForwarder();
case sharedLayer4 -> new NameServiceDiscarder(controller.curator());
};
}
/** Denotes record data (record rhs) of either an ALIAS or a DIRECT target */
private record Target(Record.Type type, RecordData data, Object aliasOrDirectTarget) {
/** Builds a weighted DIRECT target when the policy has an IP address, a weighted ALIAS target otherwise. */
static Target weighted(RoutingPolicy policy, Endpoint.Target endpointTarget) {
if (policy.ipAddress().isPresent()) {
var wt = new WeightedDirectTarget(RecordData.from(policy.ipAddress().get()),
endpointTarget.deployment().zoneId(), endpointTarget.weight());
return new Target(Record.Type.DIRECT, wt.recordData(), wt);
}
var wt = new WeightedAliasTarget(policy.canonicalName().get(), policy.dnsZone().get(),
endpointTarget.deployment().zoneId(), endpointTarget.weight());
return new Target(Record.Type.ALIAS, RecordData.fqdn(wt.name().value()), wt);
}
}
/** A {@link NameServiceForwarder} that does nothing. Used in zones where no explicit DNS updates are needed */
private static class NameServiceDiscarder extends NameServiceForwarder {
public NameServiceDiscarder(CuratorDb db) {
super(db);
}
@Override
protected void forward(NameServiceRequest request, Priority priority) {
// Intentionally a no-op: requests are silently dropped for these zones.
}
}
} |
This used to be `true`? | public static FileDBRegistry create(AddFileInterface manager, Reader persistedState) {
try (BufferedReader reader = new BufferedReader(persistedState)) {
// First line is the host that produced the registry; only its presence is validated.
String ignoredFileSourceHost = reader.readLine();
if (ignoredFileSourceHost == null)
throw new RuntimeException("No file source host");
// A pre-generated registry may reference files that no longer exist on this host, so
// missing files must be silenced (flag restored to true — it was inadvertently false).
return new FileDBRegistry(manager, decode(reader), true);
} catch (IOException e) {
throw new RuntimeException("Error while reading pre-generated file registry", e);
}
} | return new FileDBRegistry(manager, decode(reader), false); | public static FileDBRegistry create(AddFileInterface manager, Reader persistedState) {
try (BufferedReader reader = new BufferedReader(persistedState)) {
// First line is the host that produced the registry; only its presence is validated.
String ignoredFileSourceHost = reader.readLine();
if (ignoredFileSourceHost == null)
throw new RuntimeException("No file source host");
// silenceNonExistingFiles=true: a pre-generated registry may reference files absent on this host.
return new FileDBRegistry(manager, decode(reader), true);
} catch (IOException e) {
throw new RuntimeException("Error while reading pre-generated file registry", e);
}
} | class FileDBRegistry implements FileRegistry {
// When true, addFile returns a placeholder reference for missing files instead of throwing.
private final boolean silenceNonExistingFiles;
private final AddFileInterface manager;
// Maps relative path / URI / blob name -> file reference; guarded by synchronized methods.
private final Map<String, FileReference> fileReferenceCache = new HashMap<>();
private static final String entryDelimiter = "\t";
private static final Pattern entryDelimiterPattern = Pattern.compile(entryDelimiter, Pattern.LITERAL);
/** Creates an empty registry that fails on non-existing files. */
public FileDBRegistry(AddFileInterface manager) {
this(manager, Map.of(), false);
}
private FileDBRegistry(AddFileInterface manager, Map<String, FileReference> knownReferences, boolean silenceNonExistingFiles) {
this.silenceNonExistingFiles = silenceNonExistingFiles;
this.manager = manager;
fileReferenceCache.putAll(knownReferences);
}
/**
 * Parses persisted registry entries, one "&lt;relativePath&gt;\t&lt;fileReference&gt;" line per entry.
 * Counterpart of {@code exportRegistry} (minus the leading host line, consumed by the caller).
 */
static Map<String, FileReference> decode(BufferedReader reader) {
Map<String, FileReference> refs = new HashMap<>();
try {
String line;
while ((line = reader.readLine()) != null) {
String[] parts = entryDelimiterPattern.split(line);
if (parts.length < 2)
throw new IllegalArgumentException("Cannot split '" + line + "' into two parts");
refs.put(parts[0], new FileReference(parts[1]));
}
} catch (IOException e) {
throw new RuntimeException("Error while reading pre-generated file registry", e);
}
return refs;
}
/** Registers the file at the given relative path, returning a cached reference if already registered. */
@Override
public synchronized FileReference addFile(String relativePath) {
if (relativePath.startsWith("/"))
throw new IllegalArgumentException(relativePath + " is not relative");
Optional<FileReference> cachedReference = Optional.ofNullable(fileReferenceCache.get(relativePath));
return cachedReference.orElseGet(() -> {
try {
FileReference newRef = manager.addFile(Path.fromString(relativePath));
fileReferenceCache.put(relativePath, newRef);
return newRef;
} catch (FileNotFoundException e) {
// Registries created from persisted state may reference files that no longer
// exist on this host; in that mode return a placeholder instead of failing.
if (silenceNonExistingFiles) {
return new FileReference("non-existing-file");
} else {
throw new IllegalArgumentException(e);
}
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
}
);
}
/** Registers a downloadable URI; cached by the URI itself, stored under a hashed relative path. */
@Override
public synchronized FileReference addUri(String uri) {
String relativePath = uriToRelativeFile(uri);
Optional<FileReference> cachedReference = Optional.ofNullable(fileReferenceCache.get(uri));
return cachedReference.orElseGet(() -> {
FileReference newRef = manager.addUri(uri, Path.fromString(relativePath));
fileReferenceCache.put(uri, newRef);
return newRef;
});
}
/** Registers an in-memory blob; cached by blob name, stored under blob/&lt;name&gt;. */
@Override
public FileReference addBlob(String blobName, ByteBuffer blob) {
String relativePath = blobToRelativeFile(blobName);
// Synchronize explicitly since this method is not declared synchronized like the others.
synchronized (this) {
Optional<FileReference> cachedReference = Optional.ofNullable(fileReferenceCache.get(blobName));
return cachedReference.orElseGet(() -> {
FileReference newRef = manager.addBlob(blob, Path.fromString(relativePath));
fileReferenceCache.put(blobName, newRef);
return newRef;
});
}
}
/** Returns a snapshot of all registered entries. */
@Override
public synchronized List<Entry> export() {
List<Entry> snapshot = new ArrayList<>(fileReferenceCache.size());
fileReferenceCache.forEach((path, reference) -> snapshot.add(new Entry(path, reference)));
return snapshot;
}
/** Returns an immutable copy of the key -> reference cache. */
synchronized Map<String, FileReference> getMap() {
return ImmutableMap.copyOf(fileReferenceCache);
}
/** Serializes a registry as a host line followed by one tab-separated path/reference line per entry (see decode). */
public static String exportRegistry(FileRegistry registry) {
StringBuilder sb = new StringBuilder();
sb.append(HostName.getLocalhost()).append('\n');
for (Entry entry : registry.export()) {
sb.append(entry.relativePath).append(entryDelimiter).append(entry.reference.value()).append('\n');
}
return sb.toString();
}
/**
 * Maps a URI to a stable relative path under uri/, using an XXHash64 of the URI bytes.
 * Meaningful extensions are preserved so downstream consumers can detect e.g. lz4 compression.
 */
private static String uriToRelativeFile(String uri) {
String relative = "uri/" + XXHashFactory.fastestJavaInstance().hash64().hash(ByteBuffer.wrap(Utf8.toBytes(uri)), 0);
if (uri.endsWith(".json")) {
relative += ".json";
} else if (uri.endsWith(".json.lz4")) {
relative += ".json.lz4";
} else if (uri.endsWith(".lz4")) {
relative += ".lz4";
}
return relative;
}
/** Maps a blob name to its relative path under blob/. */
private static String blobToRelativeFile(String blobName) {
return "blob/" + blobName;
}
} | class FileDBRegistry implements FileRegistry {
private final boolean silenceNonExistingFiles;
private final AddFileInterface manager;
private final Map<String, FileReference> fileReferenceCache = new HashMap<>();
private static final String entryDelimiter = "\t";
private static final Pattern entryDelimiterPattern = Pattern.compile(entryDelimiter, Pattern.LITERAL);
public FileDBRegistry(AddFileInterface manager) {
this(manager, Map.of(), false);
}
private FileDBRegistry(AddFileInterface manager, Map<String, FileReference> knownReferences, boolean silenceNonExistingFiles) {
this.silenceNonExistingFiles = silenceNonExistingFiles;
this.manager = manager;
fileReferenceCache.putAll(knownReferences);
}
static Map<String, FileReference> decode(BufferedReader reader) {
Map<String, FileReference> refs = new HashMap<>();
try {
String line;
while ((line = reader.readLine()) != null) {
String[] parts = entryDelimiterPattern.split(line);
if (parts.length < 2)
throw new IllegalArgumentException("Cannot split '" + line + "' into two parts");
refs.put(parts[0], new FileReference(parts[1]));
}
} catch (IOException e) {
throw new RuntimeException("Error while reading pre-generated file registry", e);
}
return refs;
}
@Override
public synchronized FileReference addFile(String relativePath) {
if (relativePath.startsWith("/"))
throw new IllegalArgumentException(relativePath + " is not relative");
Optional<FileReference> cachedReference = Optional.ofNullable(fileReferenceCache.get(relativePath));
return cachedReference.orElseGet(() -> {
try {
FileReference newRef = manager.addFile(Path.fromString(relativePath));
fileReferenceCache.put(relativePath, newRef);
return newRef;
} catch (FileNotFoundException e) {
if (silenceNonExistingFiles) {
return new FileReference("non-existing-file");
} else {
throw new IllegalArgumentException(e);
}
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
}
);
}
@Override
public synchronized FileReference addUri(String uri) {
String relativePath = uriToRelativeFile(uri);
Optional<FileReference> cachedReference = Optional.ofNullable(fileReferenceCache.get(uri));
return cachedReference.orElseGet(() -> {
FileReference newRef = manager.addUri(uri, Path.fromString(relativePath));
fileReferenceCache.put(uri, newRef);
return newRef;
});
}
@Override
public FileReference addBlob(String blobName, ByteBuffer blob) {
String relativePath = blobToRelativeFile(blobName);
synchronized (this) {
Optional<FileReference> cachedReference = Optional.ofNullable(fileReferenceCache.get(blobName));
return cachedReference.orElseGet(() -> {
FileReference newRef = manager.addBlob(blob, Path.fromString(relativePath));
fileReferenceCache.put(blobName, newRef);
return newRef;
});
}
}
/** Returns a snapshot of all registered entries as (relativePath, reference) pairs. */
@Override
public synchronized List<Entry> export() {
    List<Entry> snapshot = new ArrayList<>(fileReferenceCache.size());
    fileReferenceCache.forEach((path, reference) -> snapshot.add(new Entry(path, reference)));
    return snapshot;
}
/** Returns an immutable snapshot of the key (path, URI or blob name) to file reference cache. */
synchronized Map<String, FileReference> getMap() {
    return ImmutableMap.copyOf(fileReferenceCache);
}
/**
 * Serializes the registry to the textual format read back by create/decode:
 * the local hostname on the first line, then one tab-separated entry per line.
 */
public static String exportRegistry(FileRegistry registry) {
    StringBuilder serialized = new StringBuilder();
    serialized.append(HostName.getLocalhost()).append('\n');
    for (Entry entry : registry.export())
        serialized.append(entry.relativePath).append(entryDelimiter).append(entry.reference.value()).append('\n');
    return serialized.toString();
}
/**
 * Maps a URI to the relative path used for it in the file distribution directory:
 * a 64-bit hash of the URI, keeping a recognized (possibly lz4-compressed) json suffix.
 */
private static String uriToRelativeFile(String uri) {
    StringBuilder relative = new StringBuilder("uri/")
            .append(XXHashFactory.fastestJavaInstance().hash64().hash(ByteBuffer.wrap(Utf8.toBytes(uri)), 0));
    if (uri.endsWith(".json"))
        relative.append(".json");
    else if (uri.endsWith(".json.lz4"))
        relative.append(".json.lz4");
    else if (uri.endsWith(".lz4"))
        relative.append(".lz4");
    return relative.toString();
}
/** Maps a blob name to the relative path used for it in the file distribution directory. */
private static String blobToRelativeFile(String blobName) {
    return "blob/" + blobName;
}
} |
Fixed | private static SearchGroupsImpl toGroups(Collection<Node> nodes, double minActivedocsPercentage) {
ImmutableMap.Builder<Integer, Group> groupsBuilder = new ImmutableMap.Builder<>();
for (Map.Entry<Integer, List<Node>> group : nodes.stream().collect(Collectors.groupingBy(Node::group)).entrySet()) {
Group g = new Group(group.getKey(), group.getValue());
groupsBuilder.put(group.getKey(), g);
}
return new SearchGroupsImpl(groupsBuilder.build(), minActivedocsPercentage);
} | ImmutableMap.Builder<Integer, Group> groupsBuilder = new ImmutableMap.Builder<>(); | private static SearchGroupsImpl toGroups(Collection<Node> nodes, double minActivedocsPercentage) {
Map<Integer, Group> groups = new HashMap<>();
for (Map.Entry<Integer, List<Node>> group : nodes.stream().collect(Collectors.groupingBy(Node::group)).entrySet()) {
Group g = new Group(group.getKey(), group.getValue());
groups.put(group.getKey(), g);
}
return new SearchGroupsImpl(Map.copyOf(groups), minActivedocsPercentage);
} | class SearchCluster implements NodeManager<Node> {
private static final Logger log = Logger.getLogger(SearchCluster.class.getName());
private final String clusterId;
private final VipStatus vipStatus;
private final PingFactory pingFactory;
private final SearchGroupsImpl groups;
private long nextLogTime = 0;
/**
* A search node on this local machine having the entire corpus, which we therefore
* should prefer to dispatch directly to, or empty if there is no such local search node.
* If there is one, we also maintain the VIP status of this container based on the availability
* of the corpus on this local node (up + has coverage), such that this node is taken out of rotation
* if it only queries this cluster when the local node cannot be used, to avoid unnecessary
* cross-node network traffic.
*/
private final Node localCorpusDispatchTarget;
public SearchCluster(String clusterId, double minActivedocsPercentage,
DispatchNodesConfig nodesConfig,
VipStatus vipStatus, PingFactory pingFactory) {
this(clusterId, minActivedocsPercentage, toNodes(nodesConfig), vipStatus, pingFactory);
}
public SearchCluster(String clusterId, double minActivedocsPercentage, List<Node> nodes,
VipStatus vipStatus, PingFactory pingFactory) {
this(clusterId, toGroups(nodes, minActivedocsPercentage), vipStatus, pingFactory);
}
public SearchCluster(String clusterId, SearchGroupsImpl groups, VipStatus vipStatus, PingFactory pingFactory) {
this.clusterId = clusterId;
this.vipStatus = vipStatus;
this.pingFactory = pingFactory;
this.groups = groups;
this.localCorpusDispatchTarget = findLocalCorpusDispatchTarget(HostName.getLocalhost(), groups);
}
@Override
public String name() { return clusterId; }
public void addMonitoring(ClusterMonitor<Node> clusterMonitor) {
for (var group : groups()) {
for (var node : group.nodes())
clusterMonitor.add(node, true);
}
}
/**
 * Returns the single search node running on this host, provided it is alone both on
 * this host and in its group (so that it holds the entire corpus), or null otherwise.
 */
private static Node findLocalCorpusDispatchTarget(String selfHostname, SearchGroups groups) {
    List<Node> onThisHost = groups.groups().stream()
                                  .flatMap(group -> group.nodes().stream())
                                  .filter(node -> node.hostname().equals(selfHostname))
                                  .toList();
    if (onThisHost.size() != 1) return null;
    Node candidate = onThisHost.get(0);
    // Only a node which has its group to itself covers the whole corpus.
    return groups.get(candidate.group()).nodes().size() == 1 ? candidate : null;
}
private static List<Node> toNodes(DispatchNodesConfig nodesConfig) {
return nodesConfig.node().stream()
.map(n -> new Node(n.key(), n.host(), n.group()))
.toList();
}
public SearchGroups groupList() { return groups; }
public Group group(int id) { return groups.get(id); }
private Collection<Group> groups() { return groups.groups(); }
public int groupsWithSufficientCoverage() {
return (int)groups().stream().filter(Group::hasSufficientCoverage).count();
}
/**
 * Returns the single, local node we should dispatch queries directly to,
 * or empty if we should not dispatch directly.
 */
public Optional<Node> localCorpusDispatchTarget() {
    if ( localCorpusDispatchTarget == null) return Optional.empty();
    // Only dispatch directly while the local node's group has sufficient coverage ...
    Group localSearchGroup = groups.get(localCorpusDispatchTarget.group());
    if ( ! localSearchGroup.hasSufficientCoverage()) return Optional.empty();
    // ... and the node is not known to be down. isWorking() is three-state:
    // null (unknown) still allows direct dispatch; only an explicit FALSE blocks it.
    if ( localCorpusDispatchTarget.isWorking() == Boolean.FALSE) return Optional.empty();
    return Optional.of(localCorpusDispatchTarget);
}
private void updateWorkingState(Node node, boolean isWorking) {
node.setWorking(isWorking);
updateVipStatusOnNodeChange(node, isWorking);
}
/** Called by the cluster monitor when node state changes to working */
@Override
public void working(Node node) {
updateWorkingState(node, true);
}
/** Called by the cluster monitor when node state changes to failed */
@Override
public void failed(Node node) {
updateWorkingState(node, false);
}
private void updateSufficientCoverage(Group group, boolean sufficientCoverage) {
if (sufficientCoverage == group.hasSufficientCoverage()) return;
group.setHasSufficientCoverage(sufficientCoverage);
updateVipStatusOnCoverageChange(group, sufficientCoverage);
}
// Adjusts this container's VIP status after a node changed between working and failed.
private void updateVipStatusOnNodeChange(Node node, boolean nodeIsWorking) {
    if (localCorpusDispatchTarget == null) { // no local corpus node: judge the whole cluster
        if (hasInformationAboutAllNodes())
            setInRotationOnlyIf(hasWorkingNodes());
    }
    else if (usesLocalCorpusIn(node)) { // the changed node is our local corpus node
        // NOTE(review): map(Group::nodes).count() yields the number of groups, not the
        // total number of nodes — confirm whether a total node count was intended here.
        if (nodeIsWorking || groups().stream().map(Group::nodes).count() > 1)
            setInRotationOnlyIf(nodeIsWorking);
    }
}
// Adjusts this container's VIP status after a group's coverage verdict changed.
// Only relevant when we have a local corpus node, and only for that node's own group;
// without a local corpus node, coverage changes do not affect rotation.
private void updateVipStatusOnCoverageChange(Group group, boolean sufficientCoverage) {
    if (localCorpusDispatchTarget != null && usesLocalCorpusIn(group))
        setInRotationOnlyIf(sufficientCoverage);
}
private void setInRotationOnlyIf(boolean inRotation) {
if (inRotation)
vipStatus.addToRotation(clusterId);
else
vipStatus.removeFromRotation(clusterId);
}
public boolean hasInformationAboutAllNodes() {
return groups().stream().allMatch(g -> g.nodes().stream().allMatch(node -> node.isWorking() != null));
}
private boolean hasWorkingNodes() {
return groups().stream().anyMatch(g -> g.nodes().stream().anyMatch(node -> node.isWorking() != Boolean.FALSE));
}
private boolean usesLocalCorpusIn(Node node) {
return node.equals(localCorpusDispatchTarget);
}
private boolean usesLocalCorpusIn(Group group) {
return (localCorpusDispatchTarget != null) && localCorpusDispatchTarget.group() == group.id();
}
/** Used by the cluster monitor to manage node status */
@Override
public void ping(ClusterMonitor clusterMonitor, Node node, Executor executor) {
Pinger pinger = pingFactory.createPinger(node, clusterMonitor, new PongCallback(node, clusterMonitor));
pinger.ping();
}
// Single-group case: there is no alternative group, so coverage is always accepted.
private void pingIterationCompletedSingleGroup() {
    Group group = groups().iterator().next();
    group.aggregateNodeValues();
    // With one group we take whatever we get ...
    updateSufficientCoverage(group, true);
    // ... and compute the verdict (the group measured against its own document count)
    // only to feed the coverage-change tracking/logging below.
    boolean sufficientCoverage = groups.isGroupCoverageSufficient(group.activeDocuments(), group.activeDocuments());
    trackGroupCoverageChanges(group, sufficientCoverage, group.activeDocuments());
}
// Multi-group case: each group's document count is judged against the median across groups.
private void pingIterationCompletedMultipleGroups() {
    groups().forEach(Group::aggregateNodeValues);
    long medianDocuments = groups.medianDocumentsPerGroup();
    for (Group group : groups()) {
        boolean sufficientCoverage = groups.isGroupCoverageSufficient(group.activeDocuments(), medianDocuments);
        updateSufficientCoverage(group, sufficientCoverage);
        trackGroupCoverageChanges(group, sufficientCoverage, medianDocuments);
    }
}
/**
* Update statistics after a round of issuing pings.
* Note that this doesn't wait for pings to return, so it will typically accumulate data from
* last rounds pinging, or potentially (although unlikely) some combination of new and old data.
*/
@Override
public void pingIterationCompleted() {
if (groups.size() == 1) {
pingIterationCompletedSingleGroup();
} else {
pingIterationCompletedMultipleGroups();
}
}
/**
 * Calculate whether a subset of nodes in a group has enough coverage, and log
 * transitions: one info line when full coverage is regained, and a warning —
 * repeated at most every 30 seconds — while coverage is reduced.
 */
private void trackGroupCoverageChanges(Group group, boolean fullCoverage, long medianDocuments) {
    if ( ! hasInformationAboutAllNodes()) return; // with unknown node states the report would be misleading
    boolean changed = group.fullCoverageStatusChanged(fullCoverage);
    if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) {
        nextLogTime = System.currentTimeMillis() + 30 * 1000; // throttle repeated reduced-coverage warnings
        if (fullCoverage) {
            log.info("Cluster " + clusterId + ": " + group + " has full coverage. " +
                     "Active documents: " + group.activeDocuments() + "/" + medianDocuments + ", " +
                     "Target active documents: " + group.targetActiveDocuments() + ", " +
                     "working nodes: " + group.workingNodes() + "/" + group.nodes().size());
        } else {
            // List the nodes that are down or still unknown, one per line.
            StringBuilder unresponsive = new StringBuilder();
            for (var node : group.nodes()) {
                if (node.isWorking() != Boolean.TRUE)
                    unresponsive.append('\n').append(node);
            }
            log.warning("Cluster " + clusterId + ": " + group + " has reduced coverage: " +
                        "Active documents: " + group.activeDocuments() + "/" + medianDocuments + ", " +
                        "Target active documents: " + group.targetActiveDocuments() + ", " +
                        "working nodes: " + group.workingNodes() + "/" + group.nodes().size() +
                        ", unresponsive nodes: " + (unresponsive.toString().isEmpty() ? " none" : unresponsive));
        }
    }
}
/** Routes the outcome of one node's ping back into the node state and the cluster monitor. */
private static class PongCallback implements PongHandler {
    private final ClusterMonitor<Node> clusterMonitor;
    private final Node node;
    PongCallback(Node node, ClusterMonitor<Node> clusterMonitor) {
        this.node = node;
        this.clusterMonitor = clusterMonitor;
    }
    @Override
    public void handle(Pong pong) {
        if (pong.badResponse()) {
            clusterMonitor.failed(node, pong.error().get());
        } else {
            if (pong.activeDocuments().isPresent()) {
                node.setActiveDocuments(pong.activeDocuments().get());
                // NOTE(review): targetActiveDocuments().get() is guarded only by
                // activeDocuments() being present — confirm the two are always set together.
                node.setTargetActiveDocuments(pong.targetActiveDocuments().get());
                node.setBlockingWrites(pong.isBlockingWrites());
            }
            clusterMonitor.responded(node);
        }
    }
}
} | class SearchCluster implements NodeManager<Node> {
private static final Logger log = Logger.getLogger(SearchCluster.class.getName());
private final String clusterId;
private final VipStatus vipStatus;
private final PingFactory pingFactory;
private final SearchGroupsImpl groups;
private volatile long nextLogTime = 0;
/**
* A search node on this local machine having the entire corpus, which we therefore
* should prefer to dispatch directly to, or empty if there is no such local search node.
* If there is one, we also maintain the VIP status of this container based on the availability
* of the corpus on this local node (up + has coverage), such that this node is taken out of rotation
* if it only queries this cluster when the local node cannot be used, to avoid unnecessary
* cross-node network traffic.
*/
private final Node localCorpusDispatchTarget;
public SearchCluster(String clusterId, double minActivedocsPercentage,
DispatchNodesConfig nodesConfig,
VipStatus vipStatus, PingFactory pingFactory) {
this(clusterId, minActivedocsPercentage, toNodes(nodesConfig), vipStatus, pingFactory);
}
public SearchCluster(String clusterId, double minActivedocsPercentage, List<Node> nodes,
VipStatus vipStatus, PingFactory pingFactory) {
this(clusterId, toGroups(nodes, minActivedocsPercentage), vipStatus, pingFactory);
}
public SearchCluster(String clusterId, SearchGroupsImpl groups, VipStatus vipStatus, PingFactory pingFactory) {
this.clusterId = clusterId;
this.vipStatus = vipStatus;
this.pingFactory = pingFactory;
this.groups = groups;
this.localCorpusDispatchTarget = findLocalCorpusDispatchTarget(HostName.getLocalhost(), groups);
}
@Override
public String name() { return clusterId; }
public void addMonitoring(ClusterMonitor<Node> clusterMonitor) {
for (var group : groups()) {
for (var node : group.nodes())
clusterMonitor.add(node, true);
}
}
private static Node findLocalCorpusDispatchTarget(String selfHostname, SearchGroups groups) {
List<Node> localSearchNodes = groups.groups().stream().flatMap(g -> g.nodes().stream())
.filter(node -> node.hostname().equals(selfHostname))
.toList();
if (localSearchNodes.size() != 1) return null;
Node localSearchNode = localSearchNodes.iterator().next();
Group localSearchGroup = groups.get(localSearchNode.group());
if (localSearchGroup.nodes().size() != 1) return null;
return localSearchNode;
}
private static List<Node> toNodes(DispatchNodesConfig nodesConfig) {
return nodesConfig.node().stream()
.map(n -> new Node(n.key(), n.host(), n.group()))
.toList();
}
public SearchGroups groupList() { return groups; }
public Group group(int id) { return groups.get(id); }
private Collection<Group> groups() { return groups.groups(); }
public int groupsWithSufficientCoverage() {
return (int)groups().stream().filter(Group::hasSufficientCoverage).count();
}
/**
* Returns the single, local node we should dispatch queries directly to,
* or empty if we should not dispatch directly.
*/
public Optional<Node> localCorpusDispatchTarget() {
if ( localCorpusDispatchTarget == null) return Optional.empty();
Group localSearchGroup = groups.get(localCorpusDispatchTarget.group());
if ( ! localSearchGroup.hasSufficientCoverage()) return Optional.empty();
if ( localCorpusDispatchTarget.isWorking() == Boolean.FALSE) return Optional.empty();
return Optional.of(localCorpusDispatchTarget);
}
private void updateWorkingState(Node node, boolean isWorking) {
node.setWorking(isWorking);
updateVipStatusOnNodeChange(node, isWorking);
}
/** Called by the cluster monitor when node state changes to working */
@Override
public void working(Node node) {
updateWorkingState(node, true);
}
/** Called by the cluster monitor when node state changes to failed */
@Override
public void failed(Node node) {
updateWorkingState(node, false);
}
private void updateSufficientCoverage(Group group, boolean sufficientCoverage) {
if (sufficientCoverage == group.hasSufficientCoverage()) return;
group.setHasSufficientCoverage(sufficientCoverage);
updateVipStatusOnCoverageChange(group, sufficientCoverage);
}
private void updateVipStatusOnNodeChange(Node node, boolean nodeIsWorking) {
if (localCorpusDispatchTarget == null) {
if (hasInformationAboutAllNodes())
setInRotationOnlyIf(hasWorkingNodes());
}
else if (usesLocalCorpusIn(node)) {
if (nodeIsWorking || groups().stream().map(Group::nodes).count() > 1)
setInRotationOnlyIf(nodeIsWorking);
}
}
private void updateVipStatusOnCoverageChange(Group group, boolean sufficientCoverage) {
if ( localCorpusDispatchTarget == null) {
}
else if (usesLocalCorpusIn(group)) {
setInRotationOnlyIf(sufficientCoverage);
}
}
private void setInRotationOnlyIf(boolean inRotation) {
if (inRotation)
vipStatus.addToRotation(clusterId);
else
vipStatus.removeFromRotation(clusterId);
}
public boolean hasInformationAboutAllNodes() {
return groups().stream().allMatch(g -> g.nodes().stream().allMatch(node -> node.isWorking() != null));
}
private boolean hasWorkingNodes() {
return groups().stream().anyMatch(g -> g.nodes().stream().anyMatch(node -> node.isWorking() != Boolean.FALSE));
}
private boolean usesLocalCorpusIn(Node node) {
return node.equals(localCorpusDispatchTarget);
}
private boolean usesLocalCorpusIn(Group group) {
return (localCorpusDispatchTarget != null) && localCorpusDispatchTarget.group() == group.id();
}
/** Used by the cluster monitor to manage node status */
@Override
public void ping(ClusterMonitor clusterMonitor, Node node, Executor executor) {
Pinger pinger = pingFactory.createPinger(node, clusterMonitor, new PongCallback(node, clusterMonitor));
pinger.ping();
}
private void pingIterationCompletedSingleGroup() {
Group group = groups().iterator().next();
group.aggregateNodeValues();
updateSufficientCoverage(group, true);
boolean sufficientCoverage = groups.isGroupCoverageSufficient(group.activeDocuments(), group.activeDocuments());
trackGroupCoverageChanges(group, sufficientCoverage, group.activeDocuments());
}
private void pingIterationCompletedMultipleGroups() {
groups().forEach(Group::aggregateNodeValues);
long medianDocuments = groups.medianDocumentsPerGroup();
for (Group group : groups()) {
boolean sufficientCoverage = groups.isGroupCoverageSufficient(group.activeDocuments(), medianDocuments);
updateSufficientCoverage(group, sufficientCoverage);
trackGroupCoverageChanges(group, sufficientCoverage, medianDocuments);
}
}
/**
* Update statistics after a round of issuing pings.
* Note that this doesn't wait for pings to return, so it will typically accumulate data from
* last rounds pinging, or potentially (although unlikely) some combination of new and old data.
*/
@Override
public void pingIterationCompleted() {
if (groups.size() == 1) {
pingIterationCompletedSingleGroup();
} else {
pingIterationCompletedMultipleGroups();
}
}
/**
* Calculate whether a subset of nodes in a group has enough coverage
*/
private void trackGroupCoverageChanges(Group group, boolean fullCoverage, long medianDocuments) {
if ( ! hasInformationAboutAllNodes()) return;
boolean changed = group.fullCoverageStatusChanged(fullCoverage);
if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) {
nextLogTime = System.currentTimeMillis() + 30 * 1000;
if (fullCoverage) {
log.info("Cluster " + clusterId + ": " + group + " has full coverage. " +
"Active documents: " + group.activeDocuments() + "/" + medianDocuments + ", " +
"Target active documents: " + group.targetActiveDocuments() + ", " +
"working nodes: " + group.workingNodes() + "/" + group.nodes().size());
} else {
StringBuilder unresponsive = new StringBuilder();
for (var node : group.nodes()) {
if (node.isWorking() != Boolean.TRUE)
unresponsive.append('\n').append(node);
}
log.warning("Cluster " + clusterId + ": " + group + " has reduced coverage: " +
"Active documents: " + group.activeDocuments() + "/" + medianDocuments + ", " +
"Target active documents: " + group.targetActiveDocuments() + ", " +
"working nodes: " + group.workingNodes() + "/" + group.nodes().size() +
", unresponsive nodes: " + (unresponsive.toString().isEmpty() ? " none" : unresponsive));
}
}
}
private static class PongCallback implements PongHandler {
private final ClusterMonitor<Node> clusterMonitor;
private final Node node;
PongCallback(Node node, ClusterMonitor<Node> clusterMonitor) {
this.node = node;
this.clusterMonitor = clusterMonitor;
}
@Override
public void handle(Pong pong) {
if (pong.badResponse()) {
clusterMonitor.failed(node, pong.error().get());
} else {
if (pong.activeDocuments().isPresent()) {
node.setActiveDocuments(pong.activeDocuments().get());
node.setTargetActiveDocuments(pong.targetActiveDocuments().get());
node.setBlockingWrites(pong.isBlockingWrites());
}
clusterMonitor.responded(node);
}
}
}
} |
Ouch, you are right, will fix | public static FileDBRegistry create(AddFileInterface manager, Reader persistedState) {
try (BufferedReader reader = new BufferedReader(persistedState)) {
String ignoredFileSourceHost = reader.readLine();
if (ignoredFileSourceHost == null)
throw new RuntimeException("No file source host");
return new FileDBRegistry(manager, decode(reader), false);
} catch (IOException e) {
throw new RuntimeException("Error while reading pre-generated file registry", e);
}
} | return new FileDBRegistry(manager, decode(reader), false); | public static FileDBRegistry create(AddFileInterface manager, Reader persistedState) {
try (BufferedReader reader = new BufferedReader(persistedState)) {
String ignoredFileSourceHost = reader.readLine();
if (ignoredFileSourceHost == null)
throw new RuntimeException("No file source host");
return new FileDBRegistry(manager, decode(reader), true);
} catch (IOException e) {
throw new RuntimeException("Error while reading pre-generated file registry", e);
}
} | class FileDBRegistry implements FileRegistry {
private final boolean silenceNonExistingFiles;
private final AddFileInterface manager;
private final Map<String, FileReference> fileReferenceCache = new HashMap<>();
private static final String entryDelimiter = "\t";
private static final Pattern entryDelimiterPattern = Pattern.compile(entryDelimiter, Pattern.LITERAL);
/** Creates an empty registry which fails on files that do not exist. */
public FileDBRegistry(AddFileInterface manager) {
    this(manager, Map.of(), false);
}
// knownReferences: entries decoded from a previously exported registry, preloaded into the cache.
// silenceNonExistingFiles: when true, addFile returns a placeholder reference for missing files
// instead of throwing.
private FileDBRegistry(AddFileInterface manager, Map<String, FileReference> knownReferences, boolean silenceNonExistingFiles) {
    this.silenceNonExistingFiles = silenceNonExistingFiles;
    this.manager = manager;
    fileReferenceCache.putAll(knownReferences);
}
static Map<String, FileReference> decode(BufferedReader reader) {
Map<String, FileReference> refs = new HashMap<>();
try {
String line;
while ((line = reader.readLine()) != null) {
String[] parts = entryDelimiterPattern.split(line);
if (parts.length < 2)
throw new IllegalArgumentException("Cannot split '" + line + "' into two parts");
refs.put(parts[0], new FileReference(parts[1]));
}
} catch (IOException e) {
throw new RuntimeException("Error while reading pre-generated file registry", e);
}
return refs;
}
@Override
public synchronized FileReference addFile(String relativePath) {
if (relativePath.startsWith("/"))
throw new IllegalArgumentException(relativePath + " is not relative");
Optional<FileReference> cachedReference = Optional.ofNullable(fileReferenceCache.get(relativePath));
return cachedReference.orElseGet(() -> {
try {
FileReference newRef = manager.addFile(Path.fromString(relativePath));
fileReferenceCache.put(relativePath, newRef);
return newRef;
} catch (FileNotFoundException e) {
if (silenceNonExistingFiles) {
return new FileReference("non-existing-file");
} else {
throw new IllegalArgumentException(e);
}
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
}
);
}
@Override
public synchronized FileReference addUri(String uri) {
String relativePath = uriToRelativeFile(uri);
Optional<FileReference> cachedReference = Optional.ofNullable(fileReferenceCache.get(uri));
return cachedReference.orElseGet(() -> {
FileReference newRef = manager.addUri(uri, Path.fromString(relativePath));
fileReferenceCache.put(uri, newRef);
return newRef;
});
}
@Override
public FileReference addBlob(String blobName, ByteBuffer blob) {
String relativePath = blobToRelativeFile(blobName);
synchronized (this) {
Optional<FileReference> cachedReference = Optional.ofNullable(fileReferenceCache.get(blobName));
return cachedReference.orElseGet(() -> {
FileReference newRef = manager.addBlob(blob, Path.fromString(relativePath));
fileReferenceCache.put(blobName, newRef);
return newRef;
});
}
}
@Override
public synchronized List<Entry> export() {
List<Entry> entries = new ArrayList<>();
for (Map.Entry<String, FileReference> entry : fileReferenceCache.entrySet()) {
entries.add(new Entry(entry.getKey(), entry.getValue()));
}
return entries;
}
synchronized Map<String, FileReference> getMap() {
return ImmutableMap.copyOf(fileReferenceCache);
}
public static String exportRegistry(FileRegistry registry) {
List<Entry> entries = registry.export();
StringBuilder builder = new StringBuilder();
builder.append(HostName.getLocalhost()).append('\n');
for (FileRegistry.Entry entry : entries) {
builder.append(entry.relativePath).append(entryDelimiter).append(entry.reference.value()).append('\n');
}
return builder.toString();
}
private static String uriToRelativeFile(String uri) {
String relative = "uri/" + XXHashFactory.fastestJavaInstance().hash64().hash(ByteBuffer.wrap(Utf8.toBytes(uri)), 0);
if (uri.endsWith(".json")) {
relative += ".json";
} else if (uri.endsWith(".json.lz4")) {
relative += ".json.lz4";
} else if (uri.endsWith(".lz4")) {
relative += ".lz4";
}
return relative;
}
private static String blobToRelativeFile(String blobName) {
return "blob/" + blobName;
}
} | class FileDBRegistry implements FileRegistry {
private final boolean silenceNonExistingFiles;
private final AddFileInterface manager;
private final Map<String, FileReference> fileReferenceCache = new HashMap<>();
private static final String entryDelimiter = "\t";
private static final Pattern entryDelimiterPattern = Pattern.compile(entryDelimiter, Pattern.LITERAL);
public FileDBRegistry(AddFileInterface manager) {
this(manager, Map.of(), false);
}
private FileDBRegistry(AddFileInterface manager, Map<String, FileReference> knownReferences, boolean silenceNonExistingFiles) {
this.silenceNonExistingFiles = silenceNonExistingFiles;
this.manager = manager;
fileReferenceCache.putAll(knownReferences);
}
static Map<String, FileReference> decode(BufferedReader reader) {
Map<String, FileReference> refs = new HashMap<>();
try {
String line;
while ((line = reader.readLine()) != null) {
String[] parts = entryDelimiterPattern.split(line);
if (parts.length < 2)
throw new IllegalArgumentException("Cannot split '" + line + "' into two parts");
refs.put(parts[0], new FileReference(parts[1]));
}
} catch (IOException e) {
throw new RuntimeException("Error while reading pre-generated file registry", e);
}
return refs;
}
@Override
public synchronized FileReference addFile(String relativePath) {
if (relativePath.startsWith("/"))
throw new IllegalArgumentException(relativePath + " is not relative");
Optional<FileReference> cachedReference = Optional.ofNullable(fileReferenceCache.get(relativePath));
return cachedReference.orElseGet(() -> {
try {
FileReference newRef = manager.addFile(Path.fromString(relativePath));
fileReferenceCache.put(relativePath, newRef);
return newRef;
} catch (FileNotFoundException e) {
if (silenceNonExistingFiles) {
return new FileReference("non-existing-file");
} else {
throw new IllegalArgumentException(e);
}
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
}
);
}
@Override
public synchronized FileReference addUri(String uri) {
String relativePath = uriToRelativeFile(uri);
Optional<FileReference> cachedReference = Optional.ofNullable(fileReferenceCache.get(uri));
return cachedReference.orElseGet(() -> {
FileReference newRef = manager.addUri(uri, Path.fromString(relativePath));
fileReferenceCache.put(uri, newRef);
return newRef;
});
}
@Override
public FileReference addBlob(String blobName, ByteBuffer blob) {
String relativePath = blobToRelativeFile(blobName);
synchronized (this) {
Optional<FileReference> cachedReference = Optional.ofNullable(fileReferenceCache.get(blobName));
return cachedReference.orElseGet(() -> {
FileReference newRef = manager.addBlob(blob, Path.fromString(relativePath));
fileReferenceCache.put(blobName, newRef);
return newRef;
});
}
}
@Override
public synchronized List<Entry> export() {
List<Entry> entries = new ArrayList<>();
for (Map.Entry<String, FileReference> entry : fileReferenceCache.entrySet()) {
entries.add(new Entry(entry.getKey(), entry.getValue()));
}
return entries;
}
synchronized Map<String, FileReference> getMap() {
return ImmutableMap.copyOf(fileReferenceCache);
}
public static String exportRegistry(FileRegistry registry) {
List<Entry> entries = registry.export();
StringBuilder builder = new StringBuilder();
builder.append(HostName.getLocalhost()).append('\n');
for (FileRegistry.Entry entry : entries) {
builder.append(entry.relativePath).append(entryDelimiter).append(entry.reference.value()).append('\n');
}
return builder.toString();
}
private static String uriToRelativeFile(String uri) {
String relative = "uri/" + XXHashFactory.fastestJavaInstance().hash64().hash(ByteBuffer.wrap(Utf8.toBytes(uri)), 0);
if (uri.endsWith(".json")) {
relative += ".json";
} else if (uri.endsWith(".json.lz4")) {
relative += ".json.lz4";
} else if (uri.endsWith(".lz4")) {
relative += ".lz4";
}
return relative;
}
private static String blobToRelativeFile(String blobName) {
return "blob/" + blobName;
}
} |
I guess this matches that change ... so it's intentional. | public void importAndExport() throws IOException {
TemporaryFolder tmpDir = new TemporaryFolder();
tmpDir.create();
AddFileInterface fileManager = new ApplicationFileManager(new File(APP), new FileDirectory(tmpDir.newFolder()), false);
FileRegistry fileRegistry = new FileDBRegistry(fileManager);
assertEquals(FOO_REF, fileRegistry.addFile(FOO_FILE));
try {
fileRegistry.addFile(NO_FOO_FILE);
fail();
} catch (IllegalArgumentException e) {
assertEquals("src/test/apps/zkapp/files/no_foo.json (No such file or directory)", e.getCause().getMessage());
}
try {
fileRegistry.addFile(BOO_FILE);
fail();
} catch (IllegalArgumentException e) {
assertEquals("/files/no_foo.json is not relative", e.getMessage());
}
try {
fileRegistry.addFile(BAR_FILE);
fail();
} catch (IllegalArgumentException e) {
assertEquals("'..' is not allowed in path", e.getMessage());
}
assertEquals(BLOB_REF, fileRegistry.addBlob(BLOB_NAME, ByteBuffer.wrap(BLOB.getBytes(StandardCharsets.UTF_8))));
String serializedRegistry = FileDBRegistry.exportRegistry(fileRegistry);
FileDBRegistry importedRegistry = FileDBRegistry.create(fileManager, new StringReader(serializedRegistry));
assertEquals(Set.of(BLOB_NAME, FOO_FILE), importedRegistry.getMap().keySet());
assertEquals(BLOB_REF, importedRegistry.getMap().get(BLOB_NAME));
assertEquals(FOO_REF, importedRegistry.getMap().get(FOO_FILE));
assertEquals(2, importedRegistry.export().size());
checkConsistentEntry(fileRegistry.export().get(0), importedRegistry);
checkConsistentEntry(fileRegistry.export().get(1), importedRegistry);
try {
importedRegistry.addFile(NO_FOO_FILE);
} catch (Exception e ) {
assertEquals("java.io.FileNotFoundException: src/test/apps/zkapp/files/no_foo.json (No such file or directory)", e.getMessage());
}
assertEquals(2, importedRegistry.export().size());
tmpDir.delete();
} | assertEquals("java.io.FileNotFoundException: src/test/apps/zkapp/files/no_foo.json (No such file or directory)", e.getMessage()); | public void importAndExport() throws IOException {
TemporaryFolder tmpDir = new TemporaryFolder();
tmpDir.create();
AddFileInterface fileManager =
new ApplicationFileManager(new File(APP), new FileDirectory(tmpDir.newFolder(), new InMemoryFlagSource()), false);
FileRegistry fileRegistry = new FileDBRegistry(fileManager);
assertEquals(FOO_REF, fileRegistry.addFile(FOO_FILE));
try {
fileRegistry.addFile(NO_FOO_FILE);
fail();
} catch (IllegalArgumentException e) {
assertEquals("src/test/apps/zkapp/files/no_foo.json (No such file or directory)", e.getCause().getMessage());
}
try {
fileRegistry.addFile(BOO_FILE);
fail();
} catch (IllegalArgumentException e) {
assertEquals("/files/no_foo.json is not relative", e.getMessage());
}
try {
fileRegistry.addFile(BAR_FILE);
fail();
} catch (IllegalArgumentException e) {
assertEquals("'..' is not allowed in path", e.getMessage());
}
assertEquals(BLOB_REF, fileRegistry.addBlob(BLOB_NAME, ByteBuffer.wrap(BLOB.getBytes(StandardCharsets.UTF_8))));
String serializedRegistry = FileDBRegistry.exportRegistry(fileRegistry);
FileDBRegistry importedRegistry = FileDBRegistry.create(fileManager, new StringReader(serializedRegistry));
assertEquals(Set.of(BLOB_NAME, FOO_FILE), importedRegistry.getMap().keySet());
assertEquals(BLOB_REF, importedRegistry.getMap().get(BLOB_NAME));
assertEquals(FOO_REF, importedRegistry.getMap().get(FOO_FILE));
assertEquals(2, importedRegistry.export().size());
checkConsistentEntry(fileRegistry.export().get(0), importedRegistry);
checkConsistentEntry(fileRegistry.export().get(1), importedRegistry);
importedRegistry.addFile(NO_FOO_FILE);
assertEquals(2, importedRegistry.export().size());
tmpDir.delete();
} | class FileDBRegistryTestCase {
private static final String BLOB = "Some blob";
private static final String APP = "src/test/apps/zkapp";
private static final String FOO_FILE = "files/foo.json";
private static final String NO_FOO_FILE = "files/no_foo.json";
private static final String BOO_FILE = "/files/no_foo.json";
private static final String BAR_FILE = "../files/no_foo.json";
private static final String BLOB_NAME = "././myblob.name";
private static final FileReference BLOB_REF = new FileReference("12f292a25163dd9");
private static final FileReference FOO_REF = new FileReference("b5ce94ca1feae86c");
@Test
public void uriResourcesNotSupportedWhenHosted() {
assertEquals("URI type resources are not supported in this Vespa cloud",
assertThrows(IllegalArgumentException.class,
() -> new ApplicationFileManager(null, null, true).addUri(null, null))
.getMessage());
}
@Test
void checkConsistentEntry(FileRegistry.Entry entry, FileRegistry registry) {
assertEquals(entry.reference, registry.addFile(entry.relativePath));
}
} | class FileDBRegistryTestCase {
private static final String BLOB = "Some blob";
private static final String APP = "src/test/apps/zkapp";
private static final String FOO_FILE = "files/foo.json";
private static final String NO_FOO_FILE = "files/no_foo.json";
private static final String BOO_FILE = "/files/no_foo.json";
private static final String BAR_FILE = "../files/no_foo.json";
private static final String BLOB_NAME = "././myblob.name";
private static final FileReference BLOB_REF = new FileReference("12f292a25163dd9");
private static final FileReference FOO_REF = new FileReference("b5ce94ca1feae86c");
@Test
public void uriResourcesNotSupportedWhenHosted() {
assertEquals("URI type resources are not supported in this Vespa cloud",
assertThrows(IllegalArgumentException.class,
() -> new ApplicationFileManager(null, null, true).addUri(null, null))
.getMessage());
}
@Test
void checkConsistentEntry(FileRegistry.Entry entry, FileRegistry registry) {
assertEquals(entry.reference, registry.addFile(entry.relativePath));
}
} |
Won't you need to hold the lock while evaluating whether or not to delete the file, too? The simplest is perhaps to pass a condition to evaluate inside the delete method, and re-run it while holding the lock there? | public List<String> deleteUnusedFileDistributionReferences(FileDirectory fileDirectory, Duration keepFileReferencesDuration) {
Set<String> fileReferencesInUse = getFileReferencesInUse();
log.log(Level.FINE, () -> "File references in use : " + fileReferencesInUse);
Instant instant = clock.instant().minus(keepFileReferencesDuration);
log.log(Level.FINE, () -> "Remove unused file references last modified before " + instant);
List<String> fileReferencesToDelete = sortedUnusedFileReferences(fileDirectory.getRoot(), fileReferencesInUse, instant);
if (fileReferencesToDelete.size() > 0) {
log.log(Level.FINE, () -> "Will delete file references not in use: " + fileReferencesToDelete);
fileReferencesToDelete.forEach(fileReference -> fileDirectory.delete(new FileReference(fileReference)));
}
return fileReferencesToDelete;
} | fileReferencesToDelete.forEach(fileReference -> fileDirectory.delete(new FileReference(fileReference))); | public List<String> deleteUnusedFileDistributionReferences(FileDirectory fileDirectory, Duration keepFileReferencesDuration) {
Set<String> fileReferencesInUse = getFileReferencesInUse();
log.log(Level.FINE, () -> "File references in use : " + fileReferencesInUse);
Instant instant = clock.instant().minus(keepFileReferencesDuration);
log.log(Level.FINE, () -> "Remove unused file references last modified before " + instant);
List<String> fileReferencesToDelete = sortedUnusedFileReferences(fileDirectory.getRoot(), fileReferencesInUse, instant);
if (fileReferencesToDelete.size() > 0) {
log.log(Level.FINE, () -> "Will delete file references not in use: " + fileReferencesToDelete);
fileReferencesToDelete.forEach(fileReference -> fileDirectory.delete(new FileReference(fileReference), this::isFileReferenceInUse));
}
return fileReferencesToDelete;
} | class Builder {
private TenantRepository tenantRepository;
private Optional<Provisioner> hostProvisioner;
private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher());
private Clock clock = Clock.systemUTC();
private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build();
private Orchestrator orchestrator;
private LogRetriever logRetriever = new LogRetriever();
private TesterClient testerClient = new TesterClient();
private Metric metric = new NullMetric();
private SecretStoreValidator secretStoreValidator = new SecretStoreValidator(new SecretStoreProvider().get());
private FlagSource flagSource = new InMemoryFlagSource();
private ConfigConvergenceChecker configConvergenceChecker = new ConfigConvergenceChecker();
public Builder withTenantRepository(TenantRepository tenantRepository) {
this.tenantRepository = tenantRepository;
return this;
}
public Builder withClock(Clock clock) {
this.clock = clock;
return this;
}
public Builder withProvisioner(Provisioner provisioner) {
if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
this.hostProvisioner = Optional.ofNullable(provisioner);
return this;
}
public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) {
if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
this.hostProvisioner = hostProvisionerProvider.getHostProvisioner();
return this;
}
public Builder withHttpProxy(HttpProxy httpProxy) {
this.httpProxy = httpProxy;
return this;
}
public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) {
this.configserverConfig = configserverConfig;
return this;
}
public Builder withOrchestrator(Orchestrator orchestrator) {
this.orchestrator = orchestrator;
return this;
}
public Builder withLogRetriever(LogRetriever logRetriever) {
this.logRetriever = logRetriever;
return this;
}
public Builder withTesterClient(TesterClient testerClient) {
this.testerClient = testerClient;
return this;
}
public Builder withFlagSource(FlagSource flagSource) {
this.flagSource = flagSource;
return this;
}
public Builder withMetric(Metric metric) {
this.metric = metric;
return this;
}
public Builder withSecretStoreValidator(SecretStoreValidator secretStoreValidator) {
this.secretStoreValidator = secretStoreValidator;
return this;
}
public Builder withConfigConvergenceChecker(ConfigConvergenceChecker configConvergenceChecker) {
this.configConvergenceChecker = configConvergenceChecker;
return this;
}
public ApplicationRepository build() {
return new ApplicationRepository(tenantRepository,
hostProvisioner,
InfraDeployerProvider.empty().getInfraDeployer(),
configConvergenceChecker,
httpProxy,
configserverConfig,
orchestrator,
logRetriever,
clock,
testerClient,
metric,
secretStoreValidator,
ClusterReindexingStatusClient.DUMMY_INSTANCE,
flagSource);
}
} | class Builder {
private TenantRepository tenantRepository;
private Optional<Provisioner> hostProvisioner;
private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher());
private Clock clock = Clock.systemUTC();
private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build();
private Orchestrator orchestrator;
private LogRetriever logRetriever = new LogRetriever();
private TesterClient testerClient = new TesterClient();
private Metric metric = new NullMetric();
private SecretStoreValidator secretStoreValidator = new SecretStoreValidator(new SecretStoreProvider().get());
private FlagSource flagSource = new InMemoryFlagSource();
private ConfigConvergenceChecker configConvergenceChecker = new ConfigConvergenceChecker();
public Builder withTenantRepository(TenantRepository tenantRepository) {
this.tenantRepository = tenantRepository;
return this;
}
public Builder withClock(Clock clock) {
this.clock = clock;
return this;
}
public Builder withProvisioner(Provisioner provisioner) {
if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
this.hostProvisioner = Optional.ofNullable(provisioner);
return this;
}
public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) {
if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
this.hostProvisioner = hostProvisionerProvider.getHostProvisioner();
return this;
}
public Builder withHttpProxy(HttpProxy httpProxy) {
this.httpProxy = httpProxy;
return this;
}
public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) {
this.configserverConfig = configserverConfig;
return this;
}
public Builder withOrchestrator(Orchestrator orchestrator) {
this.orchestrator = orchestrator;
return this;
}
public Builder withLogRetriever(LogRetriever logRetriever) {
this.logRetriever = logRetriever;
return this;
}
public Builder withTesterClient(TesterClient testerClient) {
this.testerClient = testerClient;
return this;
}
public Builder withFlagSource(FlagSource flagSource) {
this.flagSource = flagSource;
return this;
}
public Builder withMetric(Metric metric) {
this.metric = metric;
return this;
}
public Builder withSecretStoreValidator(SecretStoreValidator secretStoreValidator) {
this.secretStoreValidator = secretStoreValidator;
return this;
}
public Builder withConfigConvergenceChecker(ConfigConvergenceChecker configConvergenceChecker) {
this.configConvergenceChecker = configConvergenceChecker;
return this;
}
public ApplicationRepository build() {
return new ApplicationRepository(tenantRepository,
hostProvisioner,
InfraDeployerProvider.empty().getInfraDeployer(),
configConvergenceChecker,
httpProxy,
configserverConfig,
orchestrator,
logRetriever,
clock,
testerClient,
metric,
secretStoreValidator,
ClusterReindexingStatusClient.DUMMY_INSTANCE,
flagSource);
}
} |
No, I thought this was caused by another change, but this should be reverted | public void importAndExport() throws IOException {
TemporaryFolder tmpDir = new TemporaryFolder();
tmpDir.create();
AddFileInterface fileManager = new ApplicationFileManager(new File(APP), new FileDirectory(tmpDir.newFolder()), false);
FileRegistry fileRegistry = new FileDBRegistry(fileManager);
assertEquals(FOO_REF, fileRegistry.addFile(FOO_FILE));
try {
fileRegistry.addFile(NO_FOO_FILE);
fail();
} catch (IllegalArgumentException e) {
assertEquals("src/test/apps/zkapp/files/no_foo.json (No such file or directory)", e.getCause().getMessage());
}
try {
fileRegistry.addFile(BOO_FILE);
fail();
} catch (IllegalArgumentException e) {
assertEquals("/files/no_foo.json is not relative", e.getMessage());
}
try {
fileRegistry.addFile(BAR_FILE);
fail();
} catch (IllegalArgumentException e) {
assertEquals("'..' is not allowed in path", e.getMessage());
}
assertEquals(BLOB_REF, fileRegistry.addBlob(BLOB_NAME, ByteBuffer.wrap(BLOB.getBytes(StandardCharsets.UTF_8))));
String serializedRegistry = FileDBRegistry.exportRegistry(fileRegistry);
FileDBRegistry importedRegistry = FileDBRegistry.create(fileManager, new StringReader(serializedRegistry));
assertEquals(Set.of(BLOB_NAME, FOO_FILE), importedRegistry.getMap().keySet());
assertEquals(BLOB_REF, importedRegistry.getMap().get(BLOB_NAME));
assertEquals(FOO_REF, importedRegistry.getMap().get(FOO_FILE));
assertEquals(2, importedRegistry.export().size());
checkConsistentEntry(fileRegistry.export().get(0), importedRegistry);
checkConsistentEntry(fileRegistry.export().get(1), importedRegistry);
try {
importedRegistry.addFile(NO_FOO_FILE);
} catch (Exception e ) {
assertEquals("java.io.FileNotFoundException: src/test/apps/zkapp/files/no_foo.json (No such file or directory)", e.getMessage());
}
assertEquals(2, importedRegistry.export().size());
tmpDir.delete();
} | assertEquals("java.io.FileNotFoundException: src/test/apps/zkapp/files/no_foo.json (No such file or directory)", e.getMessage()); | public void importAndExport() throws IOException {
TemporaryFolder tmpDir = new TemporaryFolder();
tmpDir.create();
AddFileInterface fileManager =
new ApplicationFileManager(new File(APP), new FileDirectory(tmpDir.newFolder(), new InMemoryFlagSource()), false);
FileRegistry fileRegistry = new FileDBRegistry(fileManager);
assertEquals(FOO_REF, fileRegistry.addFile(FOO_FILE));
try {
fileRegistry.addFile(NO_FOO_FILE);
fail();
} catch (IllegalArgumentException e) {
assertEquals("src/test/apps/zkapp/files/no_foo.json (No such file or directory)", e.getCause().getMessage());
}
try {
fileRegistry.addFile(BOO_FILE);
fail();
} catch (IllegalArgumentException e) {
assertEquals("/files/no_foo.json is not relative", e.getMessage());
}
try {
fileRegistry.addFile(BAR_FILE);
fail();
} catch (IllegalArgumentException e) {
assertEquals("'..' is not allowed in path", e.getMessage());
}
assertEquals(BLOB_REF, fileRegistry.addBlob(BLOB_NAME, ByteBuffer.wrap(BLOB.getBytes(StandardCharsets.UTF_8))));
String serializedRegistry = FileDBRegistry.exportRegistry(fileRegistry);
FileDBRegistry importedRegistry = FileDBRegistry.create(fileManager, new StringReader(serializedRegistry));
assertEquals(Set.of(BLOB_NAME, FOO_FILE), importedRegistry.getMap().keySet());
assertEquals(BLOB_REF, importedRegistry.getMap().get(BLOB_NAME));
assertEquals(FOO_REF, importedRegistry.getMap().get(FOO_FILE));
assertEquals(2, importedRegistry.export().size());
checkConsistentEntry(fileRegistry.export().get(0), importedRegistry);
checkConsistentEntry(fileRegistry.export().get(1), importedRegistry);
importedRegistry.addFile(NO_FOO_FILE);
assertEquals(2, importedRegistry.export().size());
tmpDir.delete();
} | class FileDBRegistryTestCase {
private static final String BLOB = "Some blob";
private static final String APP = "src/test/apps/zkapp";
private static final String FOO_FILE = "files/foo.json";
private static final String NO_FOO_FILE = "files/no_foo.json";
private static final String BOO_FILE = "/files/no_foo.json";
private static final String BAR_FILE = "../files/no_foo.json";
private static final String BLOB_NAME = "././myblob.name";
private static final FileReference BLOB_REF = new FileReference("12f292a25163dd9");
private static final FileReference FOO_REF = new FileReference("b5ce94ca1feae86c");
@Test
public void uriResourcesNotSupportedWhenHosted() {
assertEquals("URI type resources are not supported in this Vespa cloud",
assertThrows(IllegalArgumentException.class,
() -> new ApplicationFileManager(null, null, true).addUri(null, null))
.getMessage());
}
@Test
void checkConsistentEntry(FileRegistry.Entry entry, FileRegistry registry) {
assertEquals(entry.reference, registry.addFile(entry.relativePath));
}
} | class FileDBRegistryTestCase {
private static final String BLOB = "Some blob";
private static final String APP = "src/test/apps/zkapp";
private static final String FOO_FILE = "files/foo.json";
private static final String NO_FOO_FILE = "files/no_foo.json";
private static final String BOO_FILE = "/files/no_foo.json";
private static final String BAR_FILE = "../files/no_foo.json";
private static final String BLOB_NAME = "././myblob.name";
private static final FileReference BLOB_REF = new FileReference("12f292a25163dd9");
private static final FileReference FOO_REF = new FileReference("b5ce94ca1feae86c");
@Test
public void uriResourcesNotSupportedWhenHosted() {
assertEquals("URI type resources are not supported in this Vespa cloud",
assertThrows(IllegalArgumentException.class,
() -> new ApplicationFileManager(null, null, true).addUri(null, null))
.getMessage());
}
@Test
void checkConsistentEntry(FileRegistry.Entry entry, FileRegistry registry) {
assertEquals(entry.reference, registry.addFile(entry.relativePath));
}
} |
Yeah, that's right, working on it. | public List<String> deleteUnusedFileDistributionReferences(FileDirectory fileDirectory, Duration keepFileReferencesDuration) {
Set<String> fileReferencesInUse = getFileReferencesInUse();
log.log(Level.FINE, () -> "File references in use : " + fileReferencesInUse);
Instant instant = clock.instant().minus(keepFileReferencesDuration);
log.log(Level.FINE, () -> "Remove unused file references last modified before " + instant);
List<String> fileReferencesToDelete = sortedUnusedFileReferences(fileDirectory.getRoot(), fileReferencesInUse, instant);
if (fileReferencesToDelete.size() > 0) {
log.log(Level.FINE, () -> "Will delete file references not in use: " + fileReferencesToDelete);
fileReferencesToDelete.forEach(fileReference -> fileDirectory.delete(new FileReference(fileReference)));
}
return fileReferencesToDelete;
} | fileReferencesToDelete.forEach(fileReference -> fileDirectory.delete(new FileReference(fileReference))); | public List<String> deleteUnusedFileDistributionReferences(FileDirectory fileDirectory, Duration keepFileReferencesDuration) {
Set<String> fileReferencesInUse = getFileReferencesInUse();
log.log(Level.FINE, () -> "File references in use : " + fileReferencesInUse);
Instant instant = clock.instant().minus(keepFileReferencesDuration);
log.log(Level.FINE, () -> "Remove unused file references last modified before " + instant);
List<String> fileReferencesToDelete = sortedUnusedFileReferences(fileDirectory.getRoot(), fileReferencesInUse, instant);
if (fileReferencesToDelete.size() > 0) {
log.log(Level.FINE, () -> "Will delete file references not in use: " + fileReferencesToDelete);
fileReferencesToDelete.forEach(fileReference -> fileDirectory.delete(new FileReference(fileReference), this::isFileReferenceInUse));
}
return fileReferencesToDelete;
} | class Builder {
private TenantRepository tenantRepository;
private Optional<Provisioner> hostProvisioner;
private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher());
private Clock clock = Clock.systemUTC();
private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build();
private Orchestrator orchestrator;
private LogRetriever logRetriever = new LogRetriever();
private TesterClient testerClient = new TesterClient();
private Metric metric = new NullMetric();
private SecretStoreValidator secretStoreValidator = new SecretStoreValidator(new SecretStoreProvider().get());
private FlagSource flagSource = new InMemoryFlagSource();
private ConfigConvergenceChecker configConvergenceChecker = new ConfigConvergenceChecker();
public Builder withTenantRepository(TenantRepository tenantRepository) {
this.tenantRepository = tenantRepository;
return this;
}
public Builder withClock(Clock clock) {
this.clock = clock;
return this;
}
public Builder withProvisioner(Provisioner provisioner) {
if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
this.hostProvisioner = Optional.ofNullable(provisioner);
return this;
}
public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) {
if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
this.hostProvisioner = hostProvisionerProvider.getHostProvisioner();
return this;
}
public Builder withHttpProxy(HttpProxy httpProxy) {
this.httpProxy = httpProxy;
return this;
}
public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) {
this.configserverConfig = configserverConfig;
return this;
}
public Builder withOrchestrator(Orchestrator orchestrator) {
this.orchestrator = orchestrator;
return this;
}
public Builder withLogRetriever(LogRetriever logRetriever) {
this.logRetriever = logRetriever;
return this;
}
public Builder withTesterClient(TesterClient testerClient) {
this.testerClient = testerClient;
return this;
}
public Builder withFlagSource(FlagSource flagSource) {
this.flagSource = flagSource;
return this;
}
public Builder withMetric(Metric metric) {
this.metric = metric;
return this;
}
public Builder withSecretStoreValidator(SecretStoreValidator secretStoreValidator) {
this.secretStoreValidator = secretStoreValidator;
return this;
}
public Builder withConfigConvergenceChecker(ConfigConvergenceChecker configConvergenceChecker) {
this.configConvergenceChecker = configConvergenceChecker;
return this;
}
public ApplicationRepository build() {
return new ApplicationRepository(tenantRepository,
hostProvisioner,
InfraDeployerProvider.empty().getInfraDeployer(),
configConvergenceChecker,
httpProxy,
configserverConfig,
orchestrator,
logRetriever,
clock,
testerClient,
metric,
secretStoreValidator,
ClusterReindexingStatusClient.DUMMY_INSTANCE,
flagSource);
}
} | class Builder {
private TenantRepository tenantRepository;
private Optional<Provisioner> hostProvisioner;
private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher());
private Clock clock = Clock.systemUTC();
private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build();
private Orchestrator orchestrator;
private LogRetriever logRetriever = new LogRetriever();
private TesterClient testerClient = new TesterClient();
private Metric metric = new NullMetric();
private SecretStoreValidator secretStoreValidator = new SecretStoreValidator(new SecretStoreProvider().get());
private FlagSource flagSource = new InMemoryFlagSource();
private ConfigConvergenceChecker configConvergenceChecker = new ConfigConvergenceChecker();
public Builder withTenantRepository(TenantRepository tenantRepository) {
this.tenantRepository = tenantRepository;
return this;
}
public Builder withClock(Clock clock) {
this.clock = clock;
return this;
}
public Builder withProvisioner(Provisioner provisioner) {
if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
this.hostProvisioner = Optional.ofNullable(provisioner);
return this;
}
public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) {
if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
this.hostProvisioner = hostProvisionerProvider.getHostProvisioner();
return this;
}
public Builder withHttpProxy(HttpProxy httpProxy) {
this.httpProxy = httpProxy;
return this;
}
public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) {
this.configserverConfig = configserverConfig;
return this;
}
public Builder withOrchestrator(Orchestrator orchestrator) {
this.orchestrator = orchestrator;
return this;
}
public Builder withLogRetriever(LogRetriever logRetriever) {
this.logRetriever = logRetriever;
return this;
}
public Builder withTesterClient(TesterClient testerClient) {
this.testerClient = testerClient;
return this;
}
public Builder withFlagSource(FlagSource flagSource) {
this.flagSource = flagSource;
return this;
}
public Builder withMetric(Metric metric) {
this.metric = metric;
return this;
}
public Builder withSecretStoreValidator(SecretStoreValidator secretStoreValidator) {
this.secretStoreValidator = secretStoreValidator;
return this;
}
public Builder withConfigConvergenceChecker(ConfigConvergenceChecker configConvergenceChecker) {
this.configConvergenceChecker = configConvergenceChecker;
return this;
}
public ApplicationRepository build() {
return new ApplicationRepository(tenantRepository,
hostProvisioner,
InfraDeployerProvider.empty().getInfraDeployer(),
configConvergenceChecker,
httpProxy,
configserverConfig,
orchestrator,
logRetriever,
clock,
testerClient,
metric,
secretStoreValidator,
ClusterReindexingStatusClient.DUMMY_INSTANCE,
flagSource);
}
} |
Given the already long list of 13 types in AWS already with a range of core numbers, I think we need a 'type'. If count is number of cores, I think that vcpu is a better term. | public void testRequestingSpecificNodeResources() {
String services =
"""
<?xml version='1.0' encoding='utf-8' ?>
<services>
<admin version='4.0'>
<logservers>
<nodes count='1' dedicated='true'>
<resources vcpu='0.1' memory='0.2Gb' disk='300Gb' disk-speed='slow'/>
</nodes>
</logservers>
<slobroks>
<nodes count='2' dedicated='true'>
<resources vcpu='0.1' memory='0.3Gb' disk='1Gb' bandwidth='500Mbps'/>
</nodes>
</slobroks>
</admin>
<container version='1.0' id='container'>
<nodes count='4'>
<resources vcpu='12' memory='10Gb' disk='30Gb' architecture='arm64'/>
</nodes>
</container>
<container version='1.0' id='container2'>
<nodes count='2'>
<resources vcpu='4' memory='16Gb' disk='125Gb'>
<gpu count='1' memory='16Gb'/>
</resources>
</nodes>
</container>
<content version='1.0' id='foo'>
<documents>
<document type='type1' mode='index'/>
</documents>
<nodes count='5'>
<resources vcpu='8' memory='200Gb' disk='1Pb'/>
</nodes>
</content>
<content version='1.0' id='bar'>
<documents>
<document type='type1' mode='index'/>
</documents>
<nodes count='6'>
<resources vcpu='10' memory='64Gb' disk='200Gb'/>
</nodes>
</content>
</services>
""";
int totalHosts = 23;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(new NodeResources(0.1, 0.2, 300, 0.3, NodeResources.DiskSpeed.slow), 1);
tester.addHosts(new NodeResources(0.1, 0.3, 1, 0.5), 2);
tester.addHosts(new NodeResources(12, 10, 30, 0.3,
NodeResources.DiskSpeed.fast, NodeResources.StorageType.local, NodeResources.Architecture.arm64), 4);
tester.addHosts(new NodeResources(4, 16, 125, 10,
NodeResources.DiskSpeed.fast, NodeResources.StorageType.local, Architecture.x86_64,
new NodeResources.GpuResources(1, 16)), 4);
tester.addHosts(new NodeResources(8, 200, 1000000, 0.3), 5);
tester.addHosts(new NodeResources(10, 64, 200, 0.3), 6);
tester.addHosts(new NodeResources(0.5, 2, 10, 0.3), 6);
VespaModel model = tester.createModel(services, true, 0);
assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size());
} | <gpu count='1' memory='16Gb'/> | public void testRequestingSpecificNodeResources() {
String services =
"""
<?xml version='1.0' encoding='utf-8' ?>
<services>
<admin version='4.0'>
<logservers>
<nodes count='1' dedicated='true'>
<resources vcpu='0.1' memory='0.2Gb' disk='300Gb' disk-speed='slow'/>
</nodes>
</logservers>
<slobroks>
<nodes count='2' dedicated='true'>
<resources vcpu='0.1' memory='0.3Gb' disk='1Gb' bandwidth='500Mbps'/>
</nodes>
</slobroks>
</admin>
<container version='1.0' id='container'>
<nodes count='4'>
<resources vcpu='12' memory='10Gb' disk='30Gb' architecture='arm64'/>
</nodes>
</container>
<container version='1.0' id='container2'>
<nodes count='2'>
<resources vcpu='4' memory='16Gb' disk='125Gb'>
<gpu count='1' memory='16Gb'/>
</resources>
</nodes>
</container>
<content version='1.0' id='foo'>
<documents>
<document type='type1' mode='index'/>
</documents>
<nodes count='5'>
<resources vcpu='8' memory='200Gb' disk='1Pb'/>
</nodes>
</content>
<content version='1.0' id='bar'>
<documents>
<document type='type1' mode='index'/>
</documents>
<nodes count='6'>
<resources vcpu='10' memory='64Gb' disk='200Gb'/>
</nodes>
</content>
</services>
""";
int totalHosts = 23;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(new NodeResources(0.1, 0.2, 300, 0.3, NodeResources.DiskSpeed.slow), 1);
tester.addHosts(new NodeResources(0.1, 0.3, 1, 0.5), 2);
tester.addHosts(new NodeResources(12, 10, 30, 0.3,
NodeResources.DiskSpeed.fast, NodeResources.StorageType.local, NodeResources.Architecture.arm64), 4);
tester.addHosts(new NodeResources(4, 16, 125, 10,
NodeResources.DiskSpeed.fast, NodeResources.StorageType.local, Architecture.x86_64,
new NodeResources.GpuResources(1, 16)), 4);
tester.addHosts(new NodeResources(8, 200, 1000000, 0.3), 5);
tester.addHosts(new NodeResources(10, 64, 200, 0.3), 6);
tester.addHosts(new NodeResources(0.5, 2, 10, 0.3), 6);
VespaModel model = tester.createModel(services, true, 0);
assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size());
} | class ModelProvisioningTest {
@Test
public void testNodesJdisc() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>\n" +
"\n" +
"<admin version='3.0'><nodes count='1' /></admin>\n" +
"<container id='mydisc' version='1.0'>" +
" <handler id='myHandler'>" +
" <component id='injected' />" +
" </handler>" +
" <nodes count=\"3\"/>" +
"</container>" +
"<container id='mydisc2' version='1.0'>" +
" <document-processing/>" +
" <handler id='myHandler'>" +
" <component id='injected' />" +
" </handler>" +
" <nodes count='2' preload='lib/blablamalloc.so'>" +
" <jvm allocated-memory='45%' gc-options='-XX:+UseParNewGC' options='-Xlog:gc' />" +
" </nodes>" +
"</container>" +
"</services>";
String hosts ="<hosts>"
+ " <host name='myhost0'>"
+ " <alias>node0</alias>"
+ " </host>"
+ " <host name='myhost1'>"
+ " <alias>node1</alias>"
+ " </host>"
+ " <host name='myhost2'>"
+ " <alias>node2</alias>"
+ " </host>"
+ " <host name='myhost3'>"
+ " <alias>node3</alias>"
+ " </host>"
+ " <host name='myhost4'>"
+ " <alias>node4</alias>"
+ " </host>"
+ " <host name='myhost5'>"
+ " <alias>node5</alias>"
+ " </host>"
+ "</hosts>";
VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services);
VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true, false)));
ApplicationContainerCluster mydisc = model.getContainerClusters().get("mydisc");
ApplicationContainerCluster mydisc2 = model.getContainerClusters().get("mydisc2");
assertEquals(3, mydisc.getContainers().size());
assertEquals("mydisc/container.0", (mydisc.getContainers().get(0).getConfigId()));
assertTrue(mydisc.getContainers().get(0).isInitialized());
assertEquals("mydisc/container.1", mydisc.getContainers().get(1).getConfigId());
assertTrue(mydisc.getContainers().get(1).isInitialized());
assertEquals("mydisc/container.2", mydisc.getContainers().get(2).getConfigId());
assertTrue(mydisc.getContainers().get(2).isInitialized());
assertEquals(2, mydisc2.getContainers().size());
assertEquals("mydisc2/container.0", mydisc2.getContainers().get(0).getConfigId());
assertTrue(mydisc2.getContainers().get(0).isInitialized());
assertEquals("mydisc2/container.1", mydisc2.getContainers().get(1).getConfigId());
assertTrue(mydisc2.getContainers().get(1).isInitialized());
assertEquals("", mydisc.getContainers().get(0).getJvmOptions());
assertEquals("", mydisc.getContainers().get(1).getJvmOptions());
assertEquals("", mydisc.getContainers().get(2).getJvmOptions());
assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(0).getPreLoad());
assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(1).getPreLoad());
assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(2).getPreLoad());
assertEquals(Optional.empty(), mydisc.getMemoryPercentage());
assertEquals("-Xlog:gc", mydisc2.getContainers().get(0).getJvmOptions());
assertEquals("-Xlog:gc", mydisc2.getContainers().get(1).getJvmOptions());
assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(0).getPreLoad());
assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(1).getPreLoad());
assertEquals(Optional.of(45), mydisc2.getMemoryPercentage());
assertEquals(Optional.of("-XX:+UseParNewGC"), mydisc2.getJvmGCOptions());
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
mydisc2.getConfig(qrStartBuilder);
QrStartConfig qrsStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals(45, qrsStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory());
HostSystem hostSystem = model.hostSystem();
assertNotNull(hostSystem.getHostByHostname("myhost0"));
assertNotNull(hostSystem.getHostByHostname("myhost1"));
assertNotNull(hostSystem.getHostByHostname("myhost2"));
assertNotNull(hostSystem.getHostByHostname("myhost3"));
assertNull(hostSystem.getHostByHostname("Nope"));
}
@Test
public void testNodeCountForContentGroup() {
    // A content cluster with n nodes must assign distribution keys 0..n-1 in node order.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "\n" +
            " <admin version='3.0'>" +
            " <nodes count='3'/>" +
            " </admin>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2'/>" +
            " </content>" +
            "</services>";
    int hostCount = 5;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(hostCount);
    VespaModel model = tester.createModel(xmlWithNodes, true);
    assertEquals(hostCount, model.getRoot().hostSystem().getHosts().size());
    ContentCluster barCluster = model.getContentClusters().get("bar");
    assertEquals(2, barCluster.getRootGroup().getNodes().size());
    int expectedKey = 0;
    for (StorageNode node : barCluster.getRootGroup().getNodes())
        assertEquals(expectedKey++, node.getDistributionKey());
}
@Test
public void testSeparateClusters() {
    // One container cluster plus two content clusters, one of which has no explicit id
    // (and therefore gets the default id 'content'). All three are provisioned separately.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='container1'>" +
            " <search/>" +
            " <nodes count='1'/>" +
            " </container>" +
            " <content version='1.0' id='content1'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2'/>" +
            " </content>" +
            " <content version='1.0'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2'/>" +
            " </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(8);
    VespaModel model = tester.createModel(xmlWithNodes, true);
    var content1 = model.getContentClusters().get("content1");
    var container1 = model.getContainerClusters().get("container1");
    var defaultContent = model.getContentClusters().get("content");
    assertEquals(2, content1.getRootGroup().getNodes().size(), "Nodes in content1");
    assertEquals(1, container1.getContainers().size(), "Nodes in container1");
    assertEquals(2, defaultContent.getRootGroup().getNodes().size(), "Nodes in cluster without ID");
    assertEquals(ApplicationContainerCluster.defaultHeapSizePercentageOfTotalNodeMemory,
                 physicalMemoryPercentage(container1), "Heap size for container");
    assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Type.content, model);
    assertProvisioned(1, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
    assertProvisioned(2, ClusterSpec.Id.from("content"), ClusterSpec.Type.content, model);
}
@Test
public void testClusterMembership() {
    // The host allocated to a container cluster must carry a membership naming
    // that cluster's type and id.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='container1'>" +
            " <nodes count='1'/>" +
            " </container>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(1);
    VespaModel model = tester.createModel(xmlWithNodes, true);
    assertEquals(1, model.hostSystem().getHosts().size());
    HostResource host = model.hostSystem().getHosts().iterator().next();
    var membership = host.spec().membership();
    assertTrue(membership.isPresent());
    assertEquals("container", membership.get().cluster().type().name());
    assertEquals("container1", membership.get().cluster().id().value());
}
@Test
public void testCombinedCluster() {
// Container cluster 'container1' is declared with <nodes of='content1'/>, so it shares
// the content cluster's hosts and becomes a 'combined' cluster.
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes of='content1'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'>" +
" <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +
" </nodes>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(5);
TestLogger logger = new TestLogger();
VespaModel model = tester.createModel(xmlWithNodes, true, new DeployState.Builder().deployLogger(logger));
// Both clusters end up on the same two nodes.
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
// On combined nodes the container jvm heap share is reduced (here to 18%).
assertEquals(18, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is lowered with combined clusters");
// Proton gets the node's 3Gb minus the reserved memory, scaled down by the 18% jvm heap share.
assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.18)), protonMemorySize(model.getContentClusters()
.get("content1")), "Memory for proton is lowered to account for the jvm heap");
// No separate container nodes are provisioned; the shared nodes are of type 'combined'.
assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model);
// Using <nodes of="..."> must log exactly one deprecation warning.
assertEquals(1, logger.msgs().size());
assertEquals("Declaring combined cluster with <nodes of=\"...\"> is deprecated without replacement, " +
"and the feature will be removed in Vespa 9. Use separate container and content clusters instead",
logger.msgs().get(0).message);
}
@Test
public void testCombinedClusterWithJvmHeapSizeOverride() {
// Same combined-cluster setup as testCombinedCluster, but with an explicit
// <jvm allocated-memory="30%"/> override on the container nodes.
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes of='content1'>" +
" <jvm allocated-memory=\"30%\"/>" +
" </nodes>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'>" +
" <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +
" </nodes>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(5);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
// The explicit 30% override takes effect instead of the default combined-cluster share.
assertEquals(30, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is lowered with combined clusters");
// Proton memory is reduced by the overridden 30% jvm heap share.
assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.30)), protonMemorySize(model.getContentClusters()
.get("content1")), "Memory for proton is lowered to account for the jvm heap");
assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model);
}
/** For comparison with the above */
@Test
public void testNonCombinedCluster() {
// Separate container and content clusters (no <nodes of=...>): neither the container
// heap share nor the proton memory is reduced.
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes count='2'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'>" +
" <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +
" </nodes>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(7);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
// Default heap share, and proton gets all of the node's 3Gb minus reserved memory.
assertEquals(ApplicationContainerCluster.defaultHeapSizePercentageOfTotalNodeMemory, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is normal");
assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3))), protonMemorySize(model.getContentClusters().get("content1")), "Memory for proton is normal");
}
@Test
public void testCombinedClusterWithJvmOptions() {
    // JVM options set on the combined container cluster must reach every container.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='container1'>" +
            " <document-processing/>" +
            " <nodes of='content1'>" +
            " <jvm options='-Dtestoption=foo' />" +
            " </nodes>" +
            " </container>" +
            " <content version='1.0' id='content1'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2'/>" +
            " </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(5);
    VespaModel model = tester.createModel(xmlWithNodes, true);
    assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
    var containers = model.getContainerClusters().get("container1").getContainers();
    assertEquals(2, containers.size(), "Nodes in container1");
    containers.forEach(container -> assertTrue(container.getJvmOptions().contains("testoption")));
}
@Test
public void testMultipleCombinedClusters() {
    // Two independent combined clusters: each container cluster rides on its own
    // content cluster's nodes.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='container1'>" +
            " <nodes of='content1'/>" +
            " </container>" +
            " <container version='1.0' id='container2'>" +
            " <nodes of='content2'/>" +
            " </container>" +
            " <content version='1.0' id='content1'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2'/>" +
            " </content>" +
            " <content version='1.0' id='content2'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='3'/>" +
            " </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(8);
    VespaModel model = tester.createModel(xmlWithNodes, true);
    var contentClusters = model.getContentClusters();
    var containerClusters = model.getContainerClusters();
    assertEquals(2, contentClusters.get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
    assertEquals(2, containerClusters.get("container1").getContainers().size(), "Nodes in container1");
    assertEquals(3, contentClusters.get("content2").getRootGroup().getNodes().size(), "Nodes in content2");
    assertEquals(3, containerClusters.get("container2").getContainers().size(), "Nodes in container2");
}
@Test
public void testNonExistingCombinedClusterReference() {
    // <nodes of='container2'> refers to a service that is not declared at all.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='container1'>" +
            " <nodes of='container2'/>" +
            " </container>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(2);
    try {
        tester.createModel(xmlWithNodes, true);
        fail("Expected exception");
    }
    catch (IllegalArgumentException expected) {
        assertEquals("container cluster 'container1' contains an invalid reference: referenced service 'container2' is not defined",
                     Exceptions.toMessageString(expected));
    }
}
@Test
public void testInvalidCombinedClusterReference() {
    // <nodes of=...> may only reference a content cluster; referencing another
    // container cluster must be rejected.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='container1'>" +
            " <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" +
            " </container>" +
            " <container version='1.0' id='container2'>" +
            " <nodes count='2'/>" +
            " </container>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(2);
    try {
        tester.createModel(xmlWithNodes, true);
        fail("Expected exception");
    }
    catch (IllegalArgumentException expected) {
        assertEquals("container cluster 'container1' contains an invalid reference: service 'container2' is not a content service",
                     Exceptions.toMessageString(expected));
    }
}
@Test
public void testCombinedClusterWithZooKeeperFails() {
    // An explicit <zookeeper/> element is not allowed in a combined cluster.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='container1'>" +
            " <search/>" +
            " <nodes of='content1'/>" +
            " <zookeeper />" +
            " </container>" +
            " <content version='1.0' id='content1'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2'>" +
            " <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +
            " </nodes>" +
            " </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(2);
    try {
        tester.createModel(xmlWithNodes, true);
        fail("ZooKeeper should not be allowed on combined clusters");
    }
    catch (IllegalArgumentException expected) {
        assertEquals("A combined cluster cannot run ZooKeeper", expected.getMessage());
    }
}
@Test
public void testUsingNodesAndGroupCountAttributes() {
// Topology: 10 container nodes ('foo'), content 'bar' with 27 nodes in 9 groups of 3,
// and content 'baz' with 27 nodes in 27 groups of 1. 67 hosts in total.
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='9'/>" +
" </content>" +
" <content version='1.0' id='baz'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='27'/>" +
" </content>" +
"</services>";
int numberOfHosts = 67;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
assertEquals(1, model.getContainerClusters().size());
Set<HostResource> containerHosts = model.getContainerClusters().get("foo").getContainers().stream()
.map(Container::getHost)
.collect(Collectors.toSet());
assertEquals(10, containerHosts.size());
// Admin services: slobroks colocate with cluster controllers, logserver with containers.
Admin admin = model.getAdmin();
Set<HostResource> clusterControllerHosts = admin.getClusterControllers().getContainers()
.stream().map(cc -> cc.getHostResource()).collect(Collectors.toSet());
Set<HostResource> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
assertEquals(3, slobrokHosts.size());
assertTrue(clusterControllerHosts.containsAll(slobrokHosts), "Slobroks are assigned on cluster controller nodes");
assertTrue(containerHosts.contains(admin.getLogserver().getHost()), "Logserver is assigned from container nodes");
assertEquals(0, admin.getConfigservers().size(), "No in-cluster config servers in a hosted environment");
assertEquals(3, admin.getClusterControllers().getContainers().size(), "Dedicated admin cluster controllers when hosted");
// 'bar': all nodes live in subgroups (root group has none); distribution keys and
// config ids are assigned consecutively across the 9 groups of 3.
ContentCluster cluster = model.getContentClusters().get("bar");
List<StorageGroup> subGroups = cluster.getRootGroup().getSubgroups();
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(9, subGroups.size());
assertEquals("0", subGroups.get(0).getIndex());
assertEquals(3, subGroups.get(0).getNodes().size());
assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-57", subGroups.get(0).getNodes().get(0).getHostName());
assertEquals(1, subGroups.get(0).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/1", subGroups.get(0).getNodes().get(1).getConfigId());
assertEquals(2, subGroups.get(0).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/2", subGroups.get(0).getNodes().get(2).getConfigId());
assertEquals("1", subGroups.get(1).getIndex());
assertEquals(3, subGroups.get(1).getNodes().size());
assertEquals(3, subGroups.get(1).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/3", subGroups.get(1).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-54", subGroups.get(1).getNodes().get(0).getHostName());
assertEquals(4, subGroups.get(1).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/4", subGroups.get(1).getNodes().get(1).getConfigId());
assertEquals(5, subGroups.get(1).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/5", subGroups.get(1).getNodes().get(2).getConfigId());
assertEquals("node-1-3-50-51", subGroups.get(2).getNodes().get(0).getHostName());
assertEquals("8", subGroups.get(8).getIndex());
assertEquals(3, subGroups.get(8).getNodes().size());
assertEquals(24, subGroups.get(8).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/24", subGroups.get(8).getNodes().get(0).getConfigId());
assertEquals(25, subGroups.get(8).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/25", subGroups.get(8).getNodes().get(1).getConfigId());
assertEquals(26, subGroups.get(8).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/26", subGroups.get(8).getNodes().get(2).getConfigId());
// 'baz': 27 groups of one node each, again with consecutive distribution keys.
cluster = model.getContentClusters().get("baz");
subGroups = cluster.getRootGroup().getSubgroups();
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(27, subGroups.size());
assertEquals("0", subGroups.get(0).getIndex());
assertEquals(1, subGroups.get(0).getNodes().size());
assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-27", subGroups.get(0).getNodes().get(0).getHostName());
assertEquals("1", subGroups.get(1).getIndex());
assertEquals(1, subGroups.get(1).getNodes().size());
assertEquals(1, subGroups.get(1).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/1", subGroups.get(1).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-26", subGroups.get(1).getNodes().get(0).getHostName());
assertEquals("node-1-3-50-25", subGroups.get(2).getNodes().get(0).getHostName());
assertEquals("26", subGroups.get(26).getIndex());
assertEquals(1, subGroups.get(26).getNodes().size());
assertEquals(26, subGroups.get(26).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/26", subGroups.get(26).getNodes().get(0).getConfigId());
}
@Test
public void testSlobroksOnContainersIfNoContentClusters() {
    // With no content clusters there are no cluster controller nodes, so slobroks
    // (and the logserver) must be placed on container nodes instead.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            "</services>";
    int numberOfHosts = 10;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, true);
    assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
    assertEquals(1, model.getContainerClusters().size());
    Set<HostResource> hostsOfContainers = model.getContainerClusters().get("foo").getContainers().stream()
                                               .map(Container::getHost)
                                               .collect(Collectors.toSet());
    assertEquals(10, hostsOfContainers.size());
    Admin admin = model.getAdmin();
    Set<HostResource> hostsOfSlobroks = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
    assertEquals(3, hostsOfSlobroks.size());
    assertTrue(hostsOfContainers.containsAll(hostsOfSlobroks),
               "Slobroks are assigned from container nodes");
    assertTrue(hostsOfContainers.contains(admin.getLogserver().getHost()), "Logserver is assigned from container nodes");
    assertEquals(0, admin.getConfigservers().size(), "No in-cluster config servers in a hosted environment");
}
@Test
public void testUsingNodesAndGroupCountAttributesWithoutDedicatedClusterControllers() {
// Same topology as testUsingNodesAndGroupCountAttributes: 10 container nodes,
// 'bar' = 27 nodes / 9 groups, 'baz' = 27 nodes / 27 groups, 67 hosts total.
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='9'/>" +
" </content>" +
" <content version='1.0' id='baz'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='27'/>" +
" </content>" +
"</services>";
int numberOfHosts = 67;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
assertEquals(1, model.getContainerClusters().size());
Set<HostResource> containerHosts = model.getContainerClusters().get("foo").getContainers().stream()
.map(Container::getHost)
.collect(Collectors.toSet());
assertEquals(10, containerHosts.size());
// Admin service placement mirrors the dedicated-cluster-controller variant above.
Admin admin = model.getAdmin();
Set<HostResource> clusterControllerHosts = admin.getClusterControllers().getContainers()
.stream().map(cc -> cc.getHostResource()).collect(Collectors.toSet());
Set<HostResource> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
assertEquals(3, slobrokHosts.size());
assertTrue(clusterControllerHosts.containsAll(slobrokHosts), "Slobroks are assigned on cluster controller nodes");
assertTrue(containerHosts.contains(admin.getLogserver().getHost()), "Logserver is assigned from container nodes");
assertEquals(0, admin.getConfigservers().size(), "No in-cluster config servers in a hosted environment");
assertEquals(3, admin.getClusterControllers().getContainers().size());
// 'bar': 9 subgroups of 3 nodes with consecutive distribution keys and config ids.
ContentCluster cluster = model.getContentClusters().get("bar");
List<StorageGroup> subGroups = cluster.getRootGroup().getSubgroups();
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(9, subGroups.size());
assertEquals("0", subGroups.get(0).getIndex());
assertEquals(3, subGroups.get(0).getNodes().size());
assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-57", subGroups.get(0).getNodes().get(0).getHostName());
assertEquals(1, subGroups.get(0).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/1", subGroups.get(0).getNodes().get(1).getConfigId());
assertEquals(2, subGroups.get(0).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/2", subGroups.get(0).getNodes().get(2).getConfigId());
assertEquals("1", subGroups.get(1).getIndex());
assertEquals(3, subGroups.get(1).getNodes().size());
assertEquals(3, subGroups.get(1).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/3", subGroups.get(1).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-54", subGroups.get(1).getNodes().get(0).getHostName());
assertEquals(4, subGroups.get(1).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/4", subGroups.get(1).getNodes().get(1).getConfigId());
assertEquals(5, subGroups.get(1).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/5", subGroups.get(1).getNodes().get(2).getConfigId());
assertEquals("node-1-3-50-51", subGroups.get(2).getNodes().get(0).getHostName());
assertEquals("8", subGroups.get(8).getIndex());
assertEquals(3, subGroups.get(8).getNodes().size());
assertEquals(24, subGroups.get(8).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/24", subGroups.get(8).getNodes().get(0).getConfigId());
assertEquals(25, subGroups.get(8).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/25", subGroups.get(8).getNodes().get(1).getConfigId());
assertEquals(26, subGroups.get(8).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/26", subGroups.get(8).getNodes().get(2).getConfigId());
// 'baz': 27 single-node subgroups.
cluster = model.getContentClusters().get("baz");
subGroups = cluster.getRootGroup().getSubgroups();
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(27, subGroups.size());
assertEquals("0", subGroups.get(0).getIndex());
assertEquals(1, subGroups.get(0).getNodes().size());
assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-27", subGroups.get(0).getNodes().get(0).getHostName());
assertEquals("1", subGroups.get(1).getIndex());
assertEquals(1, subGroups.get(1).getNodes().size());
assertEquals(1, subGroups.get(1).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/1", subGroups.get(1).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-26", subGroups.get(1).getNodes().get(0).getHostName());
assertEquals("node-1-3-50-25", subGroups.get(2).getNodes().get(0).getHostName());
assertEquals("26", subGroups.get(26).getIndex());
assertEquals(1, subGroups.get(26).getNodes().size());
assertEquals(26, subGroups.get(26).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/26", subGroups.get(26).getNodes().get(0).getConfigId());
}
@Test
public void testGroupsOfSize1() {
// Content cluster with 8 single-node groups; also checks distribution-bit count
// and cluster controller placement.
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
" <content version='1.0' id='bar'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='8' groups='8'/>" +
" </content>" +
"</services>";
int numberOfHosts = 21;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
// Three dedicated cluster controllers on the expected hosts.
ClusterControllerContainerCluster clusterControllers = model.getAdmin().getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("cluster-controllers", clusterControllers.getName());
assertEquals("node-1-3-50-03", clusterControllers.getContainers().get(0).getHostName());
assertEquals("node-1-3-50-02", clusterControllers.getContainers().get(1).getHostName());
assertEquals("node-1-3-50-01", clusterControllers.getContainers().get(2).getHostName());
// All 8 content nodes are in single-node subgroups; root group itself is empty.
ContentCluster cluster = model.getContentClusters().get("bar");
List<StorageGroup> subGroups = cluster.getRootGroup().getSubgroups();
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(8, subGroups.size());
assertEquals(8, cluster.distributionBits());
assertEquals("0", subGroups.get(0).getIndex());
assertEquals(1, subGroups.get(0).getNodes().size());
assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-11", subGroups.get(0).getNodes().get(0).getHostName());
assertEquals("1", subGroups.get(1).getIndex());
assertEquals(1, subGroups.get(1).getNodes().size());
assertEquals(1, subGroups.get(1).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/1", subGroups.get(1).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-10", subGroups.get(1).getNodes().get(0).getHostName());
assertEquals("7", subGroups.get(7).getIndex());
assertEquals(1, subGroups.get(7).getNodes().size());
assertEquals(7, subGroups.get(7).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/7", subGroups.get(7).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-04", subGroups.get(7).getNodes().get(0).getHostName());
}
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() {
    // Retiring a node that hosts a slobrok must keep that slobrok around in addition
    // to the regular three, so the cluster grows to four.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            "</services>";
    int numberOfHosts = 11;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, true, "node-1-3-50-09");
    assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
    var slobroks = model.getAdmin().getSlobroks();
    assertEquals(1+3, slobroks.size(), "Includes retired node");
    assertEquals("node-1-3-50-11", slobroks.get(0).getHostName());
    assertEquals("node-1-3-50-10", slobroks.get(1).getHostName());
    assertEquals("node-1-3-50-08", slobroks.get(2).getHostName());
    assertEquals("node-1-3-50-09", slobroks.get(3).getHostName(), "Included in addition because it is retired");
}
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() {
    // Two retired slobrok hosts are kept in addition to the regular three,
    // and they sort after the non-retired ones.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            "</services>";
    int numberOfHosts = 12;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, true, "node-1-3-50-03", "node-1-3-50-04");
    assertEquals(10+2, model.getRoot().hostSystem().getHosts().size());
    var slobroks = model.getAdmin().getSlobroks();
    assertEquals(3+2, slobroks.size(), "Includes retired node");
    assertEquals("node-1-3-50-12", slobroks.get(0).getHostName());
    assertEquals("node-1-3-50-11", slobroks.get(1).getHostName());
    assertEquals("node-1-3-50-10", slobroks.get(2).getHostName());
    assertEquals("node-1-3-50-04", slobroks.get(3).getHostName(), "Included in addition because it is retired");
    assertEquals("node-1-3-50-03", slobroks.get(4).getHostName(), "Included in addition because it is retired");
}
@Test
public void testSlobroksAreSpreadOverAllContainerClusters() {
// Two container clusters; slobroks are distributed over both, and retired slobrok
// hosts are kept in addition to the regular allocation (7 in total here).
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
" <container version='1.0' id='bar'>" +
" <nodes count='3'/>" +
" </container>" +
"</services>";
int numberOfHosts = 16;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true, "node-1-3-50-15", "node-1-3-50-05", "node-1-3-50-04");
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
assertEquals(7, model.getAdmin().getSlobroks().size(), "Includes retired node");
// Slobroks from the 'foo' cluster first, then the 'bar' cluster.
assertEquals("node-1-3-50-16", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("node-1-3-50-14", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("node-1-3-50-15", model.getAdmin().getSlobroks().get(2).getHostName(), "Included in addition because it is retired");
assertEquals("node-1-3-50-03", model.getAdmin().getSlobroks().get(3).getHostName());
// NOTE(review): index 4 is intentionally (?) not asserted here — only 0-3 and 5-6 are
// checked; confirm whether the host at index 4 is deterministic.
assertEquals("node-1-3-50-05", model.getAdmin().getSlobroks().get(5).getHostName(), "Included in addition because it is retired");
assertEquals("node-1-3-50-04", model.getAdmin().getSlobroks().get(6).getHostName(), "Included in addition because it is retired");
}
@Test
public void testDedicatedClusterControllers() {
    // Two content clusters share one dedicated cluster controller cluster of three
    // nodes, each marked as a stateful admin-type cluster member.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <content version='1.0' id='foo'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2' />" +
            " </content>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2' />" +
            " </content>" +
            "</services>";
    int numberOfHosts = 7;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services);
    assertEquals(7, model.getRoot().hostSystem().getHosts().size());
    ClusterControllerContainerCluster clusterControllers = model.getAdmin().getClusterControllers();
    assertEquals(3, clusterControllers.getContainers().size());
    assertEquals("cluster-controllers", clusterControllers.getName());
    for (var controller : clusterControllers.getContainers()) {
        var cluster = controller.getHost().spec().membership().get().cluster();
        assertTrue(cluster.isStateful());
        assertEquals(ClusterSpec.Type.admin, cluster.type());
    }
}
@Test
public void testLogserverContainerWhenDedicatedLogserver() {
// services.xml explicitly declares a dedicated logserver node.
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'>" +
" <logservers>" +
" <nodes count='1' dedicated='true'/>" +
" </logservers>" +
" </admin>" +
" <container version='1.0' id='foo'>" +
" <nodes count='1'/>" +
" </container>" +
"</services>";
// NOTE(review): false despite the explicit dedicated logserver above — presumably this
// flag drives the implicit "use dedicated node for logserver" behavior inside the
// helper, not the services.xml declaration; confirm against testContainerOnLogserverHost.
boolean useDedicatedNodeForLogserver = false;
testContainerOnLogserverHost(services, useDedicatedNodeForLogserver);
}
@Test
public void testLogForwarderNotInAdminCluster() {
// Without include-admin='true', the log forwarder must run only on container nodes,
// not on the dedicated logserver node.
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'>" +
" <logservers>" +
" <nodes count='1' dedicated='true'/>" +
" </logservers>" +
" <logforwarding>" +
" <splunk deployment-server='bardeplserv:123' client-name='barclinam' phone-home-interval='987' />" +
" </logforwarding>" +
" </admin>" +
" <container version='1.0' id='foo'>" +
" <nodes count='1'/>" +
" </container>" +
"</services>";
int numberOfHosts = 2;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts+1);
VespaModel model = tester.createModel(Zone.defaultZone(), services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
// The logserver host runs the logserver service but no container and no logforwarder.
Admin admin = model.getAdmin();
Logserver logserver = admin.getLogserver();
HostResource hostResource = logserver.getHostResource();
assertNotNull(hostResource.getService("logserver"));
assertNull(hostResource.getService("container"));
assertNull(hostResource.getService("logforwarder"));
// The container host runs both the container and the logforwarder, but no logserver.
var clist = model.getContainerClusters().get("foo").getContainers();
assertEquals(1, clist.size());
hostResource = clist.get(0).getHostResource();
assertNull(hostResource.getService("logserver"));
assertNotNull(hostResource.getService("container"));
assertNotNull(hostResource.getService("logforwarder"));
// The logforwarder is stopped gracefully via its pre-shutdown command.
var lfs = hostResource.getService("logforwarder");
String shutdown = lfs.getPreShutdownCommand().orElse("<none>");
assertTrue(shutdown.startsWith("$ROOT/bin/vespa-logforwarder-start -S -c hosts/"));
}
    @Test
    public void testLogForwarderInAdminCluster() {
        // With include-admin='true' on <logforwarding>, the logforwarder must run on the
        // dedicated logserver node as well as on container nodes.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <admin version='4.0'>" +
                "    <logservers>" +
                "      <nodes count='1' dedicated='true'/>" +
                "    </logservers>" +
                "    <logforwarding include-admin='true'>" +
                "      <splunk deployment-server='bardeplserv:123' client-name='barclinam' phone-home-interval='987' />" +
                "    </logforwarding>" +
                "  </admin>" +
                "  <container version='1.0' id='foo'>" +
                "    <nodes count='1'/>" +
                "  </container>" +
                "</services>";
        int numberOfHosts = 2;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts+1);
        VespaModel model = tester.createModel(Zone.defaultZone(), services, true);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        Admin admin = model.getAdmin();
        Logserver logserver = admin.getLogserver();
        HostResource hostResource = logserver.getHostResource();
        // Logserver host: no container, but — unlike without include-admin — a logforwarder.
        assertNotNull(hostResource.getService("logserver"));
        assertNull(hostResource.getService("container"));
        assertNotNull(hostResource.getService("logforwarder"));
        var clist = model.getContainerClusters().get("foo").getContainers();
        assertEquals(1, clist.size());
        // Container host: runs both the container and the logforwarder.
        hostResource = clist.get(0).getHostResource();
        assertNull(hostResource.getService("logserver"));
        assertNotNull(hostResource.getService("container"));
        assertNotNull(hostResource.getService("logforwarder"));
    }
@Test
public void testImplicitLogserverContainer() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <container version='1.0' id='foo'>" +
" <nodes count='1'/>" +
" </container>" +
"</services>";
boolean useDedicatedNodeForLogserver = true;
testContainerOnLogserverHost(services, useDedicatedNodeForLogserver);
}
    @Test
    public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() {
        // 24 nodes in 3 groups are requested but only 6 hosts exist: the cluster is
        // built with what is available — 3 groups of 2 nodes each — and redundancy
        // settings are scaled down accordingly.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <admin version='3.0'>" +
                "    <nodes count='3'/>" +
                "  </admin>" +
                "  <content version='1.0' id='bar'>" +
                "    <redundancy reply-after='3'>4</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='24' groups='3'/>" +
                "    <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
                "  </content>" +
                "</services>";
        int numberOfHosts = 6;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(services, false);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        ContentCluster cluster = model.getContentClusters().get("bar");
        List<StorageGroup> subGroups = cluster.getRootGroup().getSubgroups();
        // Effective redundancy: 2 copies per group times 3 groups.
        assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy());
        assertEquals(2*3, cluster.redundancy().effectiveFinalRedundancy());
        assertEquals(2*3, cluster.redundancy().effectiveReadyCopies());
        assertEquals("2|2|*", cluster.getRootGroup().getPartitions().get());
        // All nodes live in subgroups; the root group itself has none.
        assertEquals(0, cluster.getRootGroup().getNodes().size());
        assertEquals(3, subGroups.size());
        // Distribution keys and config ids are assigned sequentially across the groups.
        assertEquals("0", subGroups.get(0).getIndex());
        assertEquals(2, subGroups.get(0).getNodes().size());
        assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
        assertEquals(1, subGroups.get(0).getNodes().get(1).getDistributionKey());
        assertEquals("bar/storage/1", subGroups.get(0).getNodes().get(1).getConfigId());
        assertEquals("1", subGroups.get(1).getIndex());
        assertEquals(2, subGroups.get(1).getNodes().size());
        assertEquals(2, subGroups.get(1).getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/2", subGroups.get(1).getNodes().get(0).getConfigId());
        assertEquals(3, subGroups.get(1).getNodes().get(1).getDistributionKey());
        assertEquals("bar/storage/3", subGroups.get(1).getNodes().get(1).getConfigId());
        assertEquals("2", subGroups.get(2).getIndex());
        assertEquals(2, subGroups.get(2).getNodes().size());
        assertEquals(4, subGroups.get(2).getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/4", subGroups.get(2).getNodes().get(0).getConfigId());
        assertEquals(5, subGroups.get(2).getNodes().get(1).getDistributionKey());
        assertEquals("bar/storage/5", subGroups.get(2).getNodes().get(1).getConfigId());
    }
@Test
public void testRedundancyWithGroupsTooHighRedundancyAndOneRetiredNode() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2' groups='2'/>" +
" </content>" +
"</services>";
int numberOfHosts = 3;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
try {
VespaModel model = tester.createModel(services, false, "node-1-3-50-03");
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("Cluster 'bar' specifies redundancy 2, but it cannot be higher than the minimum nodes per group, which is 1", Exceptions.toMessageString(e));
}
}
    @Test
    public void testRedundancyWithGroupsAndThreeRetiredNodes() {
        // 2 nodes in 2 groups with redundancy 1, and three of the five hosts retired:
        // the model must still build, keeping both groups.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <content version='1.0' id='bar'>" +
                "    <redundancy>1</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='2' groups='2'/>" +
                "  </content>" +
                "</services>";
        int numberOfHosts = 5;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        // Trailing arguments name the retired hosts.
        VespaModel model = tester.createModel(services, false, "node-1-3-50-05", "node-1-3-50-04", "node-1-3-50-03");
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        ContentCluster cluster = model.getContentClusters().get("bar");
        assertEquals(2, cluster.redundancy().effectiveInitialRedundancy());
        assertEquals(2, cluster.redundancy().effectiveFinalRedundancy());
        assertEquals(2, cluster.redundancy().effectiveReadyCopies());
        assertEquals("1|*", cluster.getRootGroup().getPartitions().get());
        assertEquals(0, cluster.getRootGroup().getNodes().size());
        assertEquals(2, cluster.getRootGroup().getSubgroups().size());
    }
    @Test
    public void testRedundancy2DownscaledToOneNodeButOneRetired() {
        // Redundancy 2 over 2 nodes where one node is retired: effective redundancy
        // drops to 1, but both storage nodes remain in the model during the transition.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <content version='1.0' id='bar'>" +
                "    <redundancy>2</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='2'/>" +
                "  </content>" +
                "</services>";
        int numberOfHosts = 3;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        // NOTE(review): flags map to VespaModelTester.createModel overload — confirm
        // their meaning there; the last argument names the retired host.
        VespaModel model = tester.createModel(services, false, false, true, "node-1-3-50-03");
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        ContentCluster cluster = model.getContentClusters().get("bar");
        assertEquals(2, cluster.getStorageCluster().getChildren().size());
        assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
        assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
        assertEquals(1, cluster.redundancy().effectiveReadyCopies());
        assertEquals(2, cluster.getRootGroup().getNodes().size());
        assertEquals(0, cluster.getRootGroup().getSubgroups().size());
    }
    @Test
    public void testUsingNodesCountAttributesAndGettingTooFewNodes() {
        // 24 content nodes requested but only 6 hosts exist (admin takes 3, container 2):
        // the content cluster gets 4 nodes and redundancy/searchable-copies/dispatch
        // groups are all capped at that node count.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <admin version='3.0'>" +
                "    <nodes count='3'/>" +
                "  </admin>" +
                "  <container version='1.0' id='container'>" +
                "    <search/>" +
                "    <nodes count='2'/>" +
                "  </container>" +
                "  <content version='1.0' id='bar'>" +
                "    <redundancy reply-after='8'>12</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='24'/>" +
                "    <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
                "    <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
                "  </content>" +
                "</services>";
        int numberOfHosts = 6;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(services, false);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        ContentCluster cluster = model.getContentClusters().get("bar");
        // Requested 12/5/7, all capped to the 4 nodes actually available.
        assertEquals(4, cluster.redundancy().effectiveInitialRedundancy());
        assertEquals(4, cluster.redundancy().effectiveFinalRedundancy());
        assertEquals(4, cluster.redundancy().effectiveReadyCopies());
        assertEquals(4, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
        assertEquals(4, cluster.getSearch().getIndexed().getSearchableCopies());
        // Flat (ungrouped) topology: all nodes directly under the root group.
        assertFalse(cluster.getRootGroup().getPartitions().isPresent());
        assertEquals(4, cluster.getRootGroup().getNodes().size());
        assertEquals(0, cluster.getRootGroup().getSubgroups().size());
        assertEquals(4, cluster.getRootGroup().getNodes().size());
        assertEquals(0, cluster.getRootGroup().getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
        assertEquals(1, cluster.getRootGroup().getNodes().get(1).getDistributionKey());
        assertEquals("bar/storage/1", cluster.getRootGroup().getNodes().get(1).getConfigId());
        assertEquals(2, cluster.getRootGroup().getNodes().get(2).getDistributionKey());
        assertEquals("bar/storage/2", cluster.getRootGroup().getNodes().get(2).getConfigId());
        assertEquals(3, cluster.getRootGroup().getNodes().get(3).getDistributionKey());
        assertEquals("bar/storage/3", cluster.getRootGroup().getNodes().get(3).getConfigId());
    }
    @Test
    public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() {
        // 24 nodes in 3 groups requested but only a single host exists: the cluster
        // collapses to one ungrouped node with redundancy 1.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <admin version='3.0'>" +
                "    <nodes count='3'/>" +
                "  </admin>" +
                "  <content version='1.0' id='bar'>" +
                "    <redundancy reply-after='3'>4</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='24' groups='3'/>" +
                "    <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
                "  </content>" +
                "</services>";
        int numberOfHosts = 1;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(services, false);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        ContentCluster cluster = model.getContentClusters().get("bar");
        assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
        assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
        assertEquals(1, cluster.redundancy().effectiveReadyCopies());
        assertFalse(cluster.getRootGroup().getPartitions().isPresent());
        assertEquals(1, cluster.getRootGroup().getNodes().size());
        assertEquals(0, cluster.getRootGroup().getSubgroups().size());
        assertEquals(1, cluster.getRootGroup().getNodes().size());
        assertEquals(0, cluster.getRootGroup().getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
    }
@Test
public void testRequiringMoreNodesThanAreAvailable() {
assertThrows(IllegalArgumentException.class, () -> {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <content version='1.0' id='bar'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='3' required='true'/>" +
" </content>" +
"</services>";
int numberOfHosts = 2;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
tester.createModel(services, false);
});
}
@Test
public void testRequiredNodesAndDedicatedClusterControllers() {
assertThrows(IllegalArgumentException.class, () -> {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <content version='1.0' id='foo'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2' required='true'/>" +
" </content>" +
"</services>";
int numberOfHosts = 4;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
tester.createModel(services, false);
});
}
@Test
public void testExclusiveNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"<container version='1.0' id='container'>" +
" <nodes count='2' exclusive='true'/>" +
" </container>" +
" <content version='1.0' id='bar'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='3' exclusive='true'/>" +
" </content>" +
"</services>";
int numberOfHosts = 5;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
model.hostSystem().getHosts().forEach(host -> assertTrue(host.spec().membership().get().cluster().isExclusive()));
}
    @Test
    public void testUsingNodesCountAttributesAndGettingJustOneNode() {
        // 24 ungrouped content nodes requested but only a single host exists:
        // everything (redundancy, dispatch groups, searchable copies) collapses to 1.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <admin version='3.0'>" +
                "    <nodes count='3'/>" +
                "  </admin>" +
                "  <content version='1.0' id='bar'>" +
                "    <redundancy reply-after='8'>12</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='24'/>" +
                "    <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
                "    <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
                "  </content>" +
                "</services>";
        int numberOfHosts = 1;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(services, false);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        ContentCluster cluster = model.getContentClusters().get("bar");
        assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
        assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
        assertEquals(1, cluster.redundancy().effectiveReadyCopies());
        assertEquals(1, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
        assertFalse(cluster.getRootGroup().getPartitions().isPresent());
        assertEquals(1, cluster.getRootGroup().getNodes().size());
        assertEquals(0, cluster.getRootGroup().getSubgroups().size());
        assertEquals(1, cluster.getRootGroup().getNodes().size());
        assertEquals(0, cluster.getRootGroup().getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
    }
@Test
@Test
public void testRequestingRangesMin() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container'>" +
" <nodes count='[4, 6]'>" +
" <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" +
" </nodes>" +
" </container>" +
" <content version='1.0' id='foo'>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='[6, 20]' groups='[3,4]'>" +
" <resources vcpu='8' memory='200Gb' disk='1Pb'/>" +
" </nodes>" +
" </content>" +
"</services>";
int totalHosts = 10;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(new NodeResources(11.5, 10, 30, 0.3), 6);
tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20);
tester.addHosts(new NodeResources( 0.5, 2, 10, 0.3), 3);
VespaModel model = tester.createModel(services, true);
assertEquals(totalHosts + 3, model.getRoot().hostSystem().getHosts().size());
}
    @Test
    public void testRequestingRangesMax() {
        // Same ranged request as testRequestingRangesMin, but with hosts matching the
        // *maximum* resource specs: the maximum of each range is provisioned.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container'>" +
                "    <nodes count='[4, 6]'>" +
                "      <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" +
                "    </nodes>" +
                "  </container>" +
                "  <content version='1.0' id='foo'>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='[6, 20]' groups='[3,4]'>" +
                "      <resources vcpu='8' memory='200Gb' disk='1Pb'/>" +
                "    </nodes>" +
                "  </content>" +
                "</services>";
        int totalHosts = 29; // all 6 + 20 + 3 hosts added below
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(new NodeResources(13.5, 100, 1000, 0.3), 6);
        tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20);
        tester.addHosts(new NodeResources( 0.5, 2, 10, 0.3), 3);
        VespaModel model = tester.createModel(services, true, true);
        assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size());
    }
    @Test
    public void testUseArm64NodesForAdminCluster() {
        // With the admin cluster architecture set to arm64, cluster controllers and the
        // dedicated logserver must land on arm64 hosts, while all other services use
        // the default architecture.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <admin version='4.0'>" +
                "  </admin>" +
                "  <container version='1.0' id='container'>" +
                "    <nodes count='2'>" +
                "      <resources vcpu='2' memory='8Gb' disk='30Gb'/>" +
                "    </nodes>" +
                "  </container>" +
                "  <content version='1.0' id='foo'>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='2'>" +
                "      <resources vcpu='2' memory='8Gb' disk='30Gb'/>" +
                "    </nodes>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.setHosted(true);
        tester.setAdminClusterArchitecture(Architecture.arm64);
        tester.useDedicatedNodeForLogserver(true);
        // Four default-architecture hosts and four arm64 hosts.
        tester.addHosts(new NodeResources(13.5, 100, 1000, 0.3), 4);
        tester.addHosts(new NodeResources(0.5, 2, 50, 0.3, DiskSpeed.fast, StorageType.any, Architecture.arm64), 4);
        VespaModel model = tester.createModel(services, true, true);
        List<HostResource> hosts = model.getRoot().hostSystem().getHosts();
        assertEquals(8, hosts.size());
        Set<HostResource> clusterControllerResources = getHostResourcesForService(hosts, "container-clustercontroller");
        assertEquals(3, clusterControllerResources.size());
        assertTrue(clusterControllerResources.stream().allMatch(host -> host.realResources().architecture() == Architecture.arm64));
        Set<HostResource> logserverResources = getHostResourcesForService(hosts, "logserver-container");
        assertEquals(1, logserverResources.size());
        assertTrue(logserverResources.stream().allMatch(host -> host.realResources().architecture() == Architecture.arm64));
        // Everything that is not admin (cluster controller / logserver) stays on the default architecture.
        assertTrue(hosts.stream()
                        .filter(host -> !clusterControllerResources.contains(host))
                        .filter(host -> !logserverResources.contains(host))
                        .allMatch(host -> host.realResources().architecture() == Architecture.getDefault()));
    }
private Set<HostResource> getHostResourcesForService(List<HostResource> hosts, String service) {
return hosts.stream()
.filter(host -> host.getHostInfo().getServices().stream()
.anyMatch(s -> s.getServiceType().equals(service)))
.collect(Collectors.toSet());
}
@Test
public void testContainerOnly() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<container version='1.0'>" +
" <search/>" +
" <nodes count='3'/>" +
"</container>";
int numberOfHosts = 3;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
assertEquals(3, model.getContainerClusters().get("container").getContainers().size());
assertNotNull(model.getAdmin().getLogserver());
assertEquals(3, model.getAdmin().getSlobroks().size());
}
@Test
public void testJvmOptions() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<container version='1.0'>" +
" <search/>" +
" <nodes count='3'>" +
" <jvm options='-DfooOption=xyz' /> " +
" </nodes>" +
"</container>";
int numberOfHosts = 3;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
assertEquals("-DfooOption=xyz", model.getContainerClusters().get("container").getContainers().get(0).getAssignedJvmOptions());
}
    @Test
    public void testUsingHostaliasWithProvisioner() {
        // Non-hosted: explicit host aliases in services.xml work with the provisioner;
        // a single host carries admin, container and slobrok.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "<admin version='2.0'>" +
                "  <adminserver hostalias='node1'/>\n"+
                "</admin>\n" +
                "<container id='mydisc' version='1.0'>" +
                "  <handler id='myHandler'>" +
                "    <component id='injected' />" +
                "  </handler>" +
                "  <nodes>" +
                "    <node hostalias='node1'/>" +
                "  </nodes>" +
                "</container>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.setHosted(false);
        tester.addHosts(1);
        VespaModel model = tester.createModel(services, true);
        assertEquals(1, model.getRoot().hostSystem().getHosts().size());
        assertEquals(1, model.getAdmin().getSlobroks().size());
    }
@Test
public void testThatStandaloneSyntaxWorksOnHostedVespa() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<container id='foo' version='1.0'>" +
" <http>" +
" <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" +
" </http>" +
"</container>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
VespaModel model = tester.createModel(services, true);
assertEquals(2, model.getHosts().size());
assertEquals(1, model.getContainerClusters().size());
assertEquals(2, model.getContainerClusters().get("foo").getContainers().size());
}
@Test
public void testThatStandaloneSyntaxOnHostedVespaRequiresDefaultPort() {
try {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<container id='foo' version='1.0'>" +
" <http>" +
" <server id='server1' port='8095' />" +
" </http>" +
"</container>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("Illegal port 8095 in http server 'server1': Port must be set to " +
getDefaults().vespaWebServicePort(), e.getMessage());
}
}
    @Test
    public void testThatStandaloneSyntaxWorksOnHostedManuallyDeployed() {
        // Host-alias syntax deployed to a hosted dev zone: each cluster is downscaled
        // to one node, plus one dedicated cluster controller.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <admin version='2.0'>" +
                "    <adminserver hostalias='node1'/>" +
                "  </admin>" +
                "  <container id='foo' version='1.0'>" +
                "    <nodes>" +
                "      <node hostalias='node1'/>" +
                "      <node hostalias='node2'/>" +
                "    </nodes>" +
                "  </container>" +
                "  <content id='bar' version='1.0'>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <redundancy>2</redundancy>" +
                "    <nodes>" +
                "      <group>" +
                "        <node distribution-key='0' hostalias='node3'/>" +
                "        <node distribution-key='1' hostalias='node4'/>" +
                "      </group>" +
                "    </nodes>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.setHosted(true);
        tester.addHosts(4);
        VespaModel model = tester.createModel(new Zone(Environment.dev, RegionName.from("us-central-1")), services, true);
        assertEquals(3, model.getHosts().size(), "We get 1 node per cluster and no admin node apart from the dedicated cluster controller");
        assertEquals(1, model.getContainerClusters().size());
        assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
        assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes(true));
        assertEquals(1, model.getAdmin().getClusterControllers().getContainers().size());
    }
@Test
public void testThatStandaloneSyntaxWithClusterControllerWorksOnHostedManuallyDeployed() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container id='foo' version='1.0'>" +
" <nodes count=\"2\" />" +
" </container>" +
" <content id='bar' version='1.0'>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <redundancy>1</redundancy>" +
" <nodes>" +
" <group>" +
" <node distribution-key='0' hostalias='node3'/>" +
" </group>" +
" </nodes>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.setHosted(true);
tester.addHosts(4);
try {
VespaModel model = tester.createModel(new Zone(Environment.staging, RegionName.from("us-central-1")), services, true);
fail("expected failure");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().startsWith("Clusters in hosted environments must have a <nodes count='N'> tag"));
}
}
    /** Deploying an application with "nodes count" standalone should give a single-node deployment */
    @Test
    public void testThatHostedSyntaxWorksOnStandalone() {
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container1'>" +
                "    <search/>" +
                "    <nodes count='1'/>" +
                "  </container>" +
                "  <content version='1.0'>" +
                "    <redundancy>2</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='2'/>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.setHosted(false);
        tester.addHosts(3);
        VespaModel model = tester.createModel(services, true);
        assertEquals(1,
                     model.getContainerClusters().get("container1").getContainers().size(),
                     "Nodes in container cluster");
        assertEquals(1,
                     model.getContentClusters().get("content").getRootGroup().getNodes().size(),
                     "Nodes in content cluster (downscaled)");
        assertEquals(1, model.getAdmin().getSlobroks().size());
        // Smoke-test config production for the downscaled model.
        model.getConfig(new StorStatusConfig.Builder(), "default");
        StorageCluster storage = model.getContentClusters().get("content").getStorageCluster();
        StorCommunicationmanagerConfig.Builder builder = new StorCommunicationmanagerConfig.Builder();
        storage.getChildren().get("0").getConfig(builder);
    }
    /**
     * Deploying an application with "nodes count" standalone should give a single-node deployment,
     * also if the user has a lingering hosts file from running self-hosted.
     *
     * NOTE: This does *not* work (but gives an understandable error message),
     * but the current code does not provoke the error that is thrown from HostsXmlProvisioner.prepare
     */
    @Test
    public void testThatHostedSyntaxWorksOnStandaloneAlsoWithAHostedFile() {
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container1'>" +
                "    <search/>" +
                "    <nodes count='1'/>" +
                "  </container>" +
                "  <content version='1.0'>" +
                "    <redundancy>2</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='2'/>" +
                "  </content>" +
                "</services>";
        // Lingering self-hosted hosts.xml which should be ignored in this mode.
        String hosts =
                "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
                "<hosts>\n" +
                "  <host name=\"vespa-1\">\n" +
                "    <alias>vespa-1</alias>\n" +
                "  </host>\n" +
                "  <host name=\"vespa-2\">\n" +
                "    <alias>vespa-2</alias>\n" +
                "  </host>\n" +
                "  <host name=\"vespa-3\">\n" +
                "    <alias>vespa-3</alias>\n" +
                "  </host>\n" +
                "</hosts>";
        VespaModelTester tester = new VespaModelTester();
        tester.setHosted(false);
        tester.addHosts(3);
        VespaModel model = tester.createModel(services, hosts, true);
        assertEquals(1,
                     model.getContainerClusters().get("container1").getContainers().size(),
                     "Nodes in container cluster");
        assertEquals(1,
                     model.getContentClusters().get("content").getRootGroup().getNodes().size(),
                     "Nodes in content cluster (downscaled)");
        assertEquals(1, model.getAdmin().getSlobroks().size());
        // Smoke-test config production for the downscaled model.
        model.getConfig(new StorStatusConfig.Builder(), "default");
        StorageCluster storage = model.getContentClusters().get("content").getStorageCluster();
        StorCommunicationmanagerConfig.Builder builder = new StorCommunicationmanagerConfig.Builder();
        storage.getChildren().get("0").getConfig(builder);
    }
@Test
public void testNoNodeTagMeansTwoNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <container id='foo' version='1.0'>" +
" <search/>" +
" <document-api/>" +
" </container>" +
" <content version='1.0' id='bar'>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(6);
VespaModel model = tester.createModel(services, true);
assertEquals(6, model.getRoot().hostSystem().getHosts().size());
assertEquals(3, model.getAdmin().getSlobroks().size());
assertEquals(2, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes(true));
}
@Test
public void testNoNodeTagMeansTwoNodesNoContent() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <container id='foo' version='1.0'>" +
" <search/>" +
" <document-api/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
VespaModel model = tester.createModel(services, true);
assertEquals(2, model.getRoot().hostSystem().getHosts().size());
assertEquals(2, model.getAdmin().getSlobroks().size());
assertEquals(2, model.getContainerClusters().get("foo").getContainers().size());
}
    @Test
    public void testNoNodeTagMeans1NodeNonHosted() {
        // Non-hosted: clusters without a <nodes> tag all collapse onto a single node.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <container id='foo' version='1.0'>" +
                "    <search/>" +
                "    <document-api/>" +
                "  </container>" +
                "  <content version='1.0' id='bar'>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.setHosted(false);
        tester.addHosts(1);
        VespaModel model = tester.createModel(services, true);
        assertEquals(1, model.getRoot().hostSystem().getHosts().size());
        assertEquals(1, model.getAdmin().getSlobroks().size());
        assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
        assertEquals(1, model.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size());
    }
    @Test
    public void testSingleNodeNonHosted() {
        // Non-hosted with explicit single-host aliases: one node carries the
        // container, the content node and the slobrok.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <container id='foo' version='1.0'>" +
                "    <search/>" +
                "    <document-api/>" +
                "    <nodes><node hostalias='foo'/></nodes>"+
                "  </container>" +
                "  <content version='1.0' id='bar'>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes><node hostalias='foo' distribution-key='0'/></nodes>"+
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.setHosted(false);
        tester.addHosts(1);
        VespaModel model = tester.createModel(services, true);
        assertEquals(1, model.getRoot().hostSystem().getHosts().size());
        assertEquals(1, model.getAdmin().getSlobroks().size());
        assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
        assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes(true));
    }
    /** Recreate the combination used in some factory tests */
    @Test
    public void testMultitenantButNotHosted() {
        // Multitenant (non-provisioned) but not hosted: everything maps onto the single
        // declared host, including a one-container cluster controller cluster.
        String services =
                "<?xml version='1.0' encoding='UTF-8' ?>" +
                "<services version='1.0'>" +
                "  <admin version='2.0'>" +
                "    <adminserver hostalias='node1'/>" +
                "  </admin>"  +
                "  <container id='default' version='1.0'>" +
                "    <search/>" +
                "    <nodes>" +
                "      <node hostalias='node1'/>" +
                "    </nodes>" +
                "  </container>" +
                "  <content id='storage' version='1.0'>" +
                "    <redundancy>2</redundancy>" +
                "    <group>" +
                "      <node distribution-key='0' hostalias='node1'/>" +
                "      <node distribution-key='1' hostalias='node1'/>" +
                "    </group>" +
                "    <tuning>" +
                "      <cluster-controller>" +
                "        <transition-time>0</transition-time>" +
                "      </cluster-controller>" +
                "    </tuning>" +
                "    <documents>" +
                "      <document mode='store-only' type='type1'/>" +
                "    </documents>" +
                "    <engine>" +
                "      <proton/>" +
                "    </engine>" +
                "  </content>" +
                " </services>";
        VespaModel model = createNonProvisionedMultitenantModel(services);
        assertEquals(1, model.getRoot().hostSystem().getHosts().size());
        ContentCluster content = model.getContentClusters().get("storage");
        // Both content nodes share the single host.
        assertEquals(2, content.getRootGroup().getNodes().size());
        ContainerCluster<?> controller = model.getAdmin().getClusterControllers();
        assertEquals(1, controller.getContainers().size());
    }
    @Test
    public void testModelWithReferencedIndexingCluster() {
        // A content cluster delegating indexing (document-processing) to an explicitly referenced
        // container cluster, in a non-provisioned multitenant setup on a single host.
        String services =
                "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
                "<services version=\"1.0\">\n" +
                "\n" +
                "  <admin version=\"2.0\">\n" +
                "    <adminserver hostalias=\"vespa-1\"/>\n" +
                "    <configservers>\n" +
                "      <configserver hostalias=\"vespa-1\"/>\n" +
                "    </configservers>\n" +
                "  </admin>\n" +
                "\n" +
                "  <container id=\"container\" version=\"1.0\">\n" +
                "    <document-processing/>\n" +
                "    <document-api/>\n" +
                "    <search/>\n" +
                "    <nodes>\n" +
                "      <jvm options=\"-Xms512m -Xmx512m\"/>\n" +
                "      <node hostalias=\"vespa-1\"/>\n" +
                "    </nodes>\n" +
                "  </container>\n" +
                "\n" +
                "  <content id=\"storage\" version=\"1.0\">\n" +
                "    <search>\n" +
                "      <visibility-delay>1.0</visibility-delay>\n" +
                "    </search>\n" +
                "    <redundancy>2</redundancy>\n" +
                "    <documents>\n" +
                "      <document type=\"type1\" mode=\"index\"/>\n" +
                "      <document-processing cluster=\"container\"/>\n" +
                "    </documents>\n" +
                "    <nodes>\n" +
                "      <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
                "    </nodes>\n" +
                "  </content>\n" +
                "\n" +
                "</services>";
        VespaModel model = createNonProvisionedMultitenantModel(services);
        assertEquals(1, model.getRoot().hostSystem().getHosts().size());
        ContentCluster content = model.getContentClusters().get("storage");
        assertEquals(1, content.getRootGroup().getNodes().size());
        // The implicit cluster controller for the content cluster
        ContainerCluster<?> controller = model.getAdmin().getClusterControllers();
        assertEquals(1, controller.getContainers().size());
    }
    @Test
    public void testSharedNodesNotHosted() {
        // Non-hosted deployment with an explicit hosts.xml: the same three hosts back both the
        // container cluster and the content cluster (nodes are shared between clusters).
        String hosts =
                "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
                "<hosts>\n" +
                "  <host name=\"vespa-1\">\n" +
                "    <alias>vespa-1</alias>\n" +
                "  </host>\n" +
                "  <host name=\"vespa-2\">\n" +
                "    <alias>vespa-2</alias>\n" +
                "  </host>\n" +
                "  <host name=\"vespa-3\">\n" +
                "    <alias>vespa-3</alias>\n" +
                "  </host>\n" +
                "</hosts>";
        String services =
                "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
                "<services version=\"1.0\">\n" +
                "\n" +
                "  <admin version=\"2.0\">\n" +
                "    <adminserver hostalias=\"vespa-1\"/>\n" +
                "    <configservers>\n" +
                "      <configserver hostalias=\"vespa-1\"/>\n" +
                "    </configservers>\n" +
                "  </admin>\n" +
                "\n" +
                "  <container id=\"container\" version=\"1.0\">\n" +
                "    <document-processing/>\n" +
                "    <document-api/>\n" +
                "    <search/>\n" +
                "    <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" +
                "      <node hostalias=\"vespa-1\"/>\n" +
                "      <node hostalias=\"vespa-2\"/>\n" +
                "      <node hostalias=\"vespa-3\"/>\n" +
                "    </nodes>\n" +
                "  </container>\n" +
                "\n" +
                "  <content id=\"storage\" version=\"1.0\">\n" +
                "    <search>\n" +
                "      <visibility-delay>1.0</visibility-delay>\n" +
                "    </search>\n" +
                "    <redundancy>2</redundancy>\n" +
                "    <documents>\n" +
                "      <document type=\"type1\" mode=\"index\"/>\n" +
                "      <document-processing cluster=\"container\"/>\n" +
                "    </documents>\n" +
                "    <nodes>\n" +
                "      <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
                "      <node hostalias=\"vespa-2\" distribution-key=\"1\"/>\n" +
                "      <node hostalias=\"vespa-3\" distribution-key=\"2\"/>\n" +
                "    </nodes>\n" +
                "  </content>\n" +
                "\n" +
                "</services>";
        VespaModel model = createNonProvisionedModel(false, hosts, services);
        // Three hosts in total, shared by both clusters
        assertEquals(3, model.getRoot().hostSystem().getHosts().size());
        ContentCluster content = model.getContentClusters().get("storage");
        assertEquals(3, content.getRootGroup().getNodes().size());
    }
    @Test
    public void testMultitenantButNotHostedSharedContentNode() {
        // Same as testMultitenantButNotHosted, but with a second content cluster ('search')
        // sharing the single host with the 'storage' cluster.
        String services =
                "<?xml version='1.0' encoding='UTF-8' ?>" +
                "<services version='1.0'>" +
                "  <admin version='2.0'>" +
                "    <adminserver hostalias='node1'/>" +
                "  </admin>" +
                "  <container id='default' version='1.0'>" +
                "    <search/>" +
                "    <nodes>" +
                "      <node hostalias='node1'/>" +
                "    </nodes>" +
                "  </container>" +
                "  <content id='storage' version='1.0'>" +
                "    <redundancy>2</redundancy>" +
                "    <group>" +
                "      <node distribution-key='0' hostalias='node1'/>" +
                "      <node distribution-key='1' hostalias='node1'/>" +
                "    </group>" +
                "    <tuning>" +
                "      <cluster-controller>" +
                "        <transition-time>0</transition-time>" +
                "      </cluster-controller>" +
                "    </tuning>" +
                "    <documents>" +
                "      <document mode='store-only' type='type1'/>" +
                "    </documents>" +
                "    <engine>" +
                "      <proton/>" +
                "    </engine>" +
                "  </content>" +
                "  <content id='search' version='1.0'>" +
                "    <redundancy>2</redundancy>" +
                "    <group>" +
                "      <node distribution-key='0' hostalias='node1'/>" +
                "    </group>" +
                "    <documents>" +
                "      <document type='type1'/>" +
                "    </documents>" +
                "  </content>" +
                "</services>";
        VespaModel model = createNonProvisionedMultitenantModel(services);
        // Still a single shared host
        assertEquals(1, model.getRoot().hostSystem().getHosts().size());
        ContentCluster content = model.getContentClusters().get("storage");
        assertEquals(2, content.getRootGroup().getNodes().size());
        ContainerCluster<?> controller = model.getAdmin().getClusterControllers();
        assertEquals(1, controller.getContainers().size());
    }
    @Test
    public void testStatefulProperty() {
        // Content clusters and ZooKeeper-enabled container clusters must be provisioned as stateful;
        // plain container clusters must not.
        String servicesXml =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='qrs'>" +
                "    <nodes count='1'/>" +
                "  </container>" +
                "  <container version='1.0' id='zk'>" +
                "    <zookeeper/>" +
                "    <nodes count='3'/>" +
                "  </container>" +
                "  <content version='1.0' id='content'>" +
                "    <redundancy>2</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='2'/>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(9);
        VespaModel model = tester.createModel(servicesXml, true);
        // Expected statefulness per cluster id
        Map<String, Boolean> tests = Map.of("qrs", false,
                                            "zk", true,
                                            "content", true);
        Map<String, List<HostResource>> hostsByCluster = model.hostSystem().getHosts().stream()
                .collect(Collectors.groupingBy(h -> h.spec().membership().get().cluster().id().value()));
        tests.forEach((clusterId, stateful) -> {
            List<HostResource> hosts = hostsByCluster.getOrDefault(clusterId, List.of());
            assertFalse(hosts.isEmpty(), "Hosts are provisioned for '" + clusterId + "'");
            assertEquals(stateful,
                         hosts.stream().allMatch(h -> h.spec().membership().get().cluster().isStateful()),
                         "Hosts in cluster '" + clusterId + "' are " + (stateful ? "" : "not ") + "stateful");
        });
    }
    @Test
    public void containerWithZooKeeperSuboptimalNodeCountDuringRetirement() {
        // Retiring one node of a 3-node ZooKeeper container cluster yields 3 active + 1 retired
        // container during the transition (node count is temporarily above the requested 3).
        String servicesXml =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='zk'>" +
                "    <zookeeper/>" +
                "    <nodes count='3'/>" +
                "  </container>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(4);
        // The named host is marked for retirement
        VespaModel model = tester.createModel(servicesXml, true, "node-1-3-50-04");
        ApplicationContainerCluster cluster = model.getContainerClusters().get("zk");
        assertEquals(1, cluster.getContainers().stream().filter(Container::isRetired).count());
        assertEquals(3, cluster.getContainers().stream().filter(c -> !c.isRetired()).count());
    }
    @Test
    public void containerWithZooKeeperJoiningServers() {
        // When a ZooKeeper container cluster is redeployed with retired nodes, the replacement
        // servers must be flagged as 'joining' and the retired ones as 'retired' in the config.
        Function<Integer, String> servicesXml = (nodeCount) -> {
            return "<?xml version='1.0' encoding='utf-8' ?>" +
                   "<services>" +
                   "  <container version='1.0' id='zk'>" +
                   "    <zookeeper/>" +
                   "    <nodes count='" + nodeCount + "'/>" +
                   "  </container>" +
                   "</services>";
        };
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(5);
        VespaModel model = tester.createModel(servicesXml.apply(3), true);
        {
            // Initial deployment: no server is joining
            ApplicationContainerCluster cluster = model.getContainerClusters().get("zk");
            ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder();
            cluster.getContainers().forEach(c -> c.getConfig(config));
            cluster.getConfig(config);
            assertTrue(config.build().server().stream().noneMatch(ZookeeperServerConfig.Server::joining), "Initial servers are not joining");
        }
        {
            // Redeploy with two hosts retired; two new servers (ids 3 and 4) join the ensemble
            VespaModel nextModel = tester.createModel(Zone.defaultZone(), servicesXml.apply(3), true, false, false, 0, Optional.of(model), new DeployState.Builder(), "node-1-3-50-04", "node-1-3-50-03");
            ApplicationContainerCluster cluster = nextModel.getContainerClusters().get("zk");
            ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder();
            cluster.getContainers().forEach(c -> c.getConfig(config));
            cluster.getConfig(config);
            assertEquals(Map.of(0, false,
                                1, false,
                                2, false,
                                3, true,
                                4, true),
                         config.build().server().stream().collect(Collectors.toMap(ZookeeperServerConfig.Server::id,
                                                                                   ZookeeperServerConfig.Server::joining)),
                         "New nodes are joining");
            assertEquals(Map.of(0, false,
                                1, true,
                                2, true,
                                3, false,
                                4, false),
                         config.build().server().stream().collect(Collectors.toMap(ZookeeperServerConfig.Server::id,
                                                                                   ZookeeperServerConfig.Server::retired)),
                         "Retired nodes are retired");
        }
    }
    /** Creates a multitenant, non-provisioned model from services XML only (no hosts.xml). */
    private VespaModel createNonProvisionedMultitenantModel(String services) {
        return createNonProvisionedModel(true, null, services);
    }
private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) {
VespaModelCreatorWithMockPkg modelCreatorWithMockPkg = new VespaModelCreatorWithMockPkg(hosts, services, generateSchemas("type1"));
ApplicationPackage appPkg = modelCreatorWithMockPkg.appPkg;
DeployState deployState = new DeployState.Builder().applicationPackage(appPkg).
properties((new TestProperties()).setMultitenant(multitenant)).
build();
return modelCreatorWithMockPkg.create(false, deployState);
}
private int physicalMemoryPercentage(ContainerCluster<?> cluster) {
QrStartConfig.Builder b = new QrStartConfig.Builder();
cluster.getConfig(b);
return b.build().jvm().heapSizeAsPercentageOfPhysicalMemory();
}
private long protonMemorySize(ContentCluster cluster) {
ProtonConfig.Builder b = new ProtonConfig.Builder();
cluster.getSearch().getIndexed().getSearchNode(0).getConfig(b);
return b.build().hwinfo().memory().size();
}
    @Test
    public void require_that_proton_config_is_tuned_based_on_node_resources() {
        // Proton's expected disk write speed should be tuned down when nodes declare slow disks.
        String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
                "<services>",
                "  <content version='1.0' id='test'>",
                "    <documents>",
                "      <document type='type1' mode='index'/>",
                "    </documents>",
                "    <nodes count='2'>",
                "      <resources vcpu='1' memory='3Gb' disk='9Gb' bandwidth='5Gbps' disk-speed='slow'/>",
                "    </nodes>",
                "  </content>",
                "</services>");
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(new NodeResources(1, 3, 10, 5, NodeResources.DiskSpeed.slow), 5);
        VespaModel model = tester.createModel(services, true, 0);
        ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
        assertEquals(2, cluster.getSearchNodes().size());
        // Slow disk => reduced write speed setting on both search nodes
        assertEquals(40, getProtonConfig(cluster, 0).hwinfo().disk().writespeed(), 0.001);
        assertEquals(40, getProtonConfig(cluster, 1).hwinfo().disk().writespeed(), 0.001);
    }
private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
List<SearchNode> searchNodes = cluster.getSearchNodes();
assertTrue(searchNodeIdx < searchNodes.size());
searchNodes.get(searchNodeIdx).getConfig(builder);
return new ProtonConfig(builder);
}
    @Test
    public void require_that_config_override_and_explicit_proton_tuning_and_resource_limits_have_precedence_over_default_node_resource_tuning() {
        // Explicit <config> overrides and <tuning> settings must win over the defaults derived
        // from node resources; untouched settings still get resource-based tuning.
        String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
                "<services>",
                "  <content version='1.0' id='test'>",
                "    <config name='vespa.config.search.core.proton'>",
                "      <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>",
                "    </config>",
                "    <documents>",
                "      <document type='type1' mode='index'/>",
                "    </documents>",
                "    <nodes count='1'>",
                "      <resources vcpu='1' memory='128Gb' disk='100Gb'/>",
                "    </nodes>",
                "    <engine>",
                "      <proton>",
                "        <tuning>",
                "          <searchnode>",
                "            <flushstrategy>",
                "              <native>",
                "                <total>",
                "                  <maxmemorygain>1000</maxmemorygain>",
                "                </total>",
                "              </native>",
                "            </flushstrategy>",
                "          </searchnode>",
                "        </tuning>",
                "      </proton>",
                "    </engine>",
                "  </content>",
                "</services>");
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(new NodeResources(1, 3, 10, 1), 4);
        tester.addHosts(new NodeResources(1, 128, 100, 0.3), 1);
        VespaModel model = tester.createModel(services, true, 0);
        ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
        ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId());
        assertEquals(2000, cfg.flush().memory().maxtlssize()); // from config override
        assertEquals(1000, cfg.flush().memory().maxmemory()); // from explicit proton tuning
        assertEquals((long) ((128 - reservedMemoryGb) * GB * 0.08), cfg.flush().memory().each().maxmemory()); // from node resource tuning
    }
private static ProtonConfig getProtonConfig(VespaModel model, String configId) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
model.getConfig(builder, configId);
return new ProtonConfig(builder);
}
    /**
     * Deploys the given services and verifies that a logserver container runs on the logserver
     * host, with the expected application metadata and logd config.
     */
    private void testContainerOnLogserverHost(String services, boolean useDedicatedNodeForLogserver) {
        int numberOfHosts = 2;
        VespaModelTester tester = new VespaModelTester();
        tester.useDedicatedNodeForLogserver(useDedicatedNodeForLogserver);
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(Zone.defaultZone(), services, true);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        Admin admin = model.getAdmin();
        Logserver logserver = admin.getLogserver();
        HostResource hostResource = logserver.getHostResource();
        assertNotNull(hostResource.getService("logserver"));
        // The logserver host also runs a logserver container service
        String containerServiceType = ContainerServiceType.LOGSERVER_CONTAINER.serviceName;
        assertNotNull(hostResource.getService(containerServiceType));
        String configId = admin.getLogserver().getHostResource().getService(containerServiceType).getConfigId();
        // The container serves application metadata config for the current generation
        ApplicationMetadataConfig.Builder builder = new ApplicationMetadataConfig.Builder();
        model.getConfig(builder, configId);
        ApplicationMetadataConfig cfg = new ApplicationMetadataConfig(builder);
        assertEquals(1, cfg.generation());
        // logd on this host is configured to use the logserver
        LogdConfig.Builder logdConfigBuilder = new LogdConfig.Builder();
        model.getConfig(logdConfigBuilder, configId);
        LogdConfig logdConfig = new LogdConfig(logdConfigBuilder);
        assertTrue(logdConfig.logserver().use());
    }
private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Id combinedId,
ClusterSpec.Type type, VespaModel model) {
assertEquals(nodeCount,
model.hostSystem().getHosts().stream()
.map(h -> h.spec().membership().get().cluster())
.filter(spec -> spec.id().equals(id) && spec.type().equals(type) && spec.combinedId().equals(Optional.ofNullable(combinedId)))
.count(),
"Nodes in cluster " + id + " with type " + type + (combinedId != null ? ", combinedId " + combinedId : ""));
}
    /** Asserts provisioned node count for a cluster that is not part of a combined cluster. */
    private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Type type, VespaModel model) {
        assertProvisioned(nodeCount, id, null, type, model);
    }
record TestLogger(List<LogMessage> msgs) implements DeployLogger {
public TestLogger() {
this(new ArrayList<>());
}
@Override
public void log(Level level, String message) {
msgs.add(new LogMessage(level, message));
}
record LogMessage(Level level, String message) {}
}
} | class ModelProvisioningTest {
    @Test
    public void testNodesJdisc() {
        // Two container clusters with node counts, deployed with an in-memory provisioner backed by
        // explicit hosts: verifies config ids, initialization, jvm options/gc-options, preload,
        // allocated memory percentage and host name lookup.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>\n" +
                "\n" +
                "<admin version='3.0'><nodes count='1' /></admin>\n" +
                "<container id='mydisc' version='1.0'>" +
                "  <handler id='myHandler'>" +
                "    <component id='injected' />" +
                "  </handler>" +
                "  <nodes count=\"3\"/>" +
                "</container>" +
                "<container id='mydisc2' version='1.0'>" +
                "  <document-processing/>" +
                "  <handler id='myHandler'>" +
                "    <component id='injected' />" +
                "  </handler>" +
                "  <nodes count='2' preload='lib/blablamalloc.so'>" +
                "    <jvm allocated-memory='45%' gc-options='-XX:+UseParNewGC' options='-Xlog:gc' />" +
                "  </nodes>" +
                "</container>" +
                "</services>";
        String hosts ="<hosts>"
                + "  <host name='myhost0'>"
                + "    <alias>node0</alias>"
                + "  </host>"
                + "  <host name='myhost1'>"
                + "    <alias>node1</alias>"
                + "  </host>"
                + "  <host name='myhost2'>"
                + "    <alias>node2</alias>"
                + "  </host>"
                + "  <host name='myhost3'>"
                + "    <alias>node3</alias>"
                + "  </host>"
                + "  <host name='myhost4'>"
                + "    <alias>node4</alias>"
                + "  </host>"
                + "  <host name='myhost5'>"
                + "    <alias>node5</alias>"
                + "  </host>"
                + "</hosts>";
        VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services);
        VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true, false)));
        ApplicationContainerCluster mydisc = model.getContainerClusters().get("mydisc");
        ApplicationContainerCluster mydisc2 = model.getContainerClusters().get("mydisc2");
        // Config ids are assigned sequentially within each cluster
        assertEquals(3, mydisc.getContainers().size());
        assertEquals("mydisc/container.0", (mydisc.getContainers().get(0).getConfigId()));
        assertTrue(mydisc.getContainers().get(0).isInitialized());
        assertEquals("mydisc/container.1", mydisc.getContainers().get(1).getConfigId());
        assertTrue(mydisc.getContainers().get(1).isInitialized());
        assertEquals("mydisc/container.2", mydisc.getContainers().get(2).getConfigId());
        assertTrue(mydisc.getContainers().get(2).isInitialized());
        assertEquals(2, mydisc2.getContainers().size());
        assertEquals("mydisc2/container.0", mydisc2.getContainers().get(0).getConfigId());
        assertTrue(mydisc2.getContainers().get(0).isInitialized());
        assertEquals("mydisc2/container.1", mydisc2.getContainers().get(1).getConfigId());
        assertTrue(mydisc2.getContainers().get(1).isInitialized());
        // mydisc uses defaults: no jvm options, default vespamalloc preload, no memory override
        assertEquals("", mydisc.getContainers().get(0).getJvmOptions());
        assertEquals("", mydisc.getContainers().get(1).getJvmOptions());
        assertEquals("", mydisc.getContainers().get(2).getJvmOptions());
        assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(0).getPreLoad());
        assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(1).getPreLoad());
        assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(2).getPreLoad());
        assertEquals(Optional.empty(), mydisc.getMemoryPercentage());
        // mydisc2 picks up the explicit jvm settings and preload from services.xml
        assertEquals("-Xlog:gc", mydisc2.getContainers().get(0).getJvmOptions());
        assertEquals("-Xlog:gc", mydisc2.getContainers().get(1).getJvmOptions());
        assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(0).getPreLoad());
        assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(1).getPreLoad());
        assertEquals(Optional.of(45), mydisc2.getMemoryPercentage());
        assertEquals(Optional.of("-XX:+UseParNewGC"), mydisc2.getJvmGCOptions());
        // The allocated-memory percentage propagates into QrStartConfig
        QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
        mydisc2.getConfig(qrStartBuilder);
        QrStartConfig qrsStartConfig = new QrStartConfig(qrStartBuilder);
        assertEquals(45, qrsStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory());
        HostSystem hostSystem = model.hostSystem();
        assertNotNull(hostSystem.getHostByHostname("myhost0"));
        assertNotNull(hostSystem.getHostByHostname("myhost1"));
        assertNotNull(hostSystem.getHostByHostname("myhost2"));
        assertNotNull(hostSystem.getHostByHostname("myhost3"));
        assertNull(hostSystem.getHostByHostname("Nope"));
    }
    @Test
    public void testNodeCountForContentGroup() {
        // Content nodes in a group are assigned sequential distribution keys starting at 0.
        String xmlWithNodes =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "\n" +
                "  <admin version='3.0'>" +
                "    <nodes count='3'/>" +
                "  </admin>" +
                "  <content version='1.0' id='bar'>" +
                "    <redundancy>2</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='2'/>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        int numberOfHosts = 5;
        tester.addHosts(numberOfHosts);
        int numberOfContentNodes = 2;
        VespaModel model = tester.createModel(xmlWithNodes, true);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        Map<String, ContentCluster> contentClusters = model.getContentClusters();
        ContentCluster cluster = contentClusters.get("bar");
        assertEquals(numberOfContentNodes, cluster.getRootGroup().getNodes().size());
        int i = 0;
        for (StorageNode node : cluster.getRootGroup().getNodes())
            assertEquals(i++, node.getDistributionKey());
    }
    @Test
    public void testSeparateClusters() {
        // Separate (non-combined) container and content clusters; a content cluster without an id
        // gets the default id 'content'. Heap size stays at the default for separate clusters.
        String xmlWithNodes =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container1'>" +
                "    <search/>" +
                "    <nodes count='1'/>" +
                "  </container>" +
                "  <content version='1.0' id='content1'>" +
                "    <redundancy>2</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='2'/>" +
                "  </content>" +
                "  <content version='1.0'>" +
                "    <redundancy>2</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='2'/>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(8);
        VespaModel model = tester.createModel(xmlWithNodes, true);
        assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
        assertEquals(1, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
        assertEquals(2, model.getContentClusters().get("content").getRootGroup().getNodes().size(), "Nodes in cluster without ID");
        assertEquals(ApplicationContainerCluster.defaultHeapSizePercentageOfTotalNodeMemory, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size for container");
        assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Type.content, model);
        assertProvisioned(1, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
        assertProvisioned(2, ClusterSpec.Id.from("content"), ClusterSpec.Type.content, model);
    }
    @Test
    public void testClusterMembership() {
        // Each provisioned host carries membership info identifying its cluster type and id.
        String xmlWithNodes =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container1'>" +
                "    <nodes count='1'/>" +
                "  </container>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(1);
        VespaModel model = tester.createModel(xmlWithNodes, true);
        assertEquals(1, model.hostSystem().getHosts().size());
        HostResource host = model.hostSystem().getHosts().iterator().next();
        assertTrue(host.spec().membership().isPresent());
        assertEquals("container", host.spec().membership().get().cluster().type().name());
        assertEquals("container1", host.spec().membership().get().cluster().id().value());
    }
    @Test
    public void testCombinedCluster() {
        // A container cluster declared with <nodes of='...'> runs on the content cluster's nodes:
        // heap percentage is lowered, proton memory is reduced by the jvm heap, no separate
        // container nodes are provisioned, and a deprecation warning is logged.
        String xmlWithNodes =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container1'>" +
                "    <search/>" +
                "    <nodes of='content1'/>" +
                "  </container>" +
                "  <content version='1.0' id='content1'>" +
                "    <redundancy>2</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='2'>" +
                "      <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +
                "    </nodes>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(5);
        TestLogger logger = new TestLogger();
        VespaModel model = tester.createModel(xmlWithNodes, true, new DeployState.Builder().deployLogger(logger));
        assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
        assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
        assertEquals(18, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is lowered with combined clusters");
        assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.18)), protonMemorySize(model.getContentClusters()
                .get("content1")), "Memory for proton is lowered to account for the jvm heap");
        assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
        assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model);
        assertEquals(1, logger.msgs().size());
        assertEquals("Declaring combined cluster with <nodes of=\"...\"> is deprecated without replacement, " +
                     "and the feature will be removed in Vespa 9. Use separate container and content clusters instead",
                     logger.msgs().get(0).message);
    }
    @Test
    public void testCombinedClusterWithJvmHeapSizeOverride() {
        // As testCombinedCluster, but with an explicit jvm allocated-memory override which both the
        // container heap percentage and the proton memory reduction must follow.
        String xmlWithNodes =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container1'>" +
                "    <search/>" +
                "    <nodes of='content1'>" +
                "      <jvm allocated-memory=\"30%\"/>" +
                "    </nodes>" +
                "  </container>" +
                "  <content version='1.0' id='content1'>" +
                "    <redundancy>2</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='2'>" +
                "      <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +
                "    </nodes>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(5);
        VespaModel model = tester.createModel(xmlWithNodes, true);
        assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
        assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
        assertEquals(30, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is lowered with combined clusters");
        assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.30)), protonMemorySize(model.getContentClusters()
                .get("content1")), "Memory for proton is lowered to account for the jvm heap");
        assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
        assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model);
    }
    /** For comparison with the above */
    @Test
    public void testNonCombinedCluster() {
        // Separate container and content clusters: heap percentage and proton memory keep defaults.
        String xmlWithNodes =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container1'>" +
                "    <search/>" +
                "    <nodes count='2'/>" +
                "  </container>" +
                "  <content version='1.0' id='content1'>" +
                "    <redundancy>2</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='2'>" +
                "      <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +
                "    </nodes>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(7);
        VespaModel model = tester.createModel(xmlWithNodes, true);
        assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
        assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
        assertEquals(ApplicationContainerCluster.defaultHeapSizePercentageOfTotalNodeMemory, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is normal");
        assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3))), protonMemorySize(model.getContentClusters().get("content1")), "Memory for proton is normal");
    }
    @Test
    public void testCombinedClusterWithJvmOptions() {
        // jvm options declared on the combined container cluster must reach every container.
        String xmlWithNodes =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container1'>" +
                "    <document-processing/>" +
                "    <nodes of='content1'>" +
                "      <jvm options='-Dtestoption=foo' />" +
                "    </nodes>" +
                "  </container>" +
                "  <content version='1.0' id='content1'>" +
                "    <redundancy>2</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='2'/>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(5);
        VespaModel model = tester.createModel(xmlWithNodes, true);
        assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
        assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
        for (Container container : model.getContainerClusters().get("container1").getContainers())
            assertTrue(container.getJvmOptions().contains("testoption"));
    }
    @Test
    public void testMultipleCombinedClusters() {
        // Two independent combined clusters: each container cluster runs on its referenced
        // content cluster's nodes.
        String xmlWithNodes =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container1'>" +
                "    <nodes of='content1'/>" +
                "  </container>" +
                "  <container version='1.0' id='container2'>" +
                "    <nodes of='content2'/>" +
                "  </container>" +
                "  <content version='1.0' id='content1'>" +
                "    <redundancy>2</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='2'/>" +
                "  </content>" +
                "  <content version='1.0' id='content2'>" +
                "    <redundancy>2</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='3'/>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(8);
        VespaModel model = tester.createModel(xmlWithNodes, true);
        assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
        assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
        assertEquals(3, model.getContentClusters().get("content2").getRootGroup().getNodes().size(), "Nodes in content2");
        assertEquals(3, model.getContainerClusters().get("container2").getContainers().size(), "Nodes in container2");
    }
    @Test
    public void testNonExistingCombinedClusterReference() {
        // <nodes of='...'> referencing an undefined service must fail with a clear message.
        String xmlWithNodes =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container1'>" +
                "    <nodes of='container2'/>" +
                "  </container>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(2);
        try {
            tester.createModel(xmlWithNodes, true);
            fail("Expected exception");
        }
        catch (IllegalArgumentException e) {
            assertEquals("container cluster 'container1' contains an invalid reference: referenced service 'container2' is not defined", Exceptions.toMessageString(e));
        }
    }
    @Test
    public void testInvalidCombinedClusterReference() {
        // <nodes of='...'> may only reference content clusters, not other container clusters.
        String xmlWithNodes =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container1'>" +
                "    <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" +
                "  </container>" +
                "  <container version='1.0' id='container2'>" +
                "    <nodes count='2'/>" +
                "  </container>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(2);
        try {
            tester.createModel(xmlWithNodes, true);
            fail("Expected exception");
        }
        catch (IllegalArgumentException e) {
            assertEquals("container cluster 'container1' contains an invalid reference: service 'container2' is not a content service", Exceptions.toMessageString(e));
        }
    }
    @Test
    public void testCombinedClusterWithZooKeeperFails() {
        // ZooKeeper cannot run in a combined cluster; model building must reject it.
        String xmlWithNodes =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container1'>" +
                "    <search/>" +
                "    <nodes of='content1'/>" +
                "    <zookeeper />" +
                "  </container>" +
                "  <content version='1.0' id='content1'>" +
                "    <redundancy>2</redundancy>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes count='2'>" +
                "      <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +
                "    </nodes>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(2);
        try {
            tester.createModel(xmlWithNodes, true);
            fail("ZooKeeper should not be allowed on combined clusters");
        } catch (IllegalArgumentException e) {
            assertEquals("A combined cluster cannot run ZooKeeper", e.getMessage());
        }
    }
@Test
public void testUsingNodesAndGroupCountAttributes() {
// One container cluster ('foo': 10 nodes) plus two grouped content clusters:
// 'bar' = 27 nodes in 9 groups of 3, 'baz' = 27 nodes in 27 groups of 1.
// Verifies admin-service placement and the exact group/node/host layout.
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='9'/>" +
" </content>" +
" <content version='1.0' id='baz'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='27'/>" +
" </content>" +
"</services>";
// 10 container + 27 + 27 content + 3 cluster controllers = 67 hosts.
int numberOfHosts = 67;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
assertEquals(1, model.getContainerClusters().size());
Set<HostResource> containerHosts = model.getContainerClusters().get("foo").getContainers().stream()
.map(Container::getHost)
.collect(Collectors.toSet());
assertEquals(10, containerHosts.size());
// Admin placement: slobroks co-located with cluster controllers, logserver on
// a container node, and no in-cluster config servers when hosted.
Admin admin = model.getAdmin();
Set<HostResource> clusterControllerHosts = admin.getClusterControllers().getContainers()
.stream().map(cc -> cc.getHostResource()).collect(Collectors.toSet());
Set<HostResource> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
assertEquals(3, slobrokHosts.size());
assertTrue(clusterControllerHosts.containsAll(slobrokHosts), "Slobroks are assigned on cluster controller nodes");
assertTrue(containerHosts.contains(admin.getLogserver().getHost()), "Logserver is assigned from container nodes");
assertEquals(0, admin.getConfigservers().size(), "No in-cluster config servers in a hosted environment");
assertEquals(3, admin.getClusterControllers().getContainers().size(), "Dedicated admin cluster controllers when hosted");
// 'bar': 9 groups of 3 nodes each; spot-checks groups 0, 1 and 8.
// Host names (node-1-3-50-NN) come from VespaModelTester's deterministic
// allocation order -- assumed stable; confirm against the tester if they drift.
ContentCluster cluster = model.getContentClusters().get("bar");
List<StorageGroup> subGroups = cluster.getRootGroup().getSubgroups();
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(9, subGroups.size());
assertEquals("0", subGroups.get(0).getIndex());
assertEquals(3, subGroups.get(0).getNodes().size());
assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-57", subGroups.get(0).getNodes().get(0).getHostName());
assertEquals(1, subGroups.get(0).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/1", subGroups.get(0).getNodes().get(1).getConfigId());
assertEquals(2, subGroups.get(0).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/2", subGroups.get(0).getNodes().get(2).getConfigId());
assertEquals("1", subGroups.get(1).getIndex());
assertEquals(3, subGroups.get(1).getNodes().size());
assertEquals(3, subGroups.get(1).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/3", subGroups.get(1).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-54", subGroups.get(1).getNodes().get(0).getHostName());
assertEquals(4, subGroups.get(1).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/4", subGroups.get(1).getNodes().get(1).getConfigId());
assertEquals(5, subGroups.get(1).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/5", subGroups.get(1).getNodes().get(2).getConfigId());
assertEquals("node-1-3-50-51", subGroups.get(2).getNodes().get(0).getHostName());
assertEquals("8", subGroups.get(8).getIndex());
assertEquals(3, subGroups.get(8).getNodes().size());
assertEquals(24, subGroups.get(8).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/24", subGroups.get(8).getNodes().get(0).getConfigId());
assertEquals(25, subGroups.get(8).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/25", subGroups.get(8).getNodes().get(1).getConfigId());
assertEquals(26, subGroups.get(8).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/26", subGroups.get(8).getNodes().get(2).getConfigId());
// 'baz': 27 single-node groups; spot-checks groups 0, 1, 2 and 26.
cluster = model.getContentClusters().get("baz");
subGroups = cluster.getRootGroup().getSubgroups();
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(27, subGroups.size());
assertEquals("0", subGroups.get(0).getIndex());
assertEquals(1, subGroups.get(0).getNodes().size());
assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-27", subGroups.get(0).getNodes().get(0).getHostName());
assertEquals("1", subGroups.get(1).getIndex());
assertEquals(1, subGroups.get(1).getNodes().size());
assertEquals(1, subGroups.get(1).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/1", subGroups.get(1).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-26", subGroups.get(1).getNodes().get(0).getHostName());
assertEquals("node-1-3-50-25", subGroups.get(2).getNodes().get(0).getHostName());
assertEquals("26", subGroups.get(26).getIndex());
assertEquals(1, subGroups.get(26).getNodes().size());
assertEquals(26, subGroups.get(26).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/26", subGroups.get(26).getNodes().get(0).getConfigId());
}
@Test
public void testSlobroksOnContainersIfNoContentClusters() {
    // With no content clusters in the application, slobroks and the log
    // server must be placed on the container cluster's own nodes.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            "</services>";
    int hostCount = 10;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);

    assertEquals(hostCount, vespaModel.getRoot().hostSystem().getHosts().size());
    assertEquals(1, vespaModel.getContainerClusters().size());

    Set<HostResource> fooClusterHosts = vespaModel.getContainerClusters().get("foo").getContainers()
                                                  .stream()
                                                  .map(c -> c.getHost())
                                                  .collect(Collectors.toSet());
    assertEquals(10, fooClusterHosts.size());

    Admin adminModel = vespaModel.getAdmin();
    Set<HostResource> slobrokHostSet = adminModel.getSlobroks()
                                                 .stream()
                                                 .map(Slobrok::getHost)
                                                 .collect(Collectors.toSet());
    assertEquals(3, slobrokHostSet.size());
    assertTrue(fooClusterHosts.containsAll(slobrokHostSet),
               "Slobroks are assigned from container nodes");
    assertTrue(fooClusterHosts.contains(adminModel.getLogserver().getHost()), "Logserver is assigned from container nodes");
    assertEquals(0, adminModel.getConfigservers().size(), "No in-cluster config servers in a hosted environment");
}
@Test
public void testUsingNodesAndGroupCountAttributesWithoutDedicatedClusterControllers() {
// NOTE(review): despite the name, this body appears identical to
// testUsingNodesAndGroupCountAttributes above (only the final
// cluster-controller assertion lacks a message) -- confirm what the
// "without dedicated cluster controllers" variant was meant to exercise.
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='9'/>" +
" </content>" +
" <content version='1.0' id='baz'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='27'/>" +
" </content>" +
"</services>";
// 10 container + 27 + 27 content + 3 cluster controllers = 67 hosts.
int numberOfHosts = 67;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
assertEquals(1, model.getContainerClusters().size());
Set<HostResource> containerHosts = model.getContainerClusters().get("foo").getContainers().stream()
.map(Container::getHost)
.collect(Collectors.toSet());
assertEquals(10, containerHosts.size());
// Admin placement mirrors the hosted defaults (slobroks on cluster
// controllers, logserver on a container node, no config servers).
Admin admin = model.getAdmin();
Set<HostResource> clusterControllerHosts = admin.getClusterControllers().getContainers()
.stream().map(cc -> cc.getHostResource()).collect(Collectors.toSet());
Set<HostResource> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
assertEquals(3, slobrokHosts.size());
assertTrue(clusterControllerHosts.containsAll(slobrokHosts), "Slobroks are assigned on cluster controller nodes");
assertTrue(containerHosts.contains(admin.getLogserver().getHost()), "Logserver is assigned from container nodes");
assertEquals(0, admin.getConfigservers().size(), "No in-cluster config servers in a hosted environment");
assertEquals(3, admin.getClusterControllers().getContainers().size());
// 'bar': 9 groups of 3 nodes; spot-checks groups 0, 1 and 8. Host names are
// assumed stable outputs of VespaModelTester's deterministic allocation.
ContentCluster cluster = model.getContentClusters().get("bar");
List<StorageGroup> subGroups = cluster.getRootGroup().getSubgroups();
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(9, subGroups.size());
assertEquals("0", subGroups.get(0).getIndex());
assertEquals(3, subGroups.get(0).getNodes().size());
assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-57", subGroups.get(0).getNodes().get(0).getHostName());
assertEquals(1, subGroups.get(0).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/1", subGroups.get(0).getNodes().get(1).getConfigId());
assertEquals(2, subGroups.get(0).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/2", subGroups.get(0).getNodes().get(2).getConfigId());
assertEquals("1", subGroups.get(1).getIndex());
assertEquals(3, subGroups.get(1).getNodes().size());
assertEquals(3, subGroups.get(1).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/3", subGroups.get(1).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-54", subGroups.get(1).getNodes().get(0).getHostName());
assertEquals(4, subGroups.get(1).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/4", subGroups.get(1).getNodes().get(1).getConfigId());
assertEquals(5, subGroups.get(1).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/5", subGroups.get(1).getNodes().get(2).getConfigId());
assertEquals("node-1-3-50-51", subGroups.get(2).getNodes().get(0).getHostName());
assertEquals("8", subGroups.get(8).getIndex());
assertEquals(3, subGroups.get(8).getNodes().size());
assertEquals(24, subGroups.get(8).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/24", subGroups.get(8).getNodes().get(0).getConfigId());
assertEquals(25, subGroups.get(8).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/25", subGroups.get(8).getNodes().get(1).getConfigId());
assertEquals(26, subGroups.get(8).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/26", subGroups.get(8).getNodes().get(2).getConfigId());
// 'baz': 27 single-node groups; spot-checks groups 0, 1, 2 and 26.
cluster = model.getContentClusters().get("baz");
subGroups = cluster.getRootGroup().getSubgroups();
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(27, subGroups.size());
assertEquals("0", subGroups.get(0).getIndex());
assertEquals(1, subGroups.get(0).getNodes().size());
assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-27", subGroups.get(0).getNodes().get(0).getHostName());
assertEquals("1", subGroups.get(1).getIndex());
assertEquals(1, subGroups.get(1).getNodes().size());
assertEquals(1, subGroups.get(1).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/1", subGroups.get(1).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-26", subGroups.get(1).getNodes().get(0).getHostName());
assertEquals("node-1-3-50-25", subGroups.get(2).getNodes().get(0).getHostName());
assertEquals("26", subGroups.get(26).getIndex());
assertEquals(1, subGroups.get(26).getNodes().size());
assertEquals(26, subGroups.get(26).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/26", subGroups.get(26).getNodes().get(0).getConfigId());
}
@Test
public void testGroupsOfSize1() {
    // An 8-node content cluster declared as 8 groups of one node each, with
    // redundancy 1. Verifies cluster controller placement, distribution bits
    // and the resulting group topology.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>1</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='8' groups='8'/>" +
            " </content>" +
            "</services>";
    int totalHosts = 21;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(totalHosts);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(totalHosts, vespaModel.getRoot().hostSystem().getHosts().size());

    // Three dedicated cluster controllers are provisioned.
    ClusterControllerContainerCluster controllers = vespaModel.getAdmin().getClusterControllers();
    assertEquals(3, controllers.getContainers().size());
    assertEquals("cluster-controllers", controllers.getName());
    assertEquals("node-1-3-50-03", controllers.getContainers().get(0).getHostName());
    assertEquals("node-1-3-50-02", controllers.getContainers().get(1).getHostName());
    assertEquals("node-1-3-50-01", controllers.getContainers().get(2).getHostName());

    ContentCluster barCluster = vespaModel.getContentClusters().get("bar");
    List<StorageGroup> groups = barCluster.getRootGroup().getSubgroups();
    assertEquals(0, barCluster.getRootGroup().getNodes().size());
    assertEquals(8, groups.size());
    assertEquals(8, barCluster.distributionBits());

    StorageGroup firstGroup = groups.get(0);
    assertEquals("0", firstGroup.getIndex());
    assertEquals(1, firstGroup.getNodes().size());
    assertEquals(0, firstGroup.getNodes().get(0).getDistributionKey());
    assertEquals("bar/storage/0", firstGroup.getNodes().get(0).getConfigId());
    assertEquals("node-1-3-50-11", firstGroup.getNodes().get(0).getHostName());

    StorageGroup secondGroup = groups.get(1);
    assertEquals("1", secondGroup.getIndex());
    assertEquals(1, secondGroup.getNodes().size());
    assertEquals(1, secondGroup.getNodes().get(0).getDistributionKey());
    assertEquals("bar/storage/1", secondGroup.getNodes().get(0).getConfigId());
    assertEquals("node-1-3-50-10", secondGroup.getNodes().get(0).getHostName());

    StorageGroup lastGroup = groups.get(7);
    assertEquals("7", lastGroup.getIndex());
    assertEquals(1, lastGroup.getNodes().size());
    assertEquals(7, lastGroup.getNodes().get(0).getDistributionKey());
    assertEquals("bar/storage/7", lastGroup.getNodes().get(0).getConfigId());
    assertEquals("node-1-3-50-04", lastGroup.getNodes().get(0).getHostName());
}
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() {
    // One container node is retired; the slobrok cluster keeps the retired
    // node in addition to the usual three.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            "</services>";
    int hostCount = 11;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true, "node-1-3-50-09");
    assertEquals(hostCount, vespaModel.getRoot().hostSystem().getHosts().size());

    var slobroks = vespaModel.getAdmin().getSlobroks();
    assertEquals(1 + 3, slobroks.size(), "Includes retired node");
    assertEquals("node-1-3-50-11", slobroks.get(0).getHostName());
    assertEquals("node-1-3-50-10", slobroks.get(1).getHostName());
    assertEquals("node-1-3-50-08", slobroks.get(2).getHostName());
    assertEquals("node-1-3-50-09", slobroks.get(3).getHostName(), "Included in addition because it is retired");
}
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() {
    // Two retired container nodes: both are kept as extra slobroks, appended
    // after the three active ones.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            "</services>";
    int hostCount = 12;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true, "node-1-3-50-03", "node-1-3-50-04");
    assertEquals(10 + 2, vespaModel.getRoot().hostSystem().getHosts().size());

    var slobroks = vespaModel.getAdmin().getSlobroks();
    assertEquals(3 + 2, slobroks.size(), "Includes retired node");
    assertEquals("node-1-3-50-12", slobroks.get(0).getHostName());
    assertEquals("node-1-3-50-11", slobroks.get(1).getHostName());
    assertEquals("node-1-3-50-10", slobroks.get(2).getHostName());
    assertEquals("node-1-3-50-04", slobroks.get(3).getHostName(), "Included in addition because it is retired");
    assertEquals("node-1-3-50-03", slobroks.get(4).getHostName(), "Included in addition because it is retired");
}
@Test
public void testSlobroksAreSpreadOverAllContainerClusters() {
// Two container clusters ('foo': 10 nodes, 'bar': 3 nodes) with three retired
// nodes. Slobroks are placed in both clusters, and retired slobrok hosts are
// kept in addition to their replacements.
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
" <container version='1.0' id='bar'>" +
" <nodes count='3'/>" +
" </container>" +
"</services>";
int numberOfHosts = 16;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true, "node-1-3-50-15", "node-1-3-50-05", "node-1-3-50-04");
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
assertEquals(7, model.getAdmin().getSlobroks().size(), "Includes retired node");
// Indices 0-2 presumably come from 'foo' (index 2 retired) and 3-6 from
// 'bar' -- inferred from the host numbers; confirm against the allocator.
assertEquals("node-1-3-50-16", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("node-1-3-50-14", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("node-1-3-50-15", model.getAdmin().getSlobroks().get(2).getHostName(), "Included in addition because it is retired");
assertEquals("node-1-3-50-03", model.getAdmin().getSlobroks().get(3).getHostName());
// NOTE(review): slobrok index 4 is never asserted -- consider adding an
// expectation for it so a placement regression there is caught.
assertEquals("node-1-3-50-05", model.getAdmin().getSlobroks().get(5).getHostName(), "Included in addition because it is retired");
assertEquals("node-1-3-50-04", model.getAdmin().getSlobroks().get(6).getHostName(), "Included in addition because it is retired");
}
@Test
public void testDedicatedClusterControllers() {
    // Two content clusters with no explicit admin configuration: a dedicated
    // three-node cluster-controller cluster is provisioned, typed as a
    // stateful admin cluster.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <content version='1.0' id='foo'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2' />" +
            " </content>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2' />" +
            " </content>" +
            "</services>";
    int totalHosts = 7;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(totalHosts);
    VespaModel vespaModel = modelTester.createModel(servicesXml);
    assertEquals(7, vespaModel.getRoot().hostSystem().getHosts().size());

    ClusterControllerContainerCluster controllers = vespaModel.getAdmin().getClusterControllers();
    assertEquals(3, controllers.getContainers().size());
    assertEquals("cluster-controllers", controllers.getName());
    for (var controller : controllers.getContainers()) {
        var clusterSpec = controller.getHost().spec().membership().get().cluster();
        assertTrue(clusterSpec.isStateful());
        assertEquals(ClusterSpec.Type.admin, clusterSpec.type());
    }
}
@Test
public void testLogserverContainerWhenDedicatedLogserver() {
    // Logserver is declared explicitly in services.xml, so the implicit
    // dedicated-logserver-node flag passed to the helper is false.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'>" +
            " <logservers>" +
            " <nodes count='1' dedicated='true'/>" +
            " </logservers>" +
            " </admin>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='1'/>" +
            " </container>" +
            "</services>";
    testContainerOnLogserverHost(servicesXml, /* useDedicatedNodeForLogserver */ false);
}
@Test
public void testLogForwarderNotInAdminCluster() {
    // Without include-admin='true', log forwarders go on container nodes
    // only -- not on the dedicated logserver node.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'>" +
            " <logservers>" +
            " <nodes count='1' dedicated='true'/>" +
            " </logservers>" +
            " <logforwarding>" +
            " <splunk deployment-server='bardeplserv:123' client-name='barclinam' phone-home-interval='987' />" +
            " </logforwarding>" +
            " </admin>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='1'/>" +
            " </container>" +
            "</services>";
    int expectedHosts = 2;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(expectedHosts + 1);
    VespaModel vespaModel = modelTester.createModel(Zone.defaultZone(), servicesXml, true);
    assertEquals(expectedHosts, vespaModel.getRoot().hostSystem().getHosts().size());

    // The dedicated logserver node runs neither a container nor a forwarder.
    HostResource logserverHost = vespaModel.getAdmin().getLogserver().getHostResource();
    assertNotNull(logserverHost.getService("logserver"));
    assertNull(logserverHost.getService("container"));
    assertNull(logserverHost.getService("logforwarder"));

    // The single container node runs the forwarder.
    var containers = vespaModel.getContainerClusters().get("foo").getContainers();
    assertEquals(1, containers.size());
    HostResource containerHost = containers.get(0).getHostResource();
    assertNull(containerHost.getService("logserver"));
    assertNotNull(containerHost.getService("container"));
    assertNotNull(containerHost.getService("logforwarder"));

    String shutdownCommand = containerHost.getService("logforwarder")
                                          .getPreShutdownCommand()
                                          .orElse("<none>");
    assertTrue(shutdownCommand.startsWith("$ROOT/bin/vespa-logforwarder-start -S -c hosts/"));
}
@Test
public void testLogForwarderInAdminCluster() {
    // With include-admin='true', the dedicated logserver node also gets a
    // log forwarder, in addition to every container node.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'>" +
            " <logservers>" +
            " <nodes count='1' dedicated='true'/>" +
            " </logservers>" +
            " <logforwarding include-admin='true'>" +
            " <splunk deployment-server='bardeplserv:123' client-name='barclinam' phone-home-interval='987' />" +
            " </logforwarding>" +
            " </admin>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='1'/>" +
            " </container>" +
            "</services>";
    int expectedHosts = 2;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(expectedHosts + 1);
    VespaModel vespaModel = modelTester.createModel(Zone.defaultZone(), servicesXml, true);
    assertEquals(expectedHosts, vespaModel.getRoot().hostSystem().getHosts().size());

    // Logserver node: logserver + forwarder, but no container.
    HostResource logserverHost = vespaModel.getAdmin().getLogserver().getHostResource();
    assertNotNull(logserverHost.getService("logserver"));
    assertNull(logserverHost.getService("container"));
    assertNotNull(logserverHost.getService("logforwarder"));

    // Container node: container + forwarder, but no logserver.
    var containers = vespaModel.getContainerClusters().get("foo").getContainers();
    assertEquals(1, containers.size());
    HostResource containerHost = containers.get(0).getHostResource();
    assertNull(containerHost.getService("logserver"));
    assertNotNull(containerHost.getService("container"));
    assertNotNull(containerHost.getService("logforwarder"));
}
@Test
public void testImplicitLogserverContainer() {
    // No explicit logserver configuration: delegate to the shared helper with
    // the implicit dedicated-logserver-node flag enabled.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='1'/>" +
            " </container>" +
            "</services>";
    testContainerOnLogserverHost(servicesXml, /* useDedicatedNodeForLogserver */ true);
}
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() {
// 24 nodes in 3 groups are requested but only 6 hosts exist. With the second
// createModel argument false (presumably allowing under-allocation -- confirm
// the tester's signature) the cluster is scaled down to 3 groups of 2 nodes,
// and redundancy/searchable-copies are capped accordingly (2 per group * 3
// groups = 6 effective copies).
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <admin version='3.0'>" +
" <nodes count='3'/>" +
" </admin>" +
" <content version='1.0' id='bar'>" +
" <redundancy reply-after='3'>4</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='24' groups='3'/>" +
" <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
" </content>" +
"</services>";
int numberOfHosts = 6;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
ContentCluster cluster = model.getContentClusters().get("bar");
List<StorageGroup> subGroups = cluster.getRootGroup().getSubgroups();
assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(2*3, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(2*3, cluster.redundancy().effectiveReadyCopies());
assertEquals("2|2|*", cluster.getRootGroup().getPartitions().get());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(3, subGroups.size());
// Group layout: 3 groups of 2 nodes, distribution keys assigned in order.
assertEquals("0", subGroups.get(0).getIndex());
assertEquals(2, subGroups.get(0).getNodes().size());
assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
assertEquals(1, subGroups.get(0).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/1", subGroups.get(0).getNodes().get(1).getConfigId());
assertEquals("1", subGroups.get(1).getIndex());
assertEquals(2, subGroups.get(1).getNodes().size());
assertEquals(2, subGroups.get(1).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/2", subGroups.get(1).getNodes().get(0).getConfigId());
assertEquals(3, subGroups.get(1).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/3", subGroups.get(1).getNodes().get(1).getConfigId());
assertEquals("2", subGroups.get(2).getIndex());
assertEquals(2, subGroups.get(2).getNodes().size());
assertEquals(4, subGroups.get(2).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/4", subGroups.get(2).getNodes().get(0).getConfigId());
assertEquals(5, subGroups.get(2).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/5", subGroups.get(2).getNodes().get(1).getConfigId());
}
@Test
public void testRedundancyWithGroupsTooHighRedundancyAndOneRetiredNode() {
    // 2 nodes in 2 groups means at most 1 node per group, so the requested
    // redundancy of 2 cannot be satisfied and model building must fail.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2' groups='2'/>" +
            " </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(3);
    // assertThrows (used elsewhere in this class) replaces try/fail/catch and
    // drops the unused 'model' local the old version assigned.
    IllegalArgumentException e =
            assertThrows(IllegalArgumentException.class,
                         () -> tester.createModel(services, false, "node-1-3-50-03"));
    assertEquals("Cluster 'bar' specifies redundancy 2, but it cannot be higher than the minimum nodes per group, which is 1",
                 Exceptions.toMessageString(e));
}
@Test
public void testRedundancyWithGroupsAndThreeRetiredNodes() {
    // 2 nodes in 2 groups with three additional retired hosts: the effective
    // redundancy doubles to cover the retired copies while the group topology
    // stays at two subgroups.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>1</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2' groups='2'/>" +
            " </content>" +
            "</services>";
    int hostCount = 5;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, false, "node-1-3-50-05", "node-1-3-50-04", "node-1-3-50-03");
    assertEquals(hostCount, vespaModel.getRoot().hostSystem().getHosts().size());

    ContentCluster barCluster = vespaModel.getContentClusters().get("bar");
    var redundancy = barCluster.redundancy();
    assertEquals(2, redundancy.effectiveInitialRedundancy());
    assertEquals(2, redundancy.effectiveFinalRedundancy());
    assertEquals(2, redundancy.effectiveReadyCopies());

    StorageGroup rootGroup = barCluster.getRootGroup();
    assertEquals("1|*", rootGroup.getPartitions().get());
    assertEquals(0, rootGroup.getNodes().size());
    assertEquals(2, rootGroup.getSubgroups().size());
}
@Test
public void testRedundancy2DownscaledToOneNodeButOneRetired() {
    // One of the two content nodes is retired: effective redundancy drops to
    // 1 while both nodes remain children of the storage cluster.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2'/>" +
            " </content>" +
            "</services>";
    int hostCount = 3;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, false, false, true, "node-1-3-50-03");
    assertEquals(hostCount, vespaModel.getRoot().hostSystem().getHosts().size());

    ContentCluster barCluster = vespaModel.getContentClusters().get("bar");
    assertEquals(2, barCluster.getStorageCluster().getChildren().size());

    var redundancy = barCluster.redundancy();
    assertEquals(1, redundancy.effectiveInitialRedundancy());
    assertEquals(1, redundancy.effectiveFinalRedundancy());
    assertEquals(1, redundancy.effectiveReadyCopies());

    StorageGroup rootGroup = barCluster.getRootGroup();
    assertEquals(2, rootGroup.getNodes().size());
    assertEquals(0, rootGroup.getSubgroups().size());
}
@Test
public void testUsingNodesCountAttributesAndGettingTooFewNodes() {
// 24 flat content nodes are requested but only 6 hosts exist (shared with the
// admin and container services, since this is a non-hosted run -- confirm the
// tester's second createModel argument). The cluster is scaled down to 4
// nodes, capping redundancy, searchable copies and dispatch groups at 4.
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <admin version='3.0'>" +
" <nodes count='3'/>" +
" </admin>" +
" <container version='1.0' id='container'>" +
" <search/>" +
" <nodes count='2'/>" +
" </container>" +
" <content version='1.0' id='bar'>" +
" <redundancy reply-after='8'>12</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='24'/>" +
" <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
" <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
" </content>" +
"</services>";
int numberOfHosts = 6;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(4, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(4, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(4, cluster.redundancy().effectiveReadyCopies());
assertEquals(4, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
assertEquals(4, cluster.getSearch().getIndexed().getSearchableCopies());
// Flat topology: no partitions, no subgroups; keys 0-3 assigned in order.
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(4, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertEquals(4, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
assertEquals(1, cluster.getRootGroup().getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/1", cluster.getRootGroup().getNodes().get(1).getConfigId());
assertEquals(2, cluster.getRootGroup().getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/2", cluster.getRootGroup().getNodes().get(2).getConfigId());
assertEquals(3, cluster.getRootGroup().getNodes().get(3).getDistributionKey());
assertEquals("bar/storage/3", cluster.getRootGroup().getNodes().get(3).getConfigId());
}
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() {
    // 24 nodes in 3 groups are requested but only one host exists; the
    // cluster is scaled down to a single flat node with redundancy 1.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='3.0'>" +
            " <nodes count='3'/>" +
            " </admin>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy reply-after='3'>4</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='24' groups='3'/>" +
            " <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
            " </content>" +
            "</services>";
    int numberOfHosts = 1;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, false);
    assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
    ContentCluster cluster = model.getContentClusters().get("bar");
    assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
    assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
    assertEquals(1, cluster.redundancy().effectiveReadyCopies());
    // Single flat node: no group partitions and no subgroups.
    // (A duplicated assertion on getNodes().size() was removed.)
    assertFalse(cluster.getRootGroup().getPartitions().isPresent());
    assertEquals(1, cluster.getRootGroup().getNodes().size());
    assertEquals(0, cluster.getRootGroup().getSubgroups().size());
    assertEquals(0, cluster.getRootGroup().getNodes().get(0).getDistributionKey());
    assertEquals("bar/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
}
@Test
public void testRequiringMoreNodesThanAreAvailable() {
    // 3 nodes are required but only 2 hosts exist; required='true' forbids
    // scaling down, so model building must fail.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>1</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='3' required='true'/>" +
            " </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(2);
    // Only the call expected to throw is inside the lambda, so a failure in
    // setup cannot masquerade as the expected provisioning failure.
    assertThrows(IllegalArgumentException.class, () -> tester.createModel(services, false));
}
@Test
public void testRequiredNodesAndDedicatedClusterControllers() {
    // required='true' combined with dedicated cluster controllers: even though 4
    // hosts exist for the 2 required content nodes, model building is expected to
    // fail with IllegalArgumentException.
    // NOTE(review): presumably the dedicated cluster controllers consume the
    // remaining hosts — confirm against the provisioner behavior.
    assertThrows(IllegalArgumentException.class, () -> {
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                " <content version='1.0' id='foo'>" +
                " <redundancy>1</redundancy>" +
                " <documents>" +
                " <document type='type1' mode='index'/>" +
                " </documents>" +
                " <nodes count='2' required='true'/>" +
                " </content>" +
                "</services>";
        int numberOfHosts = 4;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        tester.createModel(services, false);
    });
}
@Test
public void testExclusiveNodes() {
    // exclusive='true' on both clusters: every provisioned host's cluster
    // membership must be marked exclusive.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "<container version='1.0' id='container'>" +
            " <nodes count='2' exclusive='true'/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>1</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='3' exclusive='true'/>" +
            " </content>" +
            "</services>";
    int numberOfHosts = 5;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, false);
    model.hostSystem().getHosts().forEach(host -> assertTrue(host.spec().membership().get().cluster().isExclusive()));
}
@Test
// Requests 24 flat nodes (plus dispatch groups) but provides a single host: the
// model must downscale to one node with all redundancy values reduced to 1 and a
// single dispatch group.
// NOTE(review): removed an exact duplicate of the getNodes().size() assertion.
public void testUsingNodesCountAttributesAndGettingJustOneNode() {
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='3.0'>" +
            " <nodes count='3'/>" +
            " </admin>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy reply-after='8'>12</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='24'/>" +
            " <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
            " <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
            " </content>" +
            "</services>";
    int numberOfHosts = 1;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, false);
    assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
    ContentCluster cluster = model.getContentClusters().get("bar");
    assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
    assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
    assertEquals(1, cluster.redundancy().effectiveReadyCopies());
    // The 7 requested dispatch groups collapse to 1 on a single node.
    assertEquals(1, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
    assertFalse(cluster.getRootGroup().getPartitions().isPresent());
    assertEquals(1, cluster.getRootGroup().getNodes().size());
    assertEquals(0, cluster.getRootGroup().getSubgroups().size());
    assertEquals(0, cluster.getRootGroup().getNodes().get(0).getDistributionKey());
    assertEquals("bar/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
}
// NOTE(review): this method was annotated with @Test twice; JUnit's @Test is not
// @Repeatable, so the duplicate annotation is a compile error. One was removed.
@Test
public void testRequestingRangesMin() {
    // Requests resource/count ranges and provisions without "require capacity"
    // (createModel(..., false) semantics of the 'true' flag aside): hosts matching
    // the range minima are available, so the minimal counts should be allocated.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='container'>" +
            " <nodes count='[4, 6]'>" +
            " <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" +
            " </nodes>" +
            " </container>" +
            " <content version='1.0' id='foo'>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='[6, 20]' groups='[3,4]'>" +
            " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" +
            " </nodes>" +
            " </content>" +
            "</services>";
    int totalHosts = 10;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(new NodeResources(11.5, 10, 30, 0.3), 6);
    tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20);
    tester.addHosts(new NodeResources( 0.5, 2, 10, 0.3), 3);
    VespaModel model = tester.createModel(services, true);
    // 10 workload hosts at range minima + 3 extra (admin services on small hosts).
    assertEquals(totalHosts + 3, model.getRoot().hostSystem().getHosts().size());
}
@Test
public void testRequestingRangesMax() {
    // Same ranged request as testRequestingRangesMin, but created with the extra
    // 'true' flag so the maxima of the ranges are requested: 6 + 20 + 3 = 29 hosts.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='container'>" +
            " <nodes count='[4, 6]'>" +
            " <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" +
            " </nodes>" +
            " </container>" +
            " <content version='1.0' id='foo'>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='[6, 20]' groups='[3,4]'>" +
            " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" +
            " </nodes>" +
            " </content>" +
            "</services>";
    int totalHosts = 29;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(new NodeResources(13.5, 100, 1000, 0.3), 6);
    tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20);
    tester.addHosts(new NodeResources( 0.5, 2, 10, 0.3), 3);
    VespaModel model = tester.createModel(services, true, true);
    assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size());
}
@Test
public void testUseArm64NodesForAdminCluster() {
    // When the admin cluster architecture is set to arm64, cluster controllers and
    // the logserver container must land on arm64 hosts while all workload nodes
    // stay on the default architecture.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <admin version='4.0'>" +
            " </admin>" +
            " <container version='1.0' id='container'>" +
            " <nodes count='2'>" +
            " <resources vcpu='2' memory='8Gb' disk='30Gb'/>" +
            " </nodes>" +
            " </container>" +
            " <content version='1.0' id='foo'>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2'>" +
            " <resources vcpu='2' memory='8Gb' disk='30Gb'/>" +
            " </nodes>" +
            " </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.setHosted(true);
    tester.setAdminClusterArchitecture(Architecture.arm64);
    tester.useDedicatedNodeForLogserver(true);
    // 4 large default-arch hosts for workload, 4 small arm64 hosts for admin.
    tester.addHosts(new NodeResources(13.5, 100, 1000, 0.3), 4);
    tester.addHosts(new NodeResources(0.5, 2, 50, 0.3, DiskSpeed.fast, StorageType.any, Architecture.arm64), 4);
    VespaModel model = tester.createModel(services, true, true);
    List<HostResource> hosts = model.getRoot().hostSystem().getHosts();
    assertEquals(8, hosts.size());
    // 3 cluster controllers, all on arm64.
    Set<HostResource> clusterControllerResources = getHostResourcesForService(hosts, "container-clustercontroller");
    assertEquals(3, clusterControllerResources.size());
    assertTrue(clusterControllerResources.stream().allMatch(host -> host.realResources().architecture() == Architecture.arm64));
    // 1 dedicated logserver container, also on arm64.
    Set<HostResource> logserverResources = getHostResourcesForService(hosts, "logserver-container");
    assertEquals(1, logserverResources.size());
    assertTrue(logserverResources.stream().allMatch(host -> host.realResources().architecture() == Architecture.arm64));
    // Everything else (container + content nodes) stays on the default architecture.
    assertTrue(hosts.stream()
            .filter(host -> !clusterControllerResources.contains(host))
            .filter(host -> !logserverResources.contains(host))
            .allMatch(host -> host.realResources().architecture() == Architecture.getDefault()));
}
/** Returns the hosts (from the given list) that run at least one service of the given service type. */
private Set<HostResource> getHostResourcesForService(List<HostResource> hosts, String service) {
    Set<HostResource> matchingHosts = hosts.stream()
            .filter(candidate -> candidate.getHostInfo().getServices().stream()
                                          .anyMatch(serviceInfo -> serviceInfo.getServiceType().equals(service)))
            .collect(Collectors.toSet());
    return matchingHosts;
}
@Test
public void testContainerOnly() {
    // A services.xml consisting of only a container cluster: 3 containers, a
    // logserver, and one slobrok per container node.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<container version='1.0'>" +
            " <search/>" +
            " <nodes count='3'/>" +
            "</container>";
    int numberOfHosts = 3;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, true);
    assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
    assertEquals(3, model.getContainerClusters().get("container").getContainers().size());
    assertNotNull(model.getAdmin().getLogserver());
    assertEquals(3, model.getAdmin().getSlobroks().size());
}
@Test
public void testJvmOptions() {
    // <jvm options=.../> inside <nodes> must be propagated to the containers.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<container version='1.0'>" +
            " <search/>" +
            " <nodes count='3'>" +
            " <jvm options='-DfooOption=xyz' /> " +
            " </nodes>" +
            "</container>";
    int numberOfHosts = 3;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, true);
    assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
    assertEquals("-DfooOption=xyz", model.getContainerClusters().get("container").getContainers().get(0).getAssignedJvmOptions());
}
@Test
public void testUsingHostaliasWithProvisioner() {
    // Non-hosted mode: explicit hostalias-based node references resolve to a
    // single host with one slobrok.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "<admin version='2.0'>" +
            " <adminserver hostalias='node1'/>\n"+
            "</admin>\n" +
            "<container id='mydisc' version='1.0'>" +
            " <handler id='myHandler'>" +
            " <component id='injected' />" +
            " </handler>" +
            " <nodes>" +
            " <node hostalias='node1'/>" +
            " </nodes>" +
            "</container>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.setHosted(false);
    tester.addHosts(1);
    VespaModel model = tester.createModel(services, true);
    assertEquals(1, model.getRoot().hostSystem().getHosts().size());
    assertEquals(1, model.getAdmin().getSlobroks().size());
}
@Test
public void testThatStandaloneSyntaxWorksOnHostedVespa() {
    // A standalone-style container element (no <services> wrapper, no <nodes>) is
    // accepted on hosted Vespa when the server uses the default web service port.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<container id='foo' version='1.0'>" +
            " <http>" +
            " <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" +
            " </http>" +
            "</container>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(2);
    VespaModel model = tester.createModel(services, true);
    assertEquals(2, model.getHosts().size());
    assertEquals(1, model.getContainerClusters().size());
    assertEquals(2, model.getContainerClusters().get("foo").getContainers().size());
}
@Test
public void testThatStandaloneSyntaxOnHostedVespaRequiresDefaultPort() {
try {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<container id='foo' version='1.0'>" +
" <http>" +
" <server id='server1' port='8095' />" +
" </http>" +
"</container>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("Illegal port 8095 in http server 'server1': Port must be set to " +
getDefaults().vespaWebServicePort(), e.getMessage());
}
}
@Test
public void testThatStandaloneSyntaxWorksOnHostedManuallyDeployed() {
    // Hostalias-based syntax deployed manually to a hosted dev zone: each cluster
    // is downscaled to one node, plus a dedicated cluster controller.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <admin version='2.0'>" +
            " <adminserver hostalias='node1'/>" +
            " </admin>" +
            " <container id='foo' version='1.0'>" +
            " <nodes>" +
            " <node hostalias='node1'/>" +
            " <node hostalias='node2'/>" +
            " </nodes>" +
            " </container>" +
            " <content id='bar' version='1.0'>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <redundancy>2</redundancy>" +
            " <nodes>" +
            " <group>" +
            " <node distribution-key='0' hostalias='node3'/>" +
            " <node distribution-key='1' hostalias='node4'/>" +
            " </group>" +
            " </nodes>" +
            " </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.setHosted(true);
    tester.addHosts(4);
    VespaModel model = tester.createModel(new Zone(Environment.dev, RegionName.from("us-central-1")), services, true);
    assertEquals(3, model.getHosts().size(), "We get 1 node per cluster and no admin node apart from the dedicated cluster controller");
    assertEquals(1, model.getContainerClusters().size());
    assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
    assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes(true));
    assertEquals(1, model.getAdmin().getClusterControllers().getContainers().size());
}
@Test
// In a hosted *staging* zone, hostalias-based content nodes are rejected: hosted
// clusters must use <nodes count='N'>.
// NOTE(review): rewritten from try/fail/catch to assertThrows (the file's
// convention), removing the unused 'model' local. The method name says "Works"
// although the test expects a failure — consider renaming; verify the intent.
public void testThatStandaloneSyntaxWithClusterControllerWorksOnHostedManuallyDeployed() {
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container id='foo' version='1.0'>" +
            " <nodes count=\"2\" />" +
            " </container>" +
            " <content id='bar' version='1.0'>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <redundancy>1</redundancy>" +
            " <nodes>" +
            " <group>" +
            " <node distribution-key='0' hostalias='node3'/>" +
            " </group>" +
            " </nodes>" +
            " </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.setHosted(true);
    tester.addHosts(4);
    IllegalArgumentException e =
            assertThrows(IllegalArgumentException.class,
                         () -> tester.createModel(new Zone(Environment.staging, RegionName.from("us-central-1")), services, true));
    assertTrue(e.getMessage().startsWith("Clusters in hosted environments must have a <nodes count='N'> tag"));
}
/** Deploying an application with "nodes count" standalone should give a single-node deployment */
@Test
public void testThatHostedSyntaxWorksOnStandalone() {
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='container1'>" +
            " <search/>" +
            " <nodes count='1'/>" +
            " </container>" +
            " <content version='1.0'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2'/>" +
            " </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.setHosted(false);
    tester.addHosts(3);
    VespaModel model = tester.createModel(services, true);
    assertEquals(1,
                 model.getContainerClusters().get("container1").getContainers().size(),
                 "Nodes in container cluster");
    assertEquals(1,
                 model.getContentClusters().get("content").getRootGroup().getNodes().size(),
                 "Nodes in content cluster (downscaled)");
    assertEquals(1, model.getAdmin().getSlobroks().size());
    // Sanity checks: config for the downscaled model can actually be resolved.
    model.getConfig(new StorStatusConfig.Builder(), "default");
    StorageCluster storage = model.getContentClusters().get("content").getStorageCluster();
    StorCommunicationmanagerConfig.Builder builder = new StorCommunicationmanagerConfig.Builder();
    storage.getChildren().get("0").getConfig(builder);
}
/**
 * Deploying an application with "nodes count" standalone should give a single-node deployment,
 * also if the user has a lingering hosts file from running self-hosted.
 *
 * NOTE: This does *not* work (but gives an understandable error message);
 * however, the current code does not provoke the error that is thrown from HostsXmlProvisioner.prepare.
 */
@Test
public void testThatHostedSyntaxWorksOnStandaloneAlsoWithAHostedFile() {
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='container1'>" +
            " <search/>" +
            " <nodes count='1'/>" +
            " </container>" +
            " <content version='1.0'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2'/>" +
            " </content>" +
            "</services>";
    // A hosts.xml left over from a self-hosted setup; should not affect the result.
    String hosts =
            "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
            "<hosts>\n" +
            " <host name=\"vespa-1\">\n" +
            " <alias>vespa-1</alias>\n" +
            " </host>\n" +
            " <host name=\"vespa-2\">\n" +
            " <alias>vespa-2</alias>\n" +
            " </host>\n" +
            " <host name=\"vespa-3\">\n" +
            " <alias>vespa-3</alias>\n" +
            " </host>\n" +
            "</hosts>";
    VespaModelTester tester = new VespaModelTester();
    tester.setHosted(false);
    tester.addHosts(3);
    VespaModel model = tester.createModel(services, hosts, true);
    assertEquals(1,
                 model.getContainerClusters().get("container1").getContainers().size(),
                 "Nodes in container cluster");
    assertEquals(1,
                 model.getContentClusters().get("content").getRootGroup().getNodes().size(),
                 "Nodes in content cluster (downscaled)");
    assertEquals(1, model.getAdmin().getSlobroks().size());
    // Sanity checks: config for the downscaled model can actually be resolved.
    model.getConfig(new StorStatusConfig.Builder(), "default");
    StorageCluster storage = model.getContentClusters().get("content").getStorageCluster();
    StorCommunicationmanagerConfig.Builder builder = new StorCommunicationmanagerConfig.Builder();
    storage.getChildren().get("0").getConfig(builder);
}
@Test
public void testNoNodeTagMeansTwoNodes() {
    // Hosted: clusters without a <nodes> tag get default sizes — 2 containers and
    // 1 content node, plus admin services, 6 hosts in total.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <container id='foo' version='1.0'>" +
            " <search/>" +
            " <document-api/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(6);
    VespaModel model = tester.createModel(services, true);
    assertEquals(6, model.getRoot().hostSystem().getHosts().size());
    assertEquals(3, model.getAdmin().getSlobroks().size());
    assertEquals(2, model.getContainerClusters().get("foo").getContainers().size());
    assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes(true));
}
@Test
public void testNoNodeTagMeansTwoNodesNoContent() {
    // Hosted, container cluster only, no <nodes> tag: defaults to 2 container
    // nodes with 2 slobroks.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <container id='foo' version='1.0'>" +
            " <search/>" +
            " <document-api/>" +
            " </container>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(2);
    VespaModel model = tester.createModel(services, true);
    assertEquals(2, model.getRoot().hostSystem().getHosts().size());
    assertEquals(2, model.getAdmin().getSlobroks().size());
    assertEquals(2, model.getContainerClusters().get("foo").getContainers().size());
}
@Test
public void testNoNodeTagMeans1NodeNonHosted() {
    // Non-hosted: without <nodes> tags everything is collapsed onto a single host.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <container id='foo' version='1.0'>" +
            " <search/>" +
            " <document-api/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.setHosted(false);
    tester.addHosts(1);
    VespaModel model = tester.createModel(services, true);
    assertEquals(1, model.getRoot().hostSystem().getHosts().size());
    assertEquals(1, model.getAdmin().getSlobroks().size());
    assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
    assertEquals(1, model.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size());
}
@Test
public void testSingleNodeNonHosted() {
    // Non-hosted: explicit single hostalias node in both clusters resolves to one
    // shared host.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <container id='foo' version='1.0'>" +
            " <search/>" +
            " <document-api/>" +
            " <nodes><node hostalias='foo'/></nodes>"+
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes><node hostalias='foo' distribution-key='0'/></nodes>"+
            " </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.setHosted(false);
    tester.addHosts(1);
    VespaModel model = tester.createModel(services, true);
    assertEquals(1, model.getRoot().hostSystem().getHosts().size());
    assertEquals(1, model.getAdmin().getSlobroks().size());
    assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
    assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes(true));
}
/** Recreate the combination used in some factory tests */
@Test
public void testMultitenantButNotHosted() {
    // Multitenant + non-hosted: two content nodes sharing one host, with a single
    // cluster controller container.
    String services =
            "<?xml version='1.0' encoding='UTF-8' ?>" +
            "<services version='1.0'>" +
            " <admin version='2.0'>" +
            " <adminserver hostalias='node1'/>" +
            " </admin>" +
            " <container id='default' version='1.0'>" +
            " <search/>" +
            " <nodes>" +
            " <node hostalias='node1'/>" +
            " </nodes>" +
            " </container>" +
            " <content id='storage' version='1.0'>" +
            " <redundancy>2</redundancy>" +
            " <group>" +
            " <node distribution-key='0' hostalias='node1'/>" +
            " <node distribution-key='1' hostalias='node1'/>" +
            " </group>" +
            " <tuning>" +
            " <cluster-controller>" +
            " <transition-time>0</transition-time>" +
            " </cluster-controller>" +
            " </tuning>" +
            " <documents>" +
            " <document mode='store-only' type='type1'/>" +
            " </documents>" +
            " <engine>" +
            " <proton/>" +
            " </engine>" +
            " </content>" +
            " </services>";
    VespaModel model = createNonProvisionedMultitenantModel(services);
    assertEquals(1, model.getRoot().hostSystem().getHosts().size());
    ContentCluster content = model.getContentClusters().get("storage");
    assertEquals(2, content.getRootGroup().getNodes().size());
    ContainerCluster<?> controller = model.getAdmin().getClusterControllers();
    assertEquals(1, controller.getContainers().size());
}
@Test
public void testModelWithReferencedIndexingCluster() {
    // Content cluster referencing an external container cluster for document
    // processing (<document-processing cluster="container"/>), all on one host.
    String services =
            "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
            "<services version=\"1.0\">\n" +
            "\n" +
            " <admin version=\"2.0\">\n" +
            " <adminserver hostalias=\"vespa-1\"/>\n" +
            " <configservers>\n" +
            " <configserver hostalias=\"vespa-1\"/>\n" +
            " </configservers>\n" +
            " </admin>\n" +
            "\n" +
            " <container id=\"container\" version=\"1.0\">\n" +
            " <document-processing/>\n" +
            " <document-api/>\n" +
            " <search/>\n" +
            " <nodes>\n" +
            " <jvm options=\"-Xms512m -Xmx512m\"/>\n" +
            " <node hostalias=\"vespa-1\"/>\n" +
            " </nodes>\n" +
            " </container>\n" +
            "\n" +
            " <content id=\"storage\" version=\"1.0\">\n" +
            " <search>\n" +
            " <visibility-delay>1.0</visibility-delay>\n" +
            " </search>\n" +
            " <redundancy>2</redundancy>\n" +
            " <documents>\n" +
            " <document type=\"type1\" mode=\"index\"/>\n" +
            " <document-processing cluster=\"container\"/>\n" +
            " </documents>\n" +
            " <nodes>\n" +
            " <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
            " </nodes>\n" +
            " </content>\n" +
            "\n" +
            "</services>";
    VespaModel model = createNonProvisionedMultitenantModel(services);
    assertEquals(1, model.getRoot().hostSystem().getHosts().size());
    ContentCluster content = model.getContentClusters().get("storage");
    assertEquals(1, content.getRootGroup().getNodes().size());
    ContainerCluster<?> controller = model.getAdmin().getClusterControllers();
    assertEquals(1, controller.getContainers().size());
}
@Test
public void testSharedNodesNotHosted() {
    // Non-hosted with explicit hosts.xml: the container and content clusters share
    // the same three physical hosts.
    String hosts =
            "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
            "<hosts>\n" +
            " <host name=\"vespa-1\">\n" +
            " <alias>vespa-1</alias>\n" +
            " </host>\n" +
            " <host name=\"vespa-2\">\n" +
            " <alias>vespa-2</alias>\n" +
            " </host>\n" +
            " <host name=\"vespa-3\">\n" +
            " <alias>vespa-3</alias>\n" +
            " </host>\n" +
            "</hosts>";
    String services =
            "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
            "<services version=\"1.0\">\n" +
            "\n" +
            " <admin version=\"2.0\">\n" +
            " <adminserver hostalias=\"vespa-1\"/>\n" +
            " <configservers>\n" +
            " <configserver hostalias=\"vespa-1\"/>\n" +
            " </configservers>\n" +
            " </admin>\n" +
            "\n" +
            " <container id=\"container\" version=\"1.0\">\n" +
            " <document-processing/>\n" +
            " <document-api/>\n" +
            " <search/>\n" +
            " <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" +
            " <node hostalias=\"vespa-1\"/>\n" +
            " <node hostalias=\"vespa-2\"/>\n" +
            " <node hostalias=\"vespa-3\"/>\n" +
            " </nodes>\n" +
            " </container>\n" +
            "\n" +
            " <content id=\"storage\" version=\"1.0\">\n" +
            " <search>\n" +
            " <visibility-delay>1.0</visibility-delay>\n" +
            " </search>\n" +
            " <redundancy>2</redundancy>\n" +
            " <documents>\n" +
            " <document type=\"type1\" mode=\"index\"/>\n" +
            " <document-processing cluster=\"container\"/>\n" +
            " </documents>\n" +
            " <nodes>\n" +
            " <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
            " <node hostalias=\"vespa-2\" distribution-key=\"1\"/>\n" +
            " <node hostalias=\"vespa-3\" distribution-key=\"2\"/>\n" +
            " </nodes>\n" +
            " </content>\n" +
            "\n" +
            "</services>";
    VespaModel model = createNonProvisionedModel(false, hosts, services);
    assertEquals(3, model.getRoot().hostSystem().getHosts().size());
    ContentCluster content = model.getContentClusters().get("storage");
    assertEquals(3, content.getRootGroup().getNodes().size());
}
@Test
public void testMultitenantButNotHostedSharedContentNode() {
    // As testMultitenantButNotHosted, but with a second content cluster sharing
    // the same hostalias; still one host and one cluster controller.
    String services =
            "<?xml version='1.0' encoding='UTF-8' ?>" +
            "<services version='1.0'>" +
            " <admin version='2.0'>" +
            " <adminserver hostalias='node1'/>" +
            " </admin>" +
            " <container id='default' version='1.0'>" +
            " <search/>" +
            " <nodes>" +
            " <node hostalias='node1'/>" +
            " </nodes>" +
            " </container>" +
            " <content id='storage' version='1.0'>" +
            " <redundancy>2</redundancy>" +
            " <group>" +
            " <node distribution-key='0' hostalias='node1'/>" +
            " <node distribution-key='1' hostalias='node1'/>" +
            " </group>" +
            " <tuning>" +
            " <cluster-controller>" +
            " <transition-time>0</transition-time>" +
            " </cluster-controller>" +
            " </tuning>" +
            " <documents>" +
            " <document mode='store-only' type='type1'/>" +
            " </documents>" +
            " <engine>" +
            " <proton/>" +
            " </engine>" +
            " </content>" +
            " <content id='search' version='1.0'>" +
            " <redundancy>2</redundancy>" +
            " <group>" +
            " <node distribution-key='0' hostalias='node1'/>" +
            " </group>" +
            " <documents>" +
            " <document type='type1'/>" +
            " </documents>" +
            " </content>" +
            " </services>";
    VespaModel model = createNonProvisionedMultitenantModel(services);
    assertEquals(1, model.getRoot().hostSystem().getHosts().size());
    ContentCluster content = model.getContentClusters().get("storage");
    assertEquals(2, content.getRootGroup().getNodes().size());
    ContainerCluster<?> controller = model.getAdmin().getClusterControllers();
    assertEquals(1, controller.getContainers().size());
}
@Test
public void testStatefulProperty() {
    // Cluster membership must be marked stateful for content clusters and for
    // container clusters holding ZooKeeper, but not for plain container clusters.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='qrs'>" +
            " <nodes count='1'/>" +
            " </container>" +
            " <container version='1.0' id='zk'>" +
            " <zookeeper/>" +
            " <nodes count='3'/>" +
            " </container>" +
            " <content version='1.0' id='content'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2'/>" +
            " </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(9);
    VespaModel model = tester.createModel(servicesXml, true);
    // Expected statefulness per cluster id.
    Map<String, Boolean> tests = Map.of("qrs", false,
                                        "zk", true,
                                        "content", true);
    Map<String, List<HostResource>> hostsByCluster = model.hostSystem().getHosts().stream()
            .collect(Collectors.groupingBy(h -> h.spec().membership().get().cluster().id().value()));
    tests.forEach((clusterId, stateful) -> {
        List<HostResource> hosts = hostsByCluster.getOrDefault(clusterId, List.of());
        assertFalse(hosts.isEmpty(), "Hosts are provisioned for '" + clusterId + "'");
        assertEquals(stateful,
                     hosts.stream().allMatch(h -> h.spec().membership().get().cluster().isStateful()),
                     "Hosts in cluster '" + clusterId + "' are " + (stateful ? "" : "not ") + "stateful");
    });
}
@Test
public void containerWithZooKeeperSuboptimalNodeCountDuringRetirement() {
    // When one node of a 3-node ZooKeeper container cluster is retired, the
    // cluster temporarily holds 4 containers: 3 active + 1 retired.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='zk'>" +
            " <zookeeper/>" +
            " <nodes count='3'/>" +
            " </container>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(4);
    // The trailing argument names the host to retire.
    VespaModel model = tester.createModel(servicesXml, true, "node-1-3-50-04");
    ApplicationContainerCluster cluster = model.getContainerClusters().get("zk");
    assertEquals(1, cluster.getContainers().stream().filter(Container::isRetired).count());
    assertEquals(3, cluster.getContainers().stream().filter(c -> !c.isRetired()).count());
}
@Test
public void containerWithZooKeeperJoiningServers() {
    // Verifies the ZooKeeper dynamic-reconfiguration flags: servers added while
    // previous servers are retired must be marked 'joining', and the retired
    // servers must be marked 'retired' in ZookeeperServerConfig.
    Function<Integer, String> servicesXml = (nodeCount) -> {
        return "<?xml version='1.0' encoding='utf-8' ?>" +
               "<services>" +
               " <container version='1.0' id='zk'>" +
               " <zookeeper/>" +
               " <nodes count='" + nodeCount + "'/>" +
               " </container>" +
               "</services>";
    };
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(5);
    VespaModel model = tester.createModel(servicesXml.apply(3), true);
    {
        // First deployment: no server is joining.
        ApplicationContainerCluster cluster = model.getContainerClusters().get("zk");
        ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder();
        cluster.getContainers().forEach(c -> c.getConfig(config));
        cluster.getConfig(config);
        assertTrue(config.build().server().stream().noneMatch(ZookeeperServerConfig.Server::joining), "Initial servers are not joining");
    }
    {
        // Redeploy with two hosts retired: their replacements (ids 3 and 4) join,
        // while the retired servers (ids 1 and 2) are flagged retired.
        VespaModel nextModel = tester.createModel(Zone.defaultZone(), servicesXml.apply(3), true, false, false, 0, Optional.of(model), new DeployState.Builder(), "node-1-3-50-04", "node-1-3-50-03");
        ApplicationContainerCluster cluster = nextModel.getContainerClusters().get("zk");
        ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder();
        cluster.getContainers().forEach(c -> c.getConfig(config));
        cluster.getConfig(config);
        assertEquals(Map.of(0, false,
                            1, false,
                            2, false,
                            3, true,
                            4, true),
                     config.build().server().stream().collect(Collectors.toMap(ZookeeperServerConfig.Server::id,
                                                                               ZookeeperServerConfig.Server::joining)),
                     "New nodes are joining");
        assertEquals(Map.of(0, false,
                            1, true,
                            2, true,
                            3, false,
                            4, false),
                     config.build().server().stream().collect(Collectors.toMap(ZookeeperServerConfig.Server::id,
                                                                               ZookeeperServerConfig.Server::retired)),
                     "Retired nodes are retired");
    }
}
/** Convenience: builds a non-provisioned model with multitenant=true and no hosts.xml. */
private VespaModel createNonProvisionedMultitenantModel(String services) {
    return createNonProvisionedModel(true, null, services);
}
/**
 * Builds a VespaModel directly from a mock application package, without node
 * provisioning.
 *
 * @param multitenant whether the deploy properties should set multitenant mode
 * @param hosts       hosts.xml content, or null for none
 * @param services    services.xml content
 */
private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) {
    VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(hosts, services, generateSchemas("type1"));
    DeployState deployState = new DeployState.Builder()
            .applicationPackage(creator.appPkg)
            .properties(new TestProperties().setMultitenant(multitenant))
            .build();
    return creator.create(false, deployState);
}
/** Returns the JVM heap size, as a percentage of physical memory, configured for the given cluster. */
private int physicalMemoryPercentage(ContainerCluster<?> cluster) {
    QrStartConfig.Builder configBuilder = new QrStartConfig.Builder();
    cluster.getConfig(configBuilder);
    QrStartConfig config = configBuilder.build();
    return config.jvm().heapSizeAsPercentageOfPhysicalMemory();
}
/** Returns the memory size (bytes) reported in proton's hwinfo config for the cluster's first search node. */
private long protonMemorySize(ContentCluster cluster) {
    ProtonConfig.Builder configBuilder = new ProtonConfig.Builder();
    cluster.getSearch().getIndexed().getSearchNode(0).getConfig(configBuilder);
    ProtonConfig config = configBuilder.build();
    return config.hwinfo().memory().size();
}
@Test
public void require_that_proton_config_is_tuned_based_on_node_resources() {
    // Proton's disk write speed must be derived from the node's resources
    // (slow disk here), identically on both search nodes.
    String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
            "<services>",
            " <content version='1.0' id='test'>",
            " <documents>",
            " <document type='type1' mode='index'/>",
            " </documents>",
            " <nodes count='2'>",
            " <resources vcpu='1' memory='3Gb' disk='9Gb' bandwidth='5Gbps' disk-speed='slow'/>",
            " </nodes>",
            " </content>",
            "</services>");
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(new NodeResources(1, 3, 10, 5, NodeResources.DiskSpeed.slow), 5);
    VespaModel model = tester.createModel(services, true, 0);
    ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
    assertEquals(2, cluster.getSearchNodes().size());
    assertEquals(40, getProtonConfig(cluster, 0).hwinfo().disk().writespeed(), 0.001);
    assertEquals(40, getProtonConfig(cluster, 1).hwinfo().disk().writespeed(), 0.001);
}
/** Returns the proton config produced by the search node at the given index of the cluster. */
private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) {
    List<SearchNode> nodes = cluster.getSearchNodes();
    assertTrue(searchNodeIdx < nodes.size());
    ProtonConfig.Builder configBuilder = new ProtonConfig.Builder();
    nodes.get(searchNodeIdx).getConfig(configBuilder);
    return new ProtonConfig(configBuilder);
}
@Test
public void require_that_config_override_and_explicit_proton_tuning_and_resource_limits_have_precedence_over_default_node_resource_tuning() {
    // Precedence order checked here: explicit <config> override (maxtlssize) and
    // explicit proton tuning (maxmemorygain) win over resource-derived defaults,
    // while untouched values (each().maxmemory) still come from node resources.
    String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
            "<services>",
            " <content version='1.0' id='test'>",
            " <config name='vespa.config.search.core.proton'>",
            " <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>",
            " </config>",
            " <documents>",
            " <document type='type1' mode='index'/>",
            " </documents>",
            " <nodes count='1'>",
            " <resources vcpu='1' memory='128Gb' disk='100Gb'/>",
            " </nodes>",
            " <engine>",
            " <proton>",
            " <tuning>",
            " <searchnode>",
            " <flushstrategy>",
            " <native>",
            " <total>",
            " <maxmemorygain>1000</maxmemorygain>",
            " </total>",
            " </native>",
            " </flushstrategy>",
            " </searchnode>",
            " </tuning>",
            " </proton>",
            " </engine>",
            " </content>",
            "</services>");
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(new NodeResources(1, 3, 10, 1), 4);
    tester.addHosts(new NodeResources(1, 128, 100, 0.3), 1);
    VespaModel model = tester.createModel(services, true, 0);
    ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
    ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId());
    assertEquals(2000, cfg.flush().memory().maxtlssize()); // from <config> override
    assertEquals(1000, cfg.flush().memory().maxmemory());  // from explicit proton tuning
    // Derived from node memory: 8% of (memory minus reserved), in bytes.
    assertEquals((long) ((128 - reservedMemoryGb) * GB * 0.08), cfg.flush().memory().each().maxmemory());
}
/** Resolves the ProtonConfig that the model serves for the given config id. */
private static ProtonConfig getProtonConfig(VespaModel model, String configId) {
    ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder();
    model.getConfig(protonBuilder, configId);
    return new ProtonConfig(protonBuilder);
}
/**
 * Verifies that the logserver host also runs a logserver container service, that the
 * container reports the application generation, and that logd is configured to use it.
 */
private void testContainerOnLogserverHost(String services, boolean useDedicatedNodeForLogserver) {
    int hostCount = 2;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.useDedicatedNodeForLogserver(useDedicatedNodeForLogserver);
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(Zone.defaultZone(), services, true);
    assertEquals(hostCount, vespaModel.getRoot().hostSystem().getHosts().size());

    Admin admin = vespaModel.getAdmin();
    HostResource logserverHost = admin.getLogserver().getHostResource();
    assertNotNull(logserverHost.getService("logserver"));
    String containerServiceType = ContainerServiceType.LOGSERVER_CONTAINER.serviceName;
    assertNotNull(logserverHost.getService(containerServiceType));

    // The logserver container must be served application metadata for generation 1.
    String configId = logserverHost.getService(containerServiceType).getConfigId();
    ApplicationMetadataConfig.Builder metadataBuilder = new ApplicationMetadataConfig.Builder();
    vespaModel.getConfig(metadataBuilder, configId);
    assertEquals(1, new ApplicationMetadataConfig(metadataBuilder).generation());

    // logd on this host must be configured to forward to the logserver.
    LogdConfig.Builder logdBuilder = new LogdConfig.Builder();
    vespaModel.getConfig(logdBuilder, configId);
    assertTrue(new LogdConfig(logdBuilder).logserver().use());
}
/** Asserts that exactly nodeCount hosts are provisioned to the given cluster id/type (and combined id, when non-null). */
private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Id combinedId,
                                      ClusterSpec.Type type, VespaModel model) {
    Optional<ClusterSpec.Id> expectedCombinedId = Optional.ofNullable(combinedId);
    long matching = model.hostSystem().getHosts().stream()
                         .map(host -> host.spec().membership().get().cluster())
                         .filter(spec -> spec.id().equals(id))
                         .filter(spec -> spec.type().equals(type))
                         .filter(spec -> spec.combinedId().equals(expectedCombinedId))
                         .count();
    String description = "Nodes in cluster " + id + " with type " + type
                         + (combinedId == null ? "" : ", combinedId " + combinedId);
    assertEquals(nodeCount, matching, description);
}
// Convenience overload for clusters that are not combined with a container cluster (no combined id).
private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Type type, VespaModel model) {
assertProvisioned(nodeCount, id, null, type, model);
}
// DeployLogger test double that records every logged (level, message) pair for later inspection.
record TestLogger(List<LogMessage> msgs) implements DeployLogger {
// Starts with an empty, mutable message list so log() can append to it.
public TestLogger() {
this(new ArrayList<>());
}
@Override
public void log(Level level, String message) {
msgs.add(new LogMessage(level, message));
}
// One recorded log entry.
record LogMessage(Level level, String message) {}
}
} |
> I think we need a 'type'. I'd rather not have the exact GPU type, as it may change over time and vary between clouds. Count+memory should be sufficient, as there aren't that many Nvidia memory combinations. > If count is number of cores: It's a full GPU, not cores. We can discuss the finer points of the syntax in the next architect review. | public void testRequestingSpecificNodeResources() {
String services =
"""
<?xml version='1.0' encoding='utf-8' ?>
<services>
<admin version='4.0'>
<logservers>
<nodes count='1' dedicated='true'>
<resources vcpu='0.1' memory='0.2Gb' disk='300Gb' disk-speed='slow'/>
</nodes>
</logservers>
<slobroks>
<nodes count='2' dedicated='true'>
<resources vcpu='0.1' memory='0.3Gb' disk='1Gb' bandwidth='500Mbps'/>
</nodes>
</slobroks>
</admin>
<container version='1.0' id='container'>
<nodes count='4'>
<resources vcpu='12' memory='10Gb' disk='30Gb' architecture='arm64'/>
</nodes>
</container>
<container version='1.0' id='container2'>
<nodes count='2'>
<resources vcpu='4' memory='16Gb' disk='125Gb'>
<gpu count='1' memory='16Gb'/>
</resources>
</nodes>
</container>
<content version='1.0' id='foo'>
<documents>
<document type='type1' mode='index'/>
</documents>
<nodes count='5'>
<resources vcpu='8' memory='200Gb' disk='1Pb'/>
</nodes>
</content>
<content version='1.0' id='bar'>
<documents>
<document type='type1' mode='index'/>
</documents>
<nodes count='6'>
<resources vcpu='10' memory='64Gb' disk='200Gb'/>
</nodes>
</content>
</services>
""";
int totalHosts = 23;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(new NodeResources(0.1, 0.2, 300, 0.3, NodeResources.DiskSpeed.slow), 1);
tester.addHosts(new NodeResources(0.1, 0.3, 1, 0.5), 2);
tester.addHosts(new NodeResources(12, 10, 30, 0.3,
NodeResources.DiskSpeed.fast, NodeResources.StorageType.local, NodeResources.Architecture.arm64), 4);
tester.addHosts(new NodeResources(4, 16, 125, 10,
NodeResources.DiskSpeed.fast, NodeResources.StorageType.local, Architecture.x86_64,
new NodeResources.GpuResources(1, 16)), 4);
tester.addHosts(new NodeResources(8, 200, 1000000, 0.3), 5);
tester.addHosts(new NodeResources(10, 64, 200, 0.3), 6);
tester.addHosts(new NodeResources(0.5, 2, 10, 0.3), 6);
VespaModel model = tester.createModel(services, true, 0);
assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size());
} | <gpu count='1' memory='16Gb'/> | public void testRequestingSpecificNodeResources() {
String services =
"""
<?xml version='1.0' encoding='utf-8' ?>
<services>
<admin version='4.0'>
<logservers>
<nodes count='1' dedicated='true'>
<resources vcpu='0.1' memory='0.2Gb' disk='300Gb' disk-speed='slow'/>
</nodes>
</logservers>
<slobroks>
<nodes count='2' dedicated='true'>
<resources vcpu='0.1' memory='0.3Gb' disk='1Gb' bandwidth='500Mbps'/>
</nodes>
</slobroks>
</admin>
<container version='1.0' id='container'>
<nodes count='4'>
<resources vcpu='12' memory='10Gb' disk='30Gb' architecture='arm64'/>
</nodes>
</container>
<container version='1.0' id='container2'>
<nodes count='2'>
<resources vcpu='4' memory='16Gb' disk='125Gb'>
<gpu count='1' memory='16Gb'/>
</resources>
</nodes>
</container>
<content version='1.0' id='foo'>
<documents>
<document type='type1' mode='index'/>
</documents>
<nodes count='5'>
<resources vcpu='8' memory='200Gb' disk='1Pb'/>
</nodes>
</content>
<content version='1.0' id='bar'>
<documents>
<document type='type1' mode='index'/>
</documents>
<nodes count='6'>
<resources vcpu='10' memory='64Gb' disk='200Gb'/>
</nodes>
</content>
</services>
""";
int totalHosts = 23;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(new NodeResources(0.1, 0.2, 300, 0.3, NodeResources.DiskSpeed.slow), 1);
tester.addHosts(new NodeResources(0.1, 0.3, 1, 0.5), 2);
tester.addHosts(new NodeResources(12, 10, 30, 0.3,
NodeResources.DiskSpeed.fast, NodeResources.StorageType.local, NodeResources.Architecture.arm64), 4);
tester.addHosts(new NodeResources(4, 16, 125, 10,
NodeResources.DiskSpeed.fast, NodeResources.StorageType.local, Architecture.x86_64,
new NodeResources.GpuResources(1, 16)), 4);
tester.addHosts(new NodeResources(8, 200, 1000000, 0.3), 5);
tester.addHosts(new NodeResources(10, 64, 200, 0.3), 6);
tester.addHosts(new NodeResources(0.5, 2, 10, 0.3), 6);
VespaModel model = tester.createModel(services, true, 0);
assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size());
} | class ModelProvisioningTest {
// Verifies <nodes count=.../> inside container clusters with hosts.xml-based provisioning:
// container counts and config ids, jvm settings (preload, allocated-memory, gc-options, options),
// and host lookup by hostname.
@Test
public void testNodesJdisc() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>\n" +
"\n" +
"<admin version='3.0'><nodes count='1' /></admin>\n" +
"<container id='mydisc' version='1.0'>" +
" <handler id='myHandler'>" +
" <component id='injected' />" +
" </handler>" +
" <nodes count=\"3\"/>" +
"</container>" +
"<container id='mydisc2' version='1.0'>" +
" <document-processing/>" +
" <handler id='myHandler'>" +
" <component id='injected' />" +
" </handler>" +
" <nodes count='2' preload='lib/blablamalloc.so'>" +
" <jvm allocated-memory='45%' gc-options='-XX:+UseParNewGC' options='-Xlog:gc' />" +
" </nodes>" +
"</container>" +
"</services>";
String hosts ="<hosts>"
+ " <host name='myhost0'>"
+ " <alias>node0</alias>"
+ " </host>"
+ " <host name='myhost1'>"
+ " <alias>node1</alias>"
+ " </host>"
+ " <host name='myhost2'>"
+ " <alias>node2</alias>"
+ " </host>"
+ " <host name='myhost3'>"
+ " <alias>node3</alias>"
+ " </host>"
+ " <host name='myhost4'>"
+ " <alias>node4</alias>"
+ " </host>"
+ " <host name='myhost5'>"
+ " <alias>node5</alias>"
+ " </host>"
+ "</hosts>";
VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services);
VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true, false)));
ApplicationContainerCluster mydisc = model.getContainerClusters().get("mydisc");
ApplicationContainerCluster mydisc2 = model.getContainerClusters().get("mydisc2");
// mydisc: 3 containers, config ids numbered sequentially from 0.
assertEquals(3, mydisc.getContainers().size());
assertEquals("mydisc/container.0", (mydisc.getContainers().get(0).getConfigId()));
assertTrue(mydisc.getContainers().get(0).isInitialized());
assertEquals("mydisc/container.1", mydisc.getContainers().get(1).getConfigId());
assertTrue(mydisc.getContainers().get(1).isInitialized());
assertEquals("mydisc/container.2", mydisc.getContainers().get(2).getConfigId());
assertTrue(mydisc.getContainers().get(2).isInitialized());
assertEquals(2, mydisc2.getContainers().size());
assertEquals("mydisc2/container.0", mydisc2.getContainers().get(0).getConfigId());
assertTrue(mydisc2.getContainers().get(0).isInitialized());
assertEquals("mydisc2/container.1", mydisc2.getContainers().get(1).getConfigId());
assertTrue(mydisc2.getContainers().get(1).isInitialized());
// mydisc has no <jvm>/preload settings: empty jvm options, default vespamalloc preload, no heap percentage.
assertEquals("", mydisc.getContainers().get(0).getJvmOptions());
assertEquals("", mydisc.getContainers().get(1).getJvmOptions());
assertEquals("", mydisc.getContainers().get(2).getJvmOptions());
assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(0).getPreLoad());
assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(1).getPreLoad());
assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(2).getPreLoad());
assertEquals(Optional.empty(), mydisc.getMemoryPercentage());
// mydisc2 gets the explicit values from its <nodes preload=...> and <jvm> attributes.
assertEquals("-Xlog:gc", mydisc2.getContainers().get(0).getJvmOptions());
assertEquals("-Xlog:gc", mydisc2.getContainers().get(1).getJvmOptions());
assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(0).getPreLoad());
assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(1).getPreLoad());
assertEquals(Optional.of(45), mydisc2.getMemoryPercentage());
assertEquals(Optional.of("-XX:+UseParNewGC"), mydisc2.getJvmGCOptions());
// The allocated-memory percentage must also show up in the generated QrStartConfig.
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
mydisc2.getConfig(qrStartBuilder);
QrStartConfig qrsStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals(45, qrsStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory());
// Hosts declared in hosts.xml are resolvable by hostname; unknown names yield null.
HostSystem hostSystem = model.hostSystem();
assertNotNull(hostSystem.getHostByHostname("myhost0"));
assertNotNull(hostSystem.getHostByHostname("myhost1"));
assertNotNull(hostSystem.getHostByHostname("myhost2"));
assertNotNull(hostSystem.getHostByHostname("myhost3"));
assertNull(hostSystem.getHostByHostname("Nope"));
}
@Test
public void testNodeCountForContentGroup() {
    // One content cluster with two nodes in the (implicit) root group.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "\n" +
            " <admin version='3.0'>" +
            " <nodes count='3'/>" +
            " </admin>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2'/>" +
            " </content>" +
            "</services>";
    int expectedHostCount = 5;
    int expectedContentNodeCount = 2;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(expectedHostCount);
    VespaModel model = tester.createModel(xmlWithNodes, true);
    assertEquals(expectedHostCount, model.getRoot().hostSystem().getHosts().size());
    var contentNodes = model.getContentClusters().get("bar").getRootGroup().getNodes();
    assertEquals(expectedContentNodeCount, contentNodes.size());
    // Distribution keys are assigned sequentially from 0.
    for (int key = 0; key < contentNodes.size(); key++)
        assertEquals(key, contentNodes.get(key).getDistributionKey());
}
// Separate (non-combined) container and content clusters: each cluster is provisioned on its own
// nodes, and a content cluster declared without an id gets the default id 'content'.
@Test
public void testSeparateClusters() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes count='1'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
" <content version='1.0'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(8);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(1, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
assertEquals(2, model.getContentClusters().get("content").getRootGroup().getNodes().size(), "Nodes in cluster without ID");
// A standalone container keeps the default heap percentage (unlike combined clusters, see tests below in the file).
assertEquals(ApplicationContainerCluster.defaultHeapSizePercentageOfTotalNodeMemory, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size for container");
assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Type.content, model);
assertProvisioned(1, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
assertProvisioned(2, ClusterSpec.Id.from("content"), ClusterSpec.Type.content, model);
}
@Test
public void testClusterMembership() {
    // A single container node: its host spec must carry membership info for that cluster.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='container1'>" +
            " <nodes count='1'/>" +
            " </container>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(1);
    VespaModel model = tester.createModel(servicesXml, true);
    assertEquals(1, model.hostSystem().getHosts().size());
    HostResource onlyHost = model.hostSystem().getHosts().iterator().next();
    assertTrue(onlyHost.spec().membership().isPresent());
    var clusterSpec = onlyHost.spec().membership().get().cluster();
    assertEquals("container", clusterSpec.type().name());
    assertEquals("container1", clusterSpec.id().value());
}
// A combined cluster (<nodes of='content1'>): the container cluster reuses the content cluster's
// nodes, the jvm heap percentage is lowered, proton's memory budget shrinks by the same fraction,
// and a deprecation warning is logged exactly once.
@Test
public void testCombinedCluster() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes of='content1'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'>" +
" <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +
" </nodes>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(5);
TestLogger logger = new TestLogger();
VespaModel model = tester.createModel(xmlWithNodes, true, new DeployState.Builder().deployLogger(logger));
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
assertEquals(18, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is lowered with combined clusters");
// Proton gets what remains of usable memory after the 18% jvm heap share.
assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.18)), protonMemorySize(model.getContentClusters()
.get("content1")), "Memory for proton is lowered to account for the jvm heap");
// No separate container hosts: the nodes are provisioned as 'combined', tagged with the container cluster id.
assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model);
assertEquals(1, logger.msgs().size());
assertEquals("Declaring combined cluster with <nodes of=\"...\"> is deprecated without replacement, " +
"and the feature will be removed in Vespa 9. Use separate container and content clusters instead",
// Use the record's accessor rather than direct component-field access (which only
// compiles because LogMessage is nested in the same top-level class).
logger.msgs().get(0).message());
}
// Same as testCombinedCluster, but <jvm allocated-memory="30%"/> overrides the default
// combined-cluster heap percentage; proton's memory budget shrinks by the same fraction.
@Test
public void testCombinedClusterWithJvmHeapSizeOverride() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes of='content1'>" +
" <jvm allocated-memory=\"30%\"/>" +
" </nodes>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'>" +
" <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +
" </nodes>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(5);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
assertEquals(30, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is lowered with combined clusters");
// Proton gets what remains of usable memory after the overridden 30% jvm heap share.
assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.30)), protonMemorySize(model.getContentClusters()
.get("content1")), "Memory for proton is lowered to account for the jvm heap");
assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model);
}
/** For comparison with the combined-cluster tests above: separate clusters keep the default heap percentage and full proton memory. */
@Test
public void testNonCombinedCluster() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes count='2'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'>" +
" <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +
" </nodes>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(7);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
// No combined-cluster adjustments here: heap and proton memory are the plain defaults.
assertEquals(ApplicationContainerCluster.defaultHeapSizePercentageOfTotalNodeMemory, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is normal");
assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3))), protonMemorySize(model.getContentClusters().get("content1")), "Memory for proton is normal");
}
// In a combined cluster, explicit <jvm options> must be applied to every container
// running on the shared content nodes.
@Test
public void testCombinedClusterWithJvmOptions() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <document-processing/>" +
" <nodes of='content1'>" +
" <jvm options='-Dtestoption=foo' />" +
" </nodes>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(5);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
// Every container in the combined cluster carries the configured option.
for (Container container : model.getContainerClusters().get("container1").getContainers())
assertTrue(container.getJvmOptions().contains("testoption"));
}
// Two independent combined clusters: each container cluster reuses its referenced content
// cluster's nodes, so container counts follow the content node counts (2 and 3).
@Test
public void testMultipleCombinedClusters() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes of='content1'/>" +
" </container>" +
" <container version='1.0' id='container2'>" +
" <nodes of='content2'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
" <content version='1.0' id='content2'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='3'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(8);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
assertEquals(3, model.getContentClusters().get("content2").getRootGroup().getNodes().size(), "Nodes in content2");
assertEquals(3, model.getContainerClusters().get("container2").getContainers().size(), "Nodes in container2");
}
@Test
public void testNonExistingCombinedClusterReference() {
    // A <nodes of='...'> reference to a service id that is not defined must be rejected.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='container1'>" +
            " <nodes of='container2'/>" +
            " </container>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(2);
    try {
        tester.createModel(servicesXml, true);
        fail("Expected exception");
    } catch (IllegalArgumentException expected) {
        String message = Exceptions.toMessageString(expected);
        assertEquals("container cluster 'container1' contains an invalid reference: referenced service 'container2' is not defined", message);
    }
}
@Test
public void testInvalidCombinedClusterReference() {
    // A <nodes of='...'> reference to another container cluster must be rejected:
    // only content clusters may be referenced.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='container1'>" +
            " <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" +
            " </container>" +
            " <container version='1.0' id='container2'>" +
            " <nodes count='2'/>" +
            " </container>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(2);
    try {
        tester.createModel(servicesXml, true);
        fail("Expected exception");
    } catch (IllegalArgumentException expected) {
        String message = Exceptions.toMessageString(expected);
        assertEquals("container cluster 'container1' contains an invalid reference: service 'container2' is not a content service", message);
    }
}
@Test
public void testCombinedClusterWithZooKeeperFails() {
    // Declaring <zookeeper/> in a combined cluster must be rejected.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='container1'>" +
            " <search/>" +
            " <nodes of='content1'/>" +
            " <zookeeper />" +
            " </container>" +
            " <content version='1.0' id='content1'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2'>" +
            " <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +
            " </nodes>" +
            " </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(2);
    try {
        tester.createModel(servicesXml, true);
        fail("ZooKeeper should not be allowed on combined clusters");
    } catch (IllegalArgumentException expected) {
        assertEquals("A combined cluster cannot run ZooKeeper", expected.getMessage());
    }
}
// Verifies <nodes count=... groups=...>: 10 container + 27 'bar' (9 groups of 3) + 27 'baz'
// (27 groups of 1) + 3 dedicated cluster controllers = 67 hosts, plus placement of the
// admin services (slobroks, logserver) and the exact group/node/host layout.
@Test
public void testUsingNodesAndGroupCountAttributes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='9'/>" +
" </content>" +
" <content version='1.0' id='baz'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='27'/>" +
" </content>" +
"</services>";
int numberOfHosts = 67;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
assertEquals(1, model.getContainerClusters().size());
Set<HostResource> containerHosts = model.getContainerClusters().get("foo").getContainers().stream()
.map(Container::getHost)
.collect(Collectors.toSet());
assertEquals(10, containerHosts.size());
// Admin service placement: slobroks co-located with cluster controllers, logserver on a container node.
Admin admin = model.getAdmin();
Set<HostResource> clusterControllerHosts = admin.getClusterControllers().getContainers()
.stream().map(cc -> cc.getHostResource()).collect(Collectors.toSet());
Set<HostResource> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
assertEquals(3, slobrokHosts.size());
assertTrue(clusterControllerHosts.containsAll(slobrokHosts), "Slobroks are assigned on cluster controller nodes");
assertTrue(containerHosts.contains(admin.getLogserver().getHost()), "Logserver is assigned from container nodes");
assertEquals(0, admin.getConfigservers().size(), "No in-cluster config servers in a hosted environment");
assertEquals(3, admin.getClusterControllers().getContainers().size(), "Dedicated admin cluster controllers when hosted");
// 'bar': 27 nodes in 9 groups of 3; distribution keys and config ids are assigned in order,
// and hosts are taken in order from the host pool (node-1-3-50-57, ...).
ContentCluster cluster = model.getContentClusters().get("bar");
List<StorageGroup> subGroups = cluster.getRootGroup().getSubgroups();
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(9, subGroups.size());
assertEquals("0", subGroups.get(0).getIndex());
assertEquals(3, subGroups.get(0).getNodes().size());
assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-57", subGroups.get(0).getNodes().get(0).getHostName());
assertEquals(1, subGroups.get(0).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/1", subGroups.get(0).getNodes().get(1).getConfigId());
assertEquals(2, subGroups.get(0).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/2", subGroups.get(0).getNodes().get(2).getConfigId());
assertEquals("1", subGroups.get(1).getIndex());
assertEquals(3, subGroups.get(1).getNodes().size());
assertEquals(3, subGroups.get(1).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/3", subGroups.get(1).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-54", subGroups.get(1).getNodes().get(0).getHostName());
assertEquals(4, subGroups.get(1).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/4", subGroups.get(1).getNodes().get(1).getConfigId());
assertEquals(5, subGroups.get(1).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/5", subGroups.get(1).getNodes().get(2).getConfigId());
assertEquals("node-1-3-50-51", subGroups.get(2).getNodes().get(0).getHostName());
assertEquals("8", subGroups.get(8).getIndex());
assertEquals(3, subGroups.get(8).getNodes().size());
assertEquals(24, subGroups.get(8).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/24", subGroups.get(8).getNodes().get(0).getConfigId());
assertEquals(25, subGroups.get(8).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/25", subGroups.get(8).getNodes().get(1).getConfigId());
assertEquals(26, subGroups.get(8).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/26", subGroups.get(8).getNodes().get(2).getConfigId());
// 'baz': 27 single-node groups, same sequential key/config-id assignment.
cluster = model.getContentClusters().get("baz");
subGroups = cluster.getRootGroup().getSubgroups();
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(27, subGroups.size());
assertEquals("0", subGroups.get(0).getIndex());
assertEquals(1, subGroups.get(0).getNodes().size());
assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-27", subGroups.get(0).getNodes().get(0).getHostName());
assertEquals("1", subGroups.get(1).getIndex());
assertEquals(1, subGroups.get(1).getNodes().size());
assertEquals(1, subGroups.get(1).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/1", subGroups.get(1).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-26", subGroups.get(1).getNodes().get(0).getHostName());
assertEquals("node-1-3-50-25", subGroups.get(2).getNodes().get(0).getHostName());
assertEquals("26", subGroups.get(26).getIndex());
assertEquals(1, subGroups.get(26).getNodes().size());
assertEquals(26, subGroups.get(26).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/26", subGroups.get(26).getNodes().get(0).getConfigId());
}
// With no content clusters (hence no cluster controller nodes to co-locate with),
// slobroks and the logserver are placed on container nodes.
@Test
public void testSlobroksOnContainersIfNoContentClusters() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
"</services>";
int numberOfHosts = 10;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
assertEquals(1, model.getContainerClusters().size());
Set<HostResource> containerHosts = model.getContainerClusters().get("foo").getContainers().stream()
.map(Container::getHost)
.collect(Collectors.toSet());
assertEquals(10, containerHosts.size());
Admin admin = model.getAdmin();
Set<HostResource> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
assertEquals(3, slobrokHosts.size());
assertTrue(containerHosts.containsAll(slobrokHosts),
"Slobroks are assigned from container nodes");
assertTrue(containerHosts.contains(admin.getLogserver().getHost()), "Logserver is assigned from container nodes");
assertEquals(0, admin.getConfigservers().size(), "No in-cluster config servers in a hosted environment");
}
// Two grouped content clusters ('bar': 27 nodes in 9 groups, 'baz': 27 nodes in 27
// groups) plus a container cluster. Verifies group structure, distribution keys and
// config ids, and that slobroks are co-located with the cluster controllers.
// NOTE(review): the exact "node-1-3-50-NN" hostnames depend on VespaModelTester's
// allocation order — they pin current behavior rather than a contract.
@Test
public void testUsingNodesAndGroupCountAttributesWithoutDedicatedClusterControllers() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='9'/>" +
" </content>" +
" <content version='1.0' id='baz'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='27'/>" +
" </content>" +
"</services>";
// 10 container + 27 + 27 content + 3 cluster controller nodes = 67.
int numberOfHosts = 67;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
assertEquals(1, model.getContainerClusters().size());
Set<HostResource> containerHosts = model.getContainerClusters().get("foo").getContainers().stream()
.map(Container::getHost)
.collect(Collectors.toSet());
assertEquals(10, containerHosts.size());
Admin admin = model.getAdmin();
Set<HostResource> clusterControllerHosts = admin.getClusterControllers().getContainers()
.stream().map(cc -> cc.getHostResource()).collect(Collectors.toSet());
Set<HostResource> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
assertEquals(3, slobrokHosts.size());
assertTrue(clusterControllerHosts.containsAll(slobrokHosts), "Slobroks are assigned on cluster controller nodes");
assertTrue(containerHosts.contains(admin.getLogserver().getHost()), "Logserver is assigned from container nodes");
assertEquals(0, admin.getConfigservers().size(), "No in-cluster config servers in a hosted environment");
assertEquals(3, admin.getClusterControllers().getContainers().size());
// 'bar': 9 groups of 3 nodes each; distribution keys and config ids are
// assigned sequentially across groups.
ContentCluster cluster = model.getContentClusters().get("bar");
List<StorageGroup> subGroups = cluster.getRootGroup().getSubgroups();
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(9, subGroups.size());
assertEquals("0", subGroups.get(0).getIndex());
assertEquals(3, subGroups.get(0).getNodes().size());
assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-57", subGroups.get(0).getNodes().get(0).getHostName());
assertEquals(1, subGroups.get(0).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/1", subGroups.get(0).getNodes().get(1).getConfigId());
assertEquals(2, subGroups.get(0).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/2", subGroups.get(0).getNodes().get(2).getConfigId());
assertEquals("1", subGroups.get(1).getIndex());
assertEquals(3, subGroups.get(1).getNodes().size());
assertEquals(3, subGroups.get(1).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/3", subGroups.get(1).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-54", subGroups.get(1).getNodes().get(0).getHostName());
assertEquals(4, subGroups.get(1).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/4", subGroups.get(1).getNodes().get(1).getConfigId());
assertEquals(5, subGroups.get(1).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/5", subGroups.get(1).getNodes().get(2).getConfigId());
assertEquals("node-1-3-50-51", subGroups.get(2).getNodes().get(0).getHostName());
assertEquals("8", subGroups.get(8).getIndex());
assertEquals(3, subGroups.get(8).getNodes().size());
assertEquals(24, subGroups.get(8).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/24", subGroups.get(8).getNodes().get(0).getConfigId());
assertEquals(25, subGroups.get(8).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/25", subGroups.get(8).getNodes().get(1).getConfigId());
assertEquals(26, subGroups.get(8).getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/26", subGroups.get(8).getNodes().get(2).getConfigId());
// 'baz': 27 groups of exactly 1 node each.
cluster = model.getContentClusters().get("baz");
subGroups = cluster.getRootGroup().getSubgroups();
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(27, subGroups.size());
assertEquals("0", subGroups.get(0).getIndex());
assertEquals(1, subGroups.get(0).getNodes().size());
assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-27", subGroups.get(0).getNodes().get(0).getHostName());
assertEquals("1", subGroups.get(1).getIndex());
assertEquals(1, subGroups.get(1).getNodes().size());
assertEquals(1, subGroups.get(1).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/1", subGroups.get(1).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-26", subGroups.get(1).getNodes().get(0).getHostName());
assertEquals("node-1-3-50-25", subGroups.get(2).getNodes().get(0).getHostName());
assertEquals("26", subGroups.get(26).getIndex());
assertEquals(1, subGroups.get(26).getNodes().size());
assertEquals(26, subGroups.get(26).getNodes().get(0).getDistributionKey());
assertEquals("baz/storage/26", subGroups.get(26).getNodes().get(0).getConfigId());
}
// A content cluster with 8 groups of a single node each: verifies one-node groups,
// the resulting distribution bit count, and cluster controller placement.
// NOTE(review): "node-1-3-50-NN" hostnames pin VespaModelTester's allocation order.
@Test
public void testGroupsOfSize1() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
" <content version='1.0' id='bar'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='8' groups='8'/>" +
" </content>" +
"</services>";
// 10 container + 8 content + 3 cluster controller nodes = 21.
int numberOfHosts = 21;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
ClusterControllerContainerCluster clusterControllers = model.getAdmin().getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("cluster-controllers", clusterControllers.getName());
assertEquals("node-1-3-50-03", clusterControllers.getContainers().get(0).getHostName());
assertEquals("node-1-3-50-02", clusterControllers.getContainers().get(1).getHostName());
assertEquals("node-1-3-50-01", clusterControllers.getContainers().get(2).getHostName());
ContentCluster cluster = model.getContentClusters().get("bar");
List<StorageGroup> subGroups = cluster.getRootGroup().getSubgroups();
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(8, subGroups.size());
assertEquals(8, cluster.distributionBits());
// Spot-check the first, second and last single-node groups.
assertEquals("0", subGroups.get(0).getIndex());
assertEquals(1, subGroups.get(0).getNodes().size());
assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-11", subGroups.get(0).getNodes().get(0).getHostName());
assertEquals("1", subGroups.get(1).getIndex());
assertEquals(1, subGroups.get(1).getNodes().size());
assertEquals(1, subGroups.get(1).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/1", subGroups.get(1).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-10", subGroups.get(1).getNodes().get(0).getHostName());
assertEquals("7", subGroups.get(7).getIndex());
assertEquals(1, subGroups.get(7).getNodes().size());
assertEquals(7, subGroups.get(7).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/7", subGroups.get(7).getNodes().get(0).getConfigId());
assertEquals("node-1-3-50-04", subGroups.get(7).getNodes().get(0).getHostName());
}
// When a node hosting a slobrok is retired, the slobrok cluster is expanded:
// the retired node keeps its slobrok in addition to the 3 on active nodes.
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
"</services>";
int numberOfHosts = 11;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// Mark node-1-3-50-09 as retired.
VespaModel model = tester.createModel(services, true, "node-1-3-50-09");
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
assertEquals(1+3, model.getAdmin().getSlobroks().size(), "Includes retired node");
assertEquals("node-1-3-50-11", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("node-1-3-50-10", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("node-1-3-50-08", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("node-1-3-50-09", model.getAdmin().getSlobroks().get(3).getHostName(), "Included in addition because it is retired");
}
// Same as the test above, but with two retired nodes that sort after the active
// ones: both retired nodes are appended to the slobrok cluster.
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
"</services>";
int numberOfHosts = 12;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// Mark two nodes as retired.
VespaModel model = tester.createModel(services, true, "node-1-3-50-03", "node-1-3-50-04");
assertEquals(10+2, model.getRoot().hostSystem().getHosts().size());
assertEquals(3+2, model.getAdmin().getSlobroks().size(), "Includes retired node");
assertEquals("node-1-3-50-12", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("node-1-3-50-11", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("node-1-3-50-10", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("node-1-3-50-04", model.getAdmin().getSlobroks().get(3).getHostName(), "Included in addition because it is retired");
assertEquals("node-1-3-50-03", model.getAdmin().getSlobroks().get(4).getHostName(), "Included in addition because it is retired");
}
// With two container clusters, slobroks are distributed across both, and the
// cluster is expanded with the retired nodes from each container cluster.
@Test
public void testSlobroksAreSpreadOverAllContainerClusters() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
" <container version='1.0' id='bar'>" +
" <nodes count='3'/>" +
" </container>" +
"</services>";
int numberOfHosts = 16;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// Retire one node in 'foo' and two in 'bar'.
VespaModel model = tester.createModel(services, true, "node-1-3-50-15", "node-1-3-50-05", "node-1-3-50-04");
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
// 4 slobroks (incl. 1 retired) from 'foo' + 3 (incl. 2 retired) from 'bar' = 7.
assertEquals(7, model.getAdmin().getSlobroks().size(), "Includes retired node");
assertEquals("node-1-3-50-16", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("node-1-3-50-14", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("node-1-3-50-15", model.getAdmin().getSlobroks().get(2).getHostName(), "Included in addition because it is retired");
assertEquals("node-1-3-50-03", model.getAdmin().getSlobroks().get(3).getHostName());
assertEquals("node-1-3-50-05", model.getAdmin().getSlobroks().get(5).getHostName(), "Included in addition because it is retired");
assertEquals("node-1-3-50-04", model.getAdmin().getSlobroks().get(6).getHostName(), "Included in addition because it is retired");
}
@Test
public void testDedicatedClusterControllers() {
    // Two content clusters in a hosted setup: cluster controllers must be
    // provisioned as a dedicated 3-node cluster of type 'admin', and those
    // nodes must be marked stateful.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <content version='1.0' id='foo'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2' />" +
            " </content>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2' />" +
            " </content>" +
            "</services>";
    int hostCount = 7; // 2 + 2 content nodes + 3 dedicated cluster controllers
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml);
    assertEquals(7, vespaModel.getRoot().hostSystem().getHosts().size());
    ClusterControllerContainerCluster ccCluster = vespaModel.getAdmin().getClusterControllers();
    assertEquals(3, ccCluster.getContainers().size());
    assertEquals("cluster-controllers", ccCluster.getName());
    for (ClusterControllerContainer controller : ccCluster.getContainers()) {
        var membershipCluster = controller.getHost().spec().membership().get().cluster();
        assertTrue(membershipCluster.isStateful());
        assertEquals(ClusterSpec.Type.admin, membershipCluster.type());
    }
}
@Test
public void testLogserverContainerWhenDedicatedLogserver() {
    // The dedicated logserver node is declared explicitly in services.xml here,
    // so the implicit dedicated-logserver-node flag is passed as false.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'>" +
            " <logservers>" +
            " <nodes count='1' dedicated='true'/>" +
            " </logservers>" +
            " </admin>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='1'/>" +
            " </container>" +
            "</services>";
    testContainerOnLogserverHost(servicesXml, false);
}
// Log forwarding without include-admin: the logforwarder service must run on the
// container nodes but NOT on the dedicated logserver node.
@Test
public void testLogForwarderNotInAdminCluster() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'>" +
" <logservers>" +
" <nodes count='1' dedicated='true'/>" +
" </logservers>" +
" <logforwarding>" +
" <splunk deployment-server='bardeplserv:123' client-name='barclinam' phone-home-interval='987' />" +
" </logforwarding>" +
" </admin>" +
" <container version='1.0' id='foo'>" +
" <nodes count='1'/>" +
" </container>" +
"</services>";
int numberOfHosts = 2;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts+1);
VespaModel model = tester.createModel(Zone.defaultZone(), services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
Admin admin = model.getAdmin();
// The logserver host runs only the logserver — no container, no logforwarder.
Logserver logserver = admin.getLogserver();
HostResource hostResource = logserver.getHostResource();
assertNotNull(hostResource.getService("logserver"));
assertNull(hostResource.getService("container"));
assertNull(hostResource.getService("logforwarder"));
// The container host runs the container and the logforwarder.
var clist = model.getContainerClusters().get("foo").getContainers();
assertEquals(1, clist.size());
hostResource = clist.get(0).getHostResource();
assertNull(hostResource.getService("logserver"));
assertNotNull(hostResource.getService("container"));
assertNotNull(hostResource.getService("logforwarder"));
// The logforwarder must have a pre-shutdown command to deregister itself.
var lfs = hostResource.getService("logforwarder");
String shutdown = lfs.getPreShutdownCommand().orElse("<none>");
assertTrue(shutdown.startsWith("$ROOT/bin/vespa-logforwarder-start -S -c hosts/"));
}
// Log forwarding with include-admin='true': the logforwarder service must run on
// the dedicated logserver node as well as on the container nodes.
@Test
public void testLogForwarderInAdminCluster() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'>" +
" <logservers>" +
" <nodes count='1' dedicated='true'/>" +
" </logservers>" +
" <logforwarding include-admin='true'>" +
" <splunk deployment-server='bardeplserv:123' client-name='barclinam' phone-home-interval='987' />" +
" </logforwarding>" +
" </admin>" +
" <container version='1.0' id='foo'>" +
" <nodes count='1'/>" +
" </container>" +
"</services>";
int numberOfHosts = 2;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts+1);
VespaModel model = tester.createModel(Zone.defaultZone(), services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
Admin admin = model.getAdmin();
// Unlike the non-admin variant, the logserver host also gets a logforwarder.
Logserver logserver = admin.getLogserver();
HostResource hostResource = logserver.getHostResource();
assertNotNull(hostResource.getService("logserver"));
assertNull(hostResource.getService("container"));
assertNotNull(hostResource.getService("logforwarder"));
var clist = model.getContainerClusters().get("foo").getContainers();
assertEquals(1, clist.size());
hostResource = clist.get(0).getHostResource();
assertNull(hostResource.getService("logserver"));
assertNotNull(hostResource.getService("container"));
assertNotNull(hostResource.getService("logforwarder"));
}
@Test
public void testImplicitLogserverContainer() {
    // No explicit logserver in services.xml: the dedicated logserver node is
    // provisioned implicitly, so the flag is passed as true.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='1'/>" +
            " </container>" +
            "</services>";
    testContainerOnLogserverHost(servicesXml, true);
}
// Requests 24 nodes in 3 groups but only 6 hosts exist (non-hosted tester allows
// downscaling): the model must shrink to 3 groups of 2 nodes and cap the
// redundancy settings to what the reduced topology can support.
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <admin version='3.0'>" +
" <nodes count='3'/>" +
" </admin>" +
" <content version='1.0' id='bar'>" +
" <redundancy reply-after='3'>4</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='24' groups='3'/>" +
" <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
" </content>" +
"</services>";
int numberOfHosts = 6;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
ContentCluster cluster = model.getContentClusters().get("bar");
List<StorageGroup> subGroups = cluster.getRootGroup().getSubgroups();
// Effective redundancy values are totals across groups: 2 copies per group x 3 groups.
assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(2*3, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(2*3, cluster.redundancy().effectiveReadyCopies());
assertEquals("2|2|*", cluster.getRootGroup().getPartitions().get());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(3, subGroups.size());
assertEquals("0", subGroups.get(0).getIndex());
assertEquals(2, subGroups.get(0).getNodes().size());
assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
assertEquals(1, subGroups.get(0).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/1", subGroups.get(0).getNodes().get(1).getConfigId());
assertEquals("1", subGroups.get(1).getIndex());
assertEquals(2, subGroups.get(1).getNodes().size());
assertEquals(2, subGroups.get(1).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/2", subGroups.get(1).getNodes().get(0).getConfigId());
assertEquals(3, subGroups.get(1).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/3", subGroups.get(1).getNodes().get(1).getConfigId());
assertEquals("2", subGroups.get(2).getIndex());
assertEquals(2, subGroups.get(2).getNodes().size());
assertEquals(4, subGroups.get(2).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/4", subGroups.get(2).getNodes().get(0).getConfigId());
assertEquals(5, subGroups.get(2).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/5", subGroups.get(2).getNodes().get(1).getConfigId());
}
@Test
public void testRedundancyWithGroupsTooHighRedundancyAndOneRetiredNode() {
    // Redundancy 2 with 2 groups of 1 node each: redundancy cannot exceed the
    // minimum nodes per group, so model building must fail with a clear message.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2' groups='2'/>" +
            " </content>" +
            "</services>";
    int numberOfHosts = 3;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    // Use assertThrows instead of the try/fail/catch pattern, for consistency
    // with the other expected-failure tests in this file.
    IllegalArgumentException e =
            assertThrows(IllegalArgumentException.class,
                         () -> tester.createModel(services, false, "node-1-3-50-03"));
    assertEquals("Cluster 'bar' specifies redundancy 2, but it cannot be higher than the minimum nodes per group, which is 1",
                 Exceptions.toMessageString(e));
}
// Redundancy 1 with 2 groups and three retired nodes: the model must still build,
// keeping 2 groups and a total effective redundancy of 2 (1 copy per group).
@Test
public void testRedundancyWithGroupsAndThreeRetiredNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <content version='1.0' id='bar'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2' groups='2'/>" +
" </content>" +
"</services>";
int numberOfHosts = 5;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// Three of the five hosts are marked retired.
VespaModel model = tester.createModel(services, false, "node-1-3-50-05", "node-1-3-50-04", "node-1-3-50-03");
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(2, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(2, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(2, cluster.redundancy().effectiveReadyCopies());
assertEquals("1|*", cluster.getRootGroup().getPartitions().get());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(2, cluster.getRootGroup().getSubgroups().size());
}
// Redundancy 2 on a 2-node flat cluster where one node is retired: the storage
// cluster keeps both children (incl. the retired one), but effective redundancy
// is reduced to 1 since only one active node remains.
@Test
public void testRedundancy2DownscaledToOneNodeButOneRetired() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
int numberOfHosts = 3;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false, false, true, "node-1-3-50-03");
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(2, cluster.getStorageCluster().getChildren().size());
assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(1, cluster.redundancy().effectiveReadyCopies());
assertEquals(2, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
}
// Requests 24 flat content nodes but only 6 hosts exist: after the admin and
// container clusters take their share, the content cluster is downscaled to 4
// nodes, and redundancy, searchable-copies and dispatch groups are capped to 4.
@Test
public void testUsingNodesCountAttributesAndGettingTooFewNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <admin version='3.0'>" +
" <nodes count='3'/>" +
" </admin>" +
" <container version='1.0' id='container'>" +
" <search/>" +
" <nodes count='2'/>" +
" </container>" +
" <content version='1.0' id='bar'>" +
" <redundancy reply-after='8'>12</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='24'/>" +
" <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
" <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
" </content>" +
"</services>";
int numberOfHosts = 6;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
ContentCluster cluster = model.getContentClusters().get("bar");
// All redundancy-related settings are capped to the 4 available content nodes.
assertEquals(4, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(4, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(4, cluster.redundancy().effectiveReadyCopies());
assertEquals(4, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
assertEquals(4, cluster.getSearch().getIndexed().getSearchableCopies());
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(4, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertEquals(4, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
assertEquals(1, cluster.getRootGroup().getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/1", cluster.getRootGroup().getNodes().get(1).getConfigId());
assertEquals(2, cluster.getRootGroup().getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/2", cluster.getRootGroup().getNodes().get(2).getConfigId());
assertEquals(3, cluster.getRootGroup().getNodes().get(3).getDistributionKey());
assertEquals("bar/storage/3", cluster.getRootGroup().getNodes().get(3).getConfigId());
}
// Requests 24 nodes in 3 groups but only a single host exists: the cluster
// collapses to one flat node with redundancy 1 and no groups.
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='3.0'>" +
" <nodes count='3'/>" +
" </admin>" +
" <content version='1.0' id='bar'>" +
" <redundancy reply-after='3'>4</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='24' groups='3'/>" +
" <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
" </content>" +
"</services>";
int numberOfHosts = 1;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(1, cluster.redundancy().effectiveReadyCopies());
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(1, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertEquals(1, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
}
@Test
public void testRequiringMoreNodesThanAreAvailable() {
    // required='true' forbids downscaling the requested node count, so asking
    // for 3 nodes when only 2 hosts exist must fail model building.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>1</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='3' required='true'/>" +
            " </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(2);
    assertThrows(IllegalArgumentException.class, () -> modelTester.createModel(servicesXml, false));
}
@Test
public void testRequiredNodesAndDedicatedClusterControllers() {
    // 2 required content nodes + 3 dedicated cluster controllers need 5 hosts;
    // with only 4 available and downscaling forbidden, model building must fail.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <content version='1.0' id='foo'>" +
            " <redundancy>1</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2' required='true'/>" +
            " </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(4);
    assertThrows(IllegalArgumentException.class, () -> modelTester.createModel(servicesXml, false));
}
@Test
public void testExclusiveNodes() {
    // exclusive='true' on both clusters: every provisioned host must carry an
    // exclusive cluster membership.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "<container version='1.0' id='container'>" +
            " <nodes count='2' exclusive='true'/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>1</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='3' exclusive='true'/>" +
            " </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(5);
    VespaModel vespaModel = modelTester.createModel(servicesXml, false);
    for (HostResource host : vespaModel.hostSystem().getHosts()) {
        assertTrue(host.spec().membership().get().cluster().isExclusive());
    }
}
// Requests 24 flat content nodes with redundancy 12 and 7 dispatch groups, but
// only one host exists: everything collapses to a single node / single group.
@Test
public void testUsingNodesCountAttributesAndGettingJustOneNode() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='3.0'>" +
" <nodes count='3'/>" +
" </admin>" +
" <content version='1.0' id='bar'>" +
" <redundancy reply-after='8'>12</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='24'/>" +
" <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
" <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
" </content>" +
"</services>";
int numberOfHosts = 1;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(1, cluster.redundancy().effectiveReadyCopies());
assertEquals(1, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(1, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertEquals(1, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
}
@Test
public void testRequestingRangesMin() {
    // Clusters request node count and resource ranges; when the zone only has
    // capacity matching the lower bounds, the minimum of each range is allocated
    // (4 container + 6 content nodes) plus 3 dedicated cluster controllers.
    // Fix: the @Test annotation was duplicated here; @Test is not a repeatable
    // annotation, so the duplicate did not compile.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <container version='1.0' id='container'>" +
            " <nodes count='[4, 6]'>" +
            " <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" +
            " </nodes>" +
            " </container>" +
            " <content version='1.0' id='foo'>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='[6, 20]' groups='[3,4]'>" +
            " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" +
            " </nodes>" +
            " </content>" +
            "</services>";
    int totalHosts = 10;
    VespaModelTester tester = new VespaModelTester();
    // Capacity matching the container minimum, ample content capacity, and
    // small hosts for the cluster controllers.
    tester.addHosts(new NodeResources(11.5, 10, 30, 0.3), 6);
    tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20);
    tester.addHosts(new NodeResources( 0.5, 2, 10, 0.3), 3);
    VespaModel model = tester.createModel(services, true);
    assertEquals(totalHosts + 3, model.getRoot().hostSystem().getHosts().size());
}
// Counterpart to testRequestingRangesMin: when ample capacity is available and the
// tester allocates at range maxima, the upper bound of each range is allocated
// (6 container + 20 content + 3 cluster controller nodes = 29).
@Test
public void testRequestingRangesMax() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container'>" +
" <nodes count='[4, 6]'>" +
" <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" +
" </nodes>" +
" </container>" +
" <content version='1.0' id='foo'>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='[6, 20]' groups='[3,4]'>" +
" <resources vcpu='8' memory='200Gb' disk='1Pb'/>" +
" </nodes>" +
" </content>" +
"</services>";
int totalHosts = 29;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(new NodeResources(13.5, 100, 1000, 0.3), 6);
tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20);
tester.addHosts(new NodeResources( 0.5, 2, 10, 0.3), 3);
// The extra boolean requests allocation at the maximum of each range.
VespaModel model = tester.createModel(services, true, true);
assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size());
}
// When the admin cluster architecture is set to arm64, cluster controllers and
// the dedicated logserver must be placed on arm64 hosts, while all other
// services get the default architecture.
@Test
public void testUseArm64NodesForAdminCluster() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <admin version='4.0'>" +
" </admin>" +
" <container version='1.0' id='container'>" +
" <nodes count='2'>" +
" <resources vcpu='2' memory='8Gb' disk='30Gb'/>" +
" </nodes>" +
" </container>" +
" <content version='1.0' id='foo'>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'>" +
" <resources vcpu='2' memory='8Gb' disk='30Gb'/>" +
" </nodes>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.setHosted(true);
tester.setAdminClusterArchitecture(Architecture.arm64);
tester.useDedicatedNodeForLogserver(true);
// Default-architecture hosts for container/content, arm64 hosts for admin.
tester.addHosts(new NodeResources(13.5, 100, 1000, 0.3), 4);
tester.addHosts(new NodeResources(0.5, 2, 50, 0.3, DiskSpeed.fast, StorageType.any, Architecture.arm64), 4);
VespaModel model = tester.createModel(services, true, true);
List<HostResource> hosts = model.getRoot().hostSystem().getHosts();
assertEquals(8, hosts.size());
Set<HostResource> clusterControllerResources = getHostResourcesForService(hosts, "container-clustercontroller");
assertEquals(3, clusterControllerResources.size());
assertTrue(clusterControllerResources.stream().allMatch(host -> host.realResources().architecture() == Architecture.arm64));
Set<HostResource> logserverResources = getHostResourcesForService(hosts, "logserver-container");
assertEquals(1, logserverResources.size());
assertTrue(logserverResources.stream().allMatch(host -> host.realResources().architecture() == Architecture.arm64));
// Every remaining (non-admin) host uses the default architecture.
assertTrue(hosts.stream()
.filter(host -> !clusterControllerResources.contains(host))
.filter(host -> !logserverResources.contains(host))
.allMatch(host -> host.realResources().architecture() == Architecture.getDefault()));
}
/** Returns the subset of the given hosts that run at least one service of the given type. */
private Set<HostResource> getHostResourcesForService(List<HostResource> hosts, String service) {
    return hosts.stream()
                .filter(host -> hostRunsService(host, service))
                .collect(Collectors.toSet());
}

/** Returns whether any service on the given host has the given service type. */
private boolean hostRunsService(HostResource host, String service) {
    return host.getHostInfo().getServices().stream()
               .anyMatch(serviceInfo -> serviceInfo.getServiceType().equals(service));
}
// A hosted container-only cluster with <nodes count='3'> gets 3 containers,
// a logserver and one slobrok per container node.
@Test
public void testContainerOnly() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<container version='1.0'>" +
"  <search/>" +
"  <nodes count='3'/>" +
"</container>";
int numberOfHosts = 3;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
assertEquals(3, model.getContainerClusters().get("container").getContainers().size());
assertNotNull(model.getAdmin().getLogserver());
assertEquals(3, model.getAdmin().getSlobroks().size());
}
// JVM options given in the <jvm> tag are assigned to the provisioned containers.
@Test
public void testJvmOptions() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<container version='1.0'>" +
"  <search/>" +
"  <nodes count='3'>" +
"    <jvm options='-DfooOption=xyz' /> " +
"  </nodes>" +
"</container>";
int numberOfHosts = 3;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
assertEquals("-DfooOption=xyz", model.getContainerClusters().get("container").getContainers().get(0).getAssignedJvmOptions());
}
// Non-hosted: hostalias-based node references resolve through the provisioner.
@Test
public void testUsingHostaliasWithProvisioner() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"<admin version='2.0'>" +
"  <adminserver hostalias='node1'/>\n"+
"</admin>\n" +
"<container id='mydisc' version='1.0'>" +
"  <handler id='myHandler'>" +
"    <component id='injected' />" +
"  </handler>" +
"  <nodes>" +
"    <node hostalias='node1'/>" +
"  </nodes>" +
"</container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.setHosted(false);
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().hostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
}
// Standalone (no <services> wrapper) syntax is accepted on hosted Vespa when the
// declared http server uses the default Vespa web service port.
@Test
public void testThatStandaloneSyntaxWorksOnHostedVespa() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<container id='foo' version='1.0'>" +
"  <http>" +
"    <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" +
"  </http>" +
"</container>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
VespaModel model = tester.createModel(services, true);
assertEquals(2, model.getHosts().size());
assertEquals(1, model.getContainerClusters().size());
assertEquals(2, model.getContainerClusters().get("foo").getContainers().size());
}
// A hosted container cluster using standalone syntax must declare the default Vespa
// web service port; any other port is rejected with IllegalArgumentException.
// Fix: the original assigned the (never-thrown-to) result of createModel to an unused
// local 'model' — a dead store that triggers compiler/IDE warnings.
@Test
public void testThatStandaloneSyntaxOnHostedVespaRequiresDefaultPort() {
    try {
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<container id='foo' version='1.0'>" +
                "  <http>" +
                "    <server id='server1' port='8095' />" +
                "  </http>" +
                "</container>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(1);
        // The returned model is irrelevant; building it is expected to throw
        tester.createModel(services, true);
        fail("Expected exception");
    }
    catch (IllegalArgumentException e) {
        assertEquals("Illegal port 8095 in http server 'server1': Port must be set to " +
                     getDefaults().vespaWebServicePort(), e.getMessage());
    }
}
// In a manually deployed hosted environment (dev), hostalias-based node syntax is
// accepted but clusters are downscaled to one node each, plus a dedicated cluster controller.
@Test
public void testThatStandaloneSyntaxWorksOnHostedManuallyDeployed() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <admin version='2.0'>" +
"    <adminserver hostalias='node1'/>" +
"  </admin>" +
"  <container id='foo' version='1.0'>" +
"    <nodes>" +
"      <node hostalias='node1'/>" +
"      <node hostalias='node2'/>" +
"    </nodes>" +
"  </container>" +
"  <content id='bar' version='1.0'>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <redundancy>2</redundancy>" +
"    <nodes>" +
"      <group>" +
"        <node distribution-key='0' hostalias='node3'/>" +
"        <node distribution-key='1' hostalias='node4'/>" +
"      </group>" +
"    </nodes>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.setHosted(true);
tester.addHosts(4);
VespaModel model = tester.createModel(new Zone(Environment.dev, RegionName.from("us-central-1")), services, true);
assertEquals(3, model.getHosts().size(), "We get 1 node per cluster and no admin node apart from the dedicated cluster controller");
assertEquals(1, model.getContainerClusters().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes(true));
assertEquals(1, model.getAdmin().getClusterControllers().getContainers().size());
}
// In a hosted, non-manually-deployed environment (staging here) hostalias-based content
// node syntax is rejected: clusters must use <nodes count='N'>.
// Fix: the original stored createModel's result in an unused local 'model' (dead store).
@Test
public void testThatStandaloneSyntaxWithClusterControllerWorksOnHostedManuallyDeployed() {
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container id='foo' version='1.0'>" +
            "    <nodes count=\"2\" />" +
            "  </container>" +
            "  <content id='bar' version='1.0'>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <redundancy>1</redundancy>" +
            "    <nodes>" +
            "      <group>" +
            "        <node distribution-key='0' hostalias='node3'/>" +
            "      </group>" +
            "    </nodes>" +
            "  </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.setHosted(true);
    tester.addHosts(4);
    try {
        // The returned model is irrelevant; building it is expected to throw
        tester.createModel(new Zone(Environment.staging, RegionName.from("us-central-1")), services, true);
        fail("expected failure");
    } catch (IllegalArgumentException e) {
        assertTrue(e.getMessage().startsWith("Clusters in hosted environments must have a <nodes count='N'> tag"));
    }
}
/** Deploying an application with "nodes count" standalone should give a single-node deployment */
@Test
public void testThatHostedSyntaxWorksOnStandalone() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <container version='1.0' id='container1'>" +
"     <search/>" +
"     <nodes count='1'/>" +
"  </container>" +
"  <content version='1.0'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2'/>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.setHosted(false);
tester.addHosts(3);
VespaModel model = tester.createModel(services, true);
assertEquals(1,
model.getContainerClusters().get("container1").getContainers().size(),
"Nodes in container cluster");
assertEquals(1,
model.getContentClusters().get("content").getRootGroup().getNodes().size(),
"Nodes in content cluster (downscaled)");
assertEquals(1, model.getAdmin().getSlobroks().size());
// Resolving these configs must not throw even in the downscaled single-node setup
model.getConfig(new StorStatusConfig.Builder(), "default");
StorageCluster storage = model.getContentClusters().get("content").getStorageCluster();
StorCommunicationmanagerConfig.Builder builder = new StorCommunicationmanagerConfig.Builder();
storage.getChildren().get("0").getConfig(builder);
}
/**
* Deploying an application with "nodes count" standalone should give a single-node deployment,
* also if the user has a lingering hosts file from running self-hosted.
*
* NOTE: This does *not* work (but gives an understandable error message),
* but the current code does not provoke the error that is thrown from HostsXmlProvisioner.prepare
*/
@Test
public void testThatHostedSyntaxWorksOnStandaloneAlsoWithAHostedFile() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <container version='1.0' id='container1'>" +
"     <search/>" +
"     <nodes count='1'/>" +
"  </container>" +
"  <content version='1.0'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2'/>" +
"  </content>" +
"</services>";
String hosts =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<hosts>\n" +
"  <host name=\"vespa-1\">\n" +
"    <alias>vespa-1</alias>\n" +
"  </host>\n" +
"  <host name=\"vespa-2\">\n" +
"    <alias>vespa-2</alias>\n" +
"  </host>\n" +
"  <host name=\"vespa-3\">\n" +
"    <alias>vespa-3</alias>\n" +
"  </host>\n" +
"</hosts>";
VespaModelTester tester = new VespaModelTester();
tester.setHosted(false);
tester.addHosts(3);
VespaModel model = tester.createModel(services, hosts, true);
assertEquals(1,
model.getContainerClusters().get("container1").getContainers().size(),
"Nodes in container cluster");
assertEquals(1,
model.getContentClusters().get("content").getRootGroup().getNodes().size(),
"Nodes in content cluster (downscaled)");
assertEquals(1, model.getAdmin().getSlobroks().size());
model.getConfig(new StorStatusConfig.Builder(), "default");
StorageCluster storage = model.getContentClusters().get("content").getStorageCluster();
StorCommunicationmanagerConfig.Builder builder = new StorCommunicationmanagerConfig.Builder();
storage.getChildren().get("0").getConfig(builder);
}
// Hosted: clusters without a <nodes> tag default to two nodes (one content node counted here),
// with a full admin setup (3 slobroks).
@Test
public void testNoNodeTagMeansTwoNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(6);
VespaModel model = tester.createModel(services, true);
assertEquals(6, model.getRoot().hostSystem().getHosts().size());
assertEquals(3, model.getAdmin().getSlobroks().size());
assertEquals(2, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes(true));
}
// Hosted, container cluster only: no <nodes> tag defaults to two container nodes.
@Test
public void testNoNodeTagMeansTwoNodesNoContent() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"  </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
VespaModel model = tester.createModel(services, true);
assertEquals(2, model.getRoot().hostSystem().getHosts().size());
assertEquals(2, model.getAdmin().getSlobroks().size());
assertEquals(2, model.getContainerClusters().get("foo").getContainers().size());
}
// Non-hosted: no <nodes> tag means a single node for each cluster.
@Test
public void testNoNodeTagMeans1NodeNonHosted() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.setHosted(false);
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().hostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size());
}
// Non-hosted: explicit single-node clusters sharing one host.
@Test
public void testSingleNodeNonHosted() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"    <nodes><node hostalias='foo'/></nodes>"+
"  </container>" +
"  <content version='1.0' id='bar'>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes><node hostalias='foo' distribution-key='0'/></nodes>"+
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.setHosted(false);
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().hostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes(true));
}
/** Recreate the combination used in some factory tests */
@Test
public void testMultitenantButNotHosted() {
String services =
"<?xml version='1.0' encoding='UTF-8' ?>" +
"<services version='1.0'>" +
"  <admin version='2.0'>" +
"    <adminserver hostalias='node1'/>" +
"  </admin>" +
"  <container id='default' version='1.0'>" +
"    <search/>" +
"    <nodes>" +
"      <node hostalias='node1'/>" +
"    </nodes>" +
"  </container>" +
"  <content id='storage' version='1.0'>" +
"    <redundancy>2</redundancy>" +
"    <group>" +
"      <node distribution-key='0' hostalias='node1'/>" +
"      <node distribution-key='1' hostalias='node1'/>" +
"    </group>" +
"    <tuning>" +
"      <cluster-controller>" +
"        <transition-time>0</transition-time>" +
"      </cluster-controller>" +
"    </tuning>" +
"    <documents>" +
"      <document mode='store-only' type='type1'/>" +
"    </documents>" +
"    <engine>" +
"      <proton/>" +
"    </engine>" +
"  </content>" +
" </services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertEquals(1, model.getRoot().hostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(2, content.getRootGroup().getNodes().size());
ContainerCluster<?> controller = model.getAdmin().getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
// Multitenant non-provisioned model where the content cluster references the container
// cluster for document processing; everything fits on one host.
@Test
public void testModelWithReferencedIndexingCluster() {
String services =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<services version=\"1.0\">\n" +
"\n" +
"  <admin version=\"2.0\">\n" +
"    <adminserver hostalias=\"vespa-1\"/>\n" +
"    <configservers>\n" +
"      <configserver hostalias=\"vespa-1\"/>\n" +
"    </configservers>\n" +
"  </admin>\n" +
"\n" +
"  <container id=\"container\" version=\"1.0\">\n" +
"    <document-processing/>\n" +
"    <document-api/>\n" +
"    <search/>\n" +
"    <nodes>\n" +
"      <jvm options=\"-Xms512m -Xmx512m\"/>\n" +
"      <node hostalias=\"vespa-1\"/>\n" +
"    </nodes>\n" +
"  </container>\n" +
"\n" +
"  <content id=\"storage\" version=\"1.0\">\n" +
"    <search>\n" +
"      <visibility-delay>1.0</visibility-delay>\n" +
"    </search>\n" +
"    <redundancy>2</redundancy>\n" +
"    <documents>\n" +
"      <document type=\"type1\" mode=\"index\"/>\n" +
"      <document-processing cluster=\"container\"/>\n" +
"    </documents>\n" +
"    <nodes>\n" +
"      <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
"    </nodes>\n" +
"  </content>\n" +
"\n" +
"</services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertEquals(1, model.getRoot().hostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(1, content.getRootGroup().getNodes().size());
ContainerCluster<?> controller = model.getAdmin().getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
// Non-hosted, non-multitenant: container and content clusters share the same three
// hosts declared in hosts.xml.
@Test
public void testSharedNodesNotHosted() {
String hosts =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<hosts>\n" +
"  <host name=\"vespa-1\">\n" +
"    <alias>vespa-1</alias>\n" +
"  </host>\n" +
"  <host name=\"vespa-2\">\n" +
"    <alias>vespa-2</alias>\n" +
"  </host>\n" +
"  <host name=\"vespa-3\">\n" +
"    <alias>vespa-3</alias>\n" +
"  </host>\n" +
"</hosts>";
String services =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<services version=\"1.0\">\n" +
"\n" +
"  <admin version=\"2.0\">\n" +
"    <adminserver hostalias=\"vespa-1\"/>\n" +
"    <configservers>\n" +
"      <configserver hostalias=\"vespa-1\"/>\n" +
"    </configservers>\n" +
"  </admin>\n" +
"\n" +
"  <container id=\"container\" version=\"1.0\">\n" +
"    <document-processing/>\n" +
"    <document-api/>\n" +
"    <search/>\n" +
"    <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" +
"      <node hostalias=\"vespa-1\"/>\n" +
"      <node hostalias=\"vespa-2\"/>\n" +
"      <node hostalias=\"vespa-3\"/>\n" +
"    </nodes>\n" +
"  </container>\n" +
"\n" +
"  <content id=\"storage\" version=\"1.0\">\n" +
"    <search>\n" +
"      <visibility-delay>1.0</visibility-delay>\n" +
"    </search>\n" +
"    <redundancy>2</redundancy>\n" +
"    <documents>\n" +
"      <document type=\"type1\" mode=\"index\"/>\n" +
"      <document-processing cluster=\"container\"/>\n" +
"    </documents>\n" +
"    <nodes>\n" +
"      <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
"      <node hostalias=\"vespa-2\" distribution-key=\"1\"/>\n" +
"      <node hostalias=\"vespa-3\" distribution-key=\"2\"/>\n" +
"    </nodes>\n" +
"  </content>\n" +
"\n" +
"</services>";
VespaModel model = createNonProvisionedModel(false, hosts, services);
assertEquals(3, model.getRoot().hostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(3, content.getRootGroup().getNodes().size());
}
// Multitenant non-provisioned model with two content clusters sharing the single host;
// the 'storage' cluster keeps both its (co-located) nodes.
@Test
public void testMultitenantButNotHostedSharedContentNode() {
String services =
"<?xml version='1.0' encoding='UTF-8' ?>" +
"<services version='1.0'>" +
"  <admin version='2.0'>" +
"    <adminserver hostalias='node1'/>" +
"  </admin>" +
"  <container id='default' version='1.0'>" +
"    <search/>" +
"    <nodes>" +
"      <node hostalias='node1'/>" +
"    </nodes>" +
"  </container>" +
"  <content id='storage' version='1.0'>" +
"    <redundancy>2</redundancy>" +
"    <group>" +
"      <node distribution-key='0' hostalias='node1'/>" +
"      <node distribution-key='1' hostalias='node1'/>" +
"    </group>" +
"    <tuning>" +
"      <cluster-controller>" +
"        <transition-time>0</transition-time>" +
"      </cluster-controller>" +
"    </tuning>" +
"    <documents>" +
"      <document mode='store-only' type='type1'/>" +
"    </documents>" +
"    <engine>" +
"      <proton/>" +
"    </engine>" +
"  </content>" +
"  <content id='search' version='1.0'>" +
"    <redundancy>2</redundancy>" +
"    <group>" +
"      <node distribution-key='0' hostalias='node1'/>" +
"    </group>" +
"    <documents>" +
"      <document type='type1'/>" +
"    </documents>" +
"  </content>" +
" </services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertEquals(1, model.getRoot().hostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(2, content.getRootGroup().getNodes().size());
ContainerCluster<?> controller = model.getAdmin().getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
// Verifies that the 'stateful' property of provisioned cluster memberships is set for
// content clusters and ZooKeeper-carrying container clusters, but not plain containers.
@Test
public void testStatefulProperty() {
String servicesXml =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <container version='1.0' id='qrs'>" +
"    <nodes count='1'/>" +
"  </container>" +
"  <container version='1.0' id='zk'>" +
"    <zookeeper/>" +
"    <nodes count='3'/>" +
"  </container>" +
"  <content version='1.0' id='content'>" +
"    <redundancy>2</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes count='2'/>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(9);
VespaModel model = tester.createModel(servicesXml, true);
// Expected statefulness per cluster id
Map<String, Boolean> tests = Map.of("qrs", false,
"zk", true,
"content", true);
Map<String, List<HostResource>> hostsByCluster = model.hostSystem().getHosts().stream()
.collect(Collectors.groupingBy(h -> h.spec().membership().get().cluster().id().value()));
tests.forEach((clusterId, stateful) -> {
List<HostResource> hosts = hostsByCluster.getOrDefault(clusterId, List.of());
assertFalse(hosts.isEmpty(), "Hosts are provisioned for '" + clusterId + "'");
assertEquals(stateful,
hosts.stream().allMatch(h -> h.spec().membership().get().cluster().isStateful()),
"Hosts in cluster '" + clusterId + "' are " + (stateful ? "" : "not ") + "stateful");
});
}
// When a node in a ZooKeeper-carrying container cluster is retired, the replacement is
// provisioned alongside it, temporarily giving 3 active + 1 retired containers.
@Test
public void containerWithZooKeeperSuboptimalNodeCountDuringRetirement() {
String servicesXml =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <container version='1.0' id='zk'>" +
"    <zookeeper/>" +
"    <nodes count='3'/>" +
"  </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(4);
// "node-1-3-50-04" is the host to retire
VespaModel model = tester.createModel(servicesXml, true, "node-1-3-50-04");
ApplicationContainerCluster cluster = model.getContainerClusters().get("zk");
assertEquals(1, cluster.getContainers().stream().filter(Container::isRetired).count());
assertEquals(3, cluster.getContainers().stream().filter(c -> !c.isRetired()).count());
}
// Verifies ZooKeeper server config across a model transition: in the next model, the
// replacement servers are flagged as joining and the replaced servers as retired.
// Fix: replaced the statement lambda '(nodeCount) -> { return ...; }' with the
// idiomatic expression lambda.
@Test
public void containerWithZooKeeperJoiningServers() {
    // Services XML parameterized on node count so the cluster can change between models
    Function<Integer, String> servicesXml = nodeCount ->
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='zk'>" +
            "    <zookeeper/>" +
            "    <nodes count='" + nodeCount + "'/>" +
            "  </container>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(5);
    VespaModel model = tester.createModel(servicesXml.apply(3), true);
    {
        ApplicationContainerCluster cluster = model.getContainerClusters().get("zk");
        ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder();
        cluster.getContainers().forEach(c -> c.getConfig(config));
        cluster.getConfig(config);
        assertTrue(config.build().server().stream().noneMatch(ZookeeperServerConfig.Server::joining), "Initial servers are not joining");
    }
    {
        // Retire two of the original hosts; the provisioner replaces them with new nodes
        VespaModel nextModel = tester.createModel(Zone.defaultZone(), servicesXml.apply(3), true, false, false, 0, Optional.of(model), new DeployState.Builder(), "node-1-3-50-04", "node-1-3-50-03");
        ApplicationContainerCluster cluster = nextModel.getContainerClusters().get("zk");
        ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder();
        cluster.getContainers().forEach(c -> c.getConfig(config));
        cluster.getConfig(config);
        assertEquals(Map.of(0, false,
                            1, false,
                            2, false,
                            3, true,
                            4, true),
                     config.build().server().stream().collect(Collectors.toMap(ZookeeperServerConfig.Server::id,
                                                                               ZookeeperServerConfig.Server::joining)),
                     "New nodes are joining");
        assertEquals(Map.of(0, false,
                            1, true,
                            2, true,
                            3, false,
                            4, false),
                     config.build().server().stream().collect(Collectors.toMap(ZookeeperServerConfig.Server::id,
                                                                               ZookeeperServerConfig.Server::retired)),
                     "Retired nodes are retired");
    }
}
// Convenience for creating a multitenant model without a host provisioner or hosts file.
private VespaModel createNonProvisionedMultitenantModel(String services) {
return createNonProvisionedModel(true, null, services);
}
/**
 * Creates a model from the given hosts and services XML without using a host provisioner,
 * with schemas generated for document type 'type1'.
 *
 * @param multitenant whether the deploy properties should mark the model as multitenant
 * @param hosts       hosts XML, or null for no hosts file
 * @param services    services XML
 */
private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) {
    var creator = new VespaModelCreatorWithMockPkg(hosts, services, generateSchemas("type1"));
    DeployState deployState = new DeployState.Builder()
            .applicationPackage(creator.appPkg)
            .properties(new TestProperties().setMultitenant(multitenant))
            .build();
    return creator.create(false, deployState);
}
/** Returns the JVM heap size, as a percentage of physical memory, configured for the given cluster. */
private int physicalMemoryPercentage(ContainerCluster<?> cluster) {
    QrStartConfig.Builder configBuilder = new QrStartConfig.Builder();
    cluster.getConfig(configBuilder);
    QrStartConfig config = configBuilder.build();
    return config.jvm().heapSizeAsPercentageOfPhysicalMemory();
}
/** Returns the memory size reported in the proton hwinfo config of the cluster's first search node. */
private long protonMemorySize(ContentCluster cluster) {
    ProtonConfig.Builder configBuilder = new ProtonConfig.Builder();
    cluster.getSearch().getIndexed().getSearchNode(0).getConfig(configBuilder);
    ProtonConfig config = configBuilder.build();
    return config.hwinfo().memory().size();
}
// Proton disk write speed is derated to 40 (presumably MB/s — matches slow-disk tuning;
// confirm against NodeResourcesTuning) when nodes are declared with slow disks.
@Test
public void require_that_proton_config_is_tuned_based_on_node_resources() {
String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
"<services>",
"  <content version='1.0' id='test'>",
"    <documents>",
"      <document type='type1' mode='index'/>",
"    </documents>",
"    <nodes count='2'>",
"      <resources vcpu='1' memory='3Gb' disk='9Gb' bandwidth='5Gbps' disk-speed='slow'/>",
"    </nodes>",
"  </content>",
"</services>");
VespaModelTester tester = new VespaModelTester();
tester.addHosts(new NodeResources(1, 3, 10, 5, NodeResources.DiskSpeed.slow), 5);
VespaModel model = tester.createModel(services, true, 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
assertEquals(2, cluster.getSearchNodes().size());
assertEquals(40, getProtonConfig(cluster, 0).hwinfo().disk().writespeed(), 0.001);
assertEquals(40, getProtonConfig(cluster, 1).hwinfo().disk().writespeed(), 0.001);
}
/** Returns the resolved proton config of the search node with the given index in the given cluster. */
private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) {
    List<SearchNode> nodes = cluster.getSearchNodes();
    assertTrue(searchNodeIdx < nodes.size());
    ProtonConfig.Builder configBuilder = new ProtonConfig.Builder();
    nodes.get(searchNodeIdx).getConfig(configBuilder);
    return new ProtonConfig(configBuilder);
}
// Precedence check for proton config: explicit <config> override wins (maxtlssize),
// explicit <tuning> wins (maxmemorygain -> maxmemory), and only the remaining values
// fall back to node-resource-based defaults (8% of usable memory).
@Test
public void require_that_config_override_and_explicit_proton_tuning_and_resource_limits_have_precedence_over_default_node_resource_tuning() {
String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
"<services>",
"  <content version='1.0' id='test'>",
"    <config name='vespa.config.search.core.proton'>",
"      <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>",
"    </config>",
"    <documents>",
"      <document type='type1' mode='index'/>",
"    </documents>",
"    <nodes count='1'>",
"      <resources vcpu='1' memory='128Gb' disk='100Gb'/>",
"    </nodes>",
"    <engine>",
"      <proton>",
"        <tuning>",
"          <searchnode>",
"            <flushstrategy>",
"              <native>",
"                <total>",
"                  <maxmemorygain>1000</maxmemorygain>",
"                </total>",
"              </native>",
"            </flushstrategy>",
"          </searchnode>",
"        </tuning>",
"      </proton>",
"    </engine>",
"  </content>",
"</services>");
VespaModelTester tester = new VespaModelTester();
tester.addHosts(new NodeResources(1, 3, 10, 1), 4);
tester.addHosts(new NodeResources(1, 128, 100, 0.3), 1);
VespaModel model = tester.createModel(services, true, 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId());
assertEquals(2000, cfg.flush().memory().maxtlssize());
assertEquals(1000, cfg.flush().memory().maxmemory());
// Default tuning: 8% of (node memory minus reserved memory)
assertEquals((long) ((128 - reservedMemoryGb) * GB * 0.08), cfg.flush().memory().each().maxmemory());
}
/** Returns the proton config resolved by the model for the given config id. */
private static ProtonConfig getProtonConfig(VespaModel model, String configId) {
    var configBuilder = new ProtonConfig.Builder();
    model.getConfig(configBuilder, configId);
    return new ProtonConfig(configBuilder);
}
// Helper: verifies that the logserver host also runs a logserver container, and that
// application metadata and logd configs resolve for it.
private void testContainerOnLogserverHost(String services, boolean useDedicatedNodeForLogserver) {
int numberOfHosts = 2;
VespaModelTester tester = new VespaModelTester();
tester.useDedicatedNodeForLogserver(useDedicatedNodeForLogserver);
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(Zone.defaultZone(), services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
Admin admin = model.getAdmin();
Logserver logserver = admin.getLogserver();
HostResource hostResource = logserver.getHostResource();
// Both the logserver service and its companion container must be on the same host
assertNotNull(hostResource.getService("logserver"));
String containerServiceType = ContainerServiceType.LOGSERVER_CONTAINER.serviceName;
assertNotNull(hostResource.getService(containerServiceType));
String configId = admin.getLogserver().getHostResource().getService(containerServiceType).getConfigId();
ApplicationMetadataConfig.Builder builder = new ApplicationMetadataConfig.Builder();
model.getConfig(builder, configId);
ApplicationMetadataConfig cfg = new ApplicationMetadataConfig(builder);
assertEquals(1, cfg.generation());
LogdConfig.Builder logdConfigBuilder = new LogdConfig.Builder();
model.getConfig(logdConfigBuilder, configId);
LogdConfig logdConfig = new LogdConfig(logdConfigBuilder);
assertTrue(logdConfig.logserver().use());
}
/**
 * Asserts that exactly nodeCount hosts in the model are members of the cluster with the
 * given id, type and (possibly null) combined cluster id.
 */
private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Id combinedId,
                                      ClusterSpec.Type type, VespaModel model) {
    Optional<ClusterSpec.Id> expectedCombinedId = Optional.ofNullable(combinedId);
    long matchingNodes = model.hostSystem().getHosts().stream()
                              .map(host -> host.spec().membership().get().cluster())
                              .filter(cluster -> cluster.id().equals(id))
                              .filter(cluster -> cluster.type().equals(type))
                              .filter(cluster -> cluster.combinedId().equals(expectedCombinedId))
                              .count();
    assertEquals(nodeCount,
                 matchingNodes,
                 "Nodes in cluster " + id + " with type " + type + (combinedId != null ? ", combinedId " + combinedId : ""));
}
// Convenience overload for clusters without a combined cluster id.
private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Type type, VespaModel model) {
assertProvisioned(nodeCount, id, null, type, model);
}
// DeployLogger that records all logged messages for later inspection in tests.
record TestLogger(List<LogMessage> msgs) implements DeployLogger {
public TestLogger() {
this(new ArrayList<>());
}
@Override
public void log(Level level, String message) {
msgs.add(new LogMessage(level, message));
}
// A single logged message with its level.
record LogMessage(Level level, String message) {}
}
} | class ModelProvisioningTest {
// Verifies two container clusters built from hosts.xml-backed provisioning: config ids,
// per-cluster JVM options/GC options, preload library, and allocated-memory percentage.
@Test
public void testNodesJdisc() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>\n" +
"\n" +
"<admin version='3.0'><nodes count='1' /></admin>\n" +
"<container id='mydisc' version='1.0'>" +
"  <handler id='myHandler'>" +
"    <component id='injected' />" +
"  </handler>" +
"  <nodes count=\"3\"/>" +
"</container>" +
"<container id='mydisc2' version='1.0'>" +
"  <document-processing/>" +
"  <handler id='myHandler'>" +
"    <component id='injected' />" +
"  </handler>" +
"  <nodes count='2' preload='lib/blablamalloc.so'>" +
"    <jvm allocated-memory='45%' gc-options='-XX:+UseParNewGC' options='-Xlog:gc' />" +
"  </nodes>" +
"</container>" +
"</services>";
String hosts ="<hosts>"
+ " <host name='myhost0'>"
+ "  <alias>node0</alias>"
+ " </host>"
+ " <host name='myhost1'>"
+ "  <alias>node1</alias>"
+ " </host>"
+ " <host name='myhost2'>"
+ "  <alias>node2</alias>"
+ " </host>"
+ " <host name='myhost3'>"
+ "  <alias>node3</alias>"
+ " </host>"
+ " <host name='myhost4'>"
+ "  <alias>node4</alias>"
+ " </host>"
+ " <host name='myhost5'>"
+ "  <alias>node5</alias>"
+ " </host>"
+ "</hosts>";
VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services);
VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true, false)));
ApplicationContainerCluster mydisc = model.getContainerClusters().get("mydisc");
ApplicationContainerCluster mydisc2 = model.getContainerClusters().get("mydisc2");
assertEquals(3, mydisc.getContainers().size());
assertEquals("mydisc/container.0", (mydisc.getContainers().get(0).getConfigId()));
assertTrue(mydisc.getContainers().get(0).isInitialized());
assertEquals("mydisc/container.1", mydisc.getContainers().get(1).getConfigId());
assertTrue(mydisc.getContainers().get(1).isInitialized());
assertEquals("mydisc/container.2", mydisc.getContainers().get(2).getConfigId());
assertTrue(mydisc.getContainers().get(2).isInitialized());
assertEquals(2, mydisc2.getContainers().size());
assertEquals("mydisc2/container.0", mydisc2.getContainers().get(0).getConfigId());
assertTrue(mydisc2.getContainers().get(0).isInitialized());
assertEquals("mydisc2/container.1", mydisc2.getContainers().get(1).getConfigId());
assertTrue(mydisc2.getContainers().get(1).isInitialized());
// mydisc declares no JVM settings and gets the defaults
assertEquals("", mydisc.getContainers().get(0).getJvmOptions());
assertEquals("", mydisc.getContainers().get(1).getJvmOptions());
assertEquals("", mydisc.getContainers().get(2).getJvmOptions());
assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(0).getPreLoad());
assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(1).getPreLoad());
assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(2).getPreLoad());
assertEquals(Optional.empty(), mydisc.getMemoryPercentage());
// mydisc2 gets its explicitly declared JVM settings and preload library
assertEquals("-Xlog:gc", mydisc2.getContainers().get(0).getJvmOptions());
assertEquals("-Xlog:gc", mydisc2.getContainers().get(1).getJvmOptions());
assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(0).getPreLoad());
assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(1).getPreLoad());
assertEquals(Optional.of(45), mydisc2.getMemoryPercentage());
assertEquals(Optional.of("-XX:+UseParNewGC"), mydisc2.getJvmGCOptions());
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
mydisc2.getConfig(qrStartBuilder);
QrStartConfig qrsStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals(45, qrsStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory());
HostSystem hostSystem = model.hostSystem();
assertNotNull(hostSystem.getHostByHostname("myhost0"));
assertNotNull(hostSystem.getHostByHostname("myhost1"));
assertNotNull(hostSystem.getHostByHostname("myhost2"));
assertNotNull(hostSystem.getHostByHostname("myhost3"));
assertNull(hostSystem.getHostByHostname("Nope"));
}
// Content nodes in a group get sequential distribution keys starting from 0.
@Test
public void testNodeCountForContentGroup() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"\n" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"    <redundancy>2</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes count='2'/>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
int numberOfHosts = 5;
tester.addHosts(numberOfHosts);
int numberOfContentNodes = 2;
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
Map<String, ContentCluster> contentClusters = model.getContentClusters();
ContentCluster cluster = contentClusters.get("bar");
assertEquals(numberOfContentNodes, cluster.getRootGroup().getNodes().size());
int i = 0;
for (StorageNode node : cluster.getRootGroup().getNodes())
assertEquals(i++, node.getDistributionKey());
}
@Test
public void testSeparateClusters() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes count='1'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
" <content version='1.0'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(8);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(1, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
assertEquals(2, model.getContentClusters().get("content").getRootGroup().getNodes().size(), "Nodes in cluster without ID");
assertEquals(ApplicationContainerCluster.defaultHeapSizePercentageOfTotalNodeMemory, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size for container");
assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Type.content, model);
assertProvisioned(1, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
assertProvisioned(2, ClusterSpec.Id.from("content"), ClusterSpec.Type.content, model);
}
@Test
public void testClusterMembership() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes count='1'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(1, model.hostSystem().getHosts().size());
HostResource host = model.hostSystem().getHosts().iterator().next();
assertTrue(host.spec().membership().isPresent());
assertEquals("container", host.spec().membership().get().cluster().type().name());
assertEquals("container1", host.spec().membership().get().cluster().id().value());
}
    @Test
    public void testCombinedCluster() {
        // Combined cluster: the container cluster is declared with <nodes of='content1'/> and so
        // runs on the content cluster's nodes instead of getting nodes of its own.
        String xmlWithNodes =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container1'>" +
                "     <search/>" +
                "     <nodes of='content1'/>" +
                "  </container>" +
                "  <content version='1.0' id='content1'>" +
                "     <redundancy>2</redundancy>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='2'>" +
                "       <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +
                "     </nodes>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(5);
        TestLogger logger = new TestLogger();   // captures deploy log messages for assertion below
        VespaModel model = tester.createModel(xmlWithNodes, true, new DeployState.Builder().deployLogger(logger));
        // Both clusters materialize on the same two nodes
        assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
        assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
        assertEquals(18, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is lowered with combined clusters");
        // Proton gets node memory minus the reserved amount, minus the 18% jvm heap share
        assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.18)), protonMemorySize(model.getContentClusters()
                .get("content1")), "Memory for proton is lowered to account for the jvm heap");
        // No separate container allocation; containers are provisioned as 'combined' under content1
        assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
        assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model);
        // Declaring a combined cluster must log exactly one deprecation warning
        assertEquals(1, logger.msgs().size());
        assertEquals("Declaring combined cluster with <nodes of=\"...\"> is deprecated without replacement, " +
                     "and the feature will be removed in Vespa 9. Use separate container and content clusters instead",
                     logger.msgs().get(0).message);
    }
@Test
public void testCombinedClusterWithJvmHeapSizeOverride() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes of='content1'>" +
" <jvm allocated-memory=\"30%\"/>" +
" </nodes>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'>" +
" <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +
" </nodes>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(5);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
assertEquals(30, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is lowered with combined clusters");
assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.30)), protonMemorySize(model.getContentClusters()
.get("content1")), "Memory for proton is lowered to account for the jvm heap");
assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model);
}
/** For comparison with the above */
@Test
public void testNonCombinedCluster() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes count='2'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'>" +
" <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +
" </nodes>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(7);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
assertEquals(ApplicationContainerCluster.defaultHeapSizePercentageOfTotalNodeMemory, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is normal");
assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3))), protonMemorySize(model.getContentClusters().get("content1")), "Memory for proton is normal");
}
@Test
public void testCombinedClusterWithJvmOptions() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <document-processing/>" +
" <nodes of='content1'>" +
" <jvm options='-Dtestoption=foo' />" +
" </nodes>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(5);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
for (Container container : model.getContainerClusters().get("container1").getContainers())
assertTrue(container.getJvmOptions().contains("testoption"));
}
@Test
public void testMultipleCombinedClusters() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes of='content1'/>" +
" </container>" +
" <container version='1.0' id='container2'>" +
" <nodes of='content2'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
" <content version='1.0' id='content2'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='3'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(8);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
assertEquals(3, model.getContentClusters().get("content2").getRootGroup().getNodes().size(), "Nodes in content2");
assertEquals(3, model.getContainerClusters().get("container2").getContainers().size(), "Nodes in container2");
}
@Test
public void testNonExistingCombinedClusterReference() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes of='container2'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
try {
tester.createModel(xmlWithNodes, true);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("container cluster 'container1' contains an invalid reference: referenced service 'container2' is not defined", Exceptions.toMessageString(e));
}
}
@Test
public void testInvalidCombinedClusterReference() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" +
" </container>" +
" <container version='1.0' id='container2'>" +
" <nodes count='2'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
try {
tester.createModel(xmlWithNodes, true);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("container cluster 'container1' contains an invalid reference: service 'container2' is not a content service", Exceptions.toMessageString(e));
}
}
@Test
public void testCombinedClusterWithZooKeeperFails() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes of='content1'/>" +
" <zookeeper />" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'>" +
" <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +
" </nodes>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
try {
tester.createModel(xmlWithNodes, true);
fail("ZooKeeper should not be allowed on combined clusters");
} catch (IllegalArgumentException e) {
assertEquals("A combined cluster cannot run ZooKeeper", e.getMessage());
}
}
    @Test
    public void testUsingNodesAndGroupCountAttributes() {
        // One 10-node container cluster plus two grouped content clusters:
        // "bar" = 27 nodes in 9 groups of 3, "baz" = 27 nodes in 27 one-node groups.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <admin version='4.0'/>" +
                "  <container version='1.0' id='foo'>" +
                "     <nodes count='10'/>" +
                "  </container>" +
                "  <content version='1.0' id='bar'>" +
                "     <redundancy>2</redundancy>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='27' groups='9'/>" +
                "  </content>" +
                "  <content version='1.0' id='baz'>" +
                "     <redundancy>1</redundancy>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='27' groups='27'/>" +
                "  </content>" +
                "</services>";
        int numberOfHosts = 67;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(services, true);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        // Container cluster "foo" gets 10 distinct hosts
        assertEquals(1, model.getContainerClusters().size());
        Set<HostResource> containerHosts = model.getContainerClusters().get("foo").getContainers().stream()
                .map(Container::getHost)
                .collect(Collectors.toSet());
        assertEquals(10, containerHosts.size());
        // Admin service placement: slobroks colocate with cluster controllers, logserver with containers
        Admin admin = model.getAdmin();
        Set<HostResource> clusterControllerHosts = admin.getClusterControllers().getContainers()
                .stream().map(cc -> cc.getHostResource()).collect(Collectors.toSet());
        Set<HostResource> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
        assertEquals(3, slobrokHosts.size());
        assertTrue(clusterControllerHosts.containsAll(slobrokHosts), "Slobroks are assigned on cluster controller nodes");
        assertTrue(containerHosts.contains(admin.getLogserver().getHost()), "Logserver is assigned from container nodes");
        assertEquals(0, admin.getConfigservers().size(), "No in-cluster config servers in a hosted environment");
        assertEquals(3, admin.getClusterControllers().getContainers().size(), "Dedicated admin cluster controllers when hosted");
        // "bar": 9 groups of 3 nodes; all nodes live in subgroups, none directly in the root group.
        // Distribution keys and config ids run sequentially across groups (group g holds keys 3g..3g+2).
        ContentCluster cluster = model.getContentClusters().get("bar");
        List<StorageGroup> subGroups = cluster.getRootGroup().getSubgroups();
        assertEquals(0, cluster.getRootGroup().getNodes().size());
        assertEquals(9, subGroups.size());
        assertEquals("0", subGroups.get(0).getIndex());
        assertEquals(3, subGroups.get(0).getNodes().size());
        assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
        assertEquals("node-1-3-50-57", subGroups.get(0).getNodes().get(0).getHostName());
        assertEquals(1, subGroups.get(0).getNodes().get(1).getDistributionKey());
        assertEquals("bar/storage/1", subGroups.get(0).getNodes().get(1).getConfigId());
        assertEquals(2, subGroups.get(0).getNodes().get(2).getDistributionKey());
        assertEquals("bar/storage/2", subGroups.get(0).getNodes().get(2).getConfigId());
        assertEquals("1", subGroups.get(1).getIndex());
        assertEquals(3, subGroups.get(1).getNodes().size());
        assertEquals(3, subGroups.get(1).getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/3", subGroups.get(1).getNodes().get(0).getConfigId());
        assertEquals("node-1-3-50-54", subGroups.get(1).getNodes().get(0).getHostName());
        assertEquals(4, subGroups.get(1).getNodes().get(1).getDistributionKey());
        assertEquals("bar/storage/4", subGroups.get(1).getNodes().get(1).getConfigId());
        assertEquals(5, subGroups.get(1).getNodes().get(2).getDistributionKey());
        assertEquals("bar/storage/5", subGroups.get(1).getNodes().get(2).getConfigId());
        assertEquals("node-1-3-50-51", subGroups.get(2).getNodes().get(0).getHostName());
        assertEquals("8", subGroups.get(8).getIndex());
        assertEquals(3, subGroups.get(8).getNodes().size());
        assertEquals(24, subGroups.get(8).getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/24", subGroups.get(8).getNodes().get(0).getConfigId());
        assertEquals(25, subGroups.get(8).getNodes().get(1).getDistributionKey());
        assertEquals("bar/storage/25", subGroups.get(8).getNodes().get(1).getConfigId());
        assertEquals(26, subGroups.get(8).getNodes().get(2).getDistributionKey());
        assertEquals("bar/storage/26", subGroups.get(8).getNodes().get(2).getConfigId());
        // "baz": 27 one-node groups; group index equals the single node's distribution key
        cluster = model.getContentClusters().get("baz");
        subGroups = cluster.getRootGroup().getSubgroups();
        assertEquals(0, cluster.getRootGroup().getNodes().size());
        assertEquals(27, subGroups.size());
        assertEquals("0", subGroups.get(0).getIndex());
        assertEquals(1, subGroups.get(0).getNodes().size());
        assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
        assertEquals("baz/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
        assertEquals("node-1-3-50-27", subGroups.get(0).getNodes().get(0).getHostName());
        assertEquals("1", subGroups.get(1).getIndex());
        assertEquals(1, subGroups.get(1).getNodes().size());
        assertEquals(1, subGroups.get(1).getNodes().get(0).getDistributionKey());
        assertEquals("baz/storage/1", subGroups.get(1).getNodes().get(0).getConfigId());
        assertEquals("node-1-3-50-26", subGroups.get(1).getNodes().get(0).getHostName());
        assertEquals("node-1-3-50-25", subGroups.get(2).getNodes().get(0).getHostName());
        assertEquals("26", subGroups.get(26).getIndex());
        assertEquals(1, subGroups.get(26).getNodes().size());
        assertEquals(26, subGroups.get(26).getNodes().get(0).getDistributionKey());
        assertEquals("baz/storage/26", subGroups.get(26).getNodes().get(0).getConfigId());
    }
@Test
public void testSlobroksOnContainersIfNoContentClusters() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
"</services>";
int numberOfHosts = 10;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
assertEquals(1, model.getContainerClusters().size());
Set<HostResource> containerHosts = model.getContainerClusters().get("foo").getContainers().stream()
.map(Container::getHost)
.collect(Collectors.toSet());
assertEquals(10, containerHosts.size());
Admin admin = model.getAdmin();
Set<HostResource> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
assertEquals(3, slobrokHosts.size());
assertTrue(containerHosts.containsAll(slobrokHosts),
"Slobroks are assigned from container nodes");
assertTrue(containerHosts.contains(admin.getLogserver().getHost()), "Logserver is assigned from container nodes");
assertEquals(0, admin.getConfigservers().size(), "No in-cluster config servers in a hosted environment");
}
    @Test
    public void testUsingNodesAndGroupCountAttributesWithoutDedicatedClusterControllers() {
        // Same topology as testUsingNodesAndGroupCountAttributes: a 10-node container cluster,
        // "bar" = 27 nodes in 9 groups of 3, "baz" = 27 nodes in 27 one-node groups.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <admin version='4.0'/>" +
                "  <container version='1.0' id='foo'>" +
                "     <nodes count='10'/>" +
                "  </container>" +
                "  <content version='1.0' id='bar'>" +
                "     <redundancy>2</redundancy>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='27' groups='9'/>" +
                "  </content>" +
                "  <content version='1.0' id='baz'>" +
                "     <redundancy>1</redundancy>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='27' groups='27'/>" +
                "  </content>" +
                "</services>";
        int numberOfHosts = 67;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(services, true);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        assertEquals(1, model.getContainerClusters().size());
        Set<HostResource> containerHosts = model.getContainerClusters().get("foo").getContainers().stream()
                .map(Container::getHost)
                .collect(Collectors.toSet());
        assertEquals(10, containerHosts.size());
        // Admin service placement
        Admin admin = model.getAdmin();
        Set<HostResource> clusterControllerHosts = admin.getClusterControllers().getContainers()
                .stream().map(cc -> cc.getHostResource()).collect(Collectors.toSet());
        Set<HostResource> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
        assertEquals(3, slobrokHosts.size());
        assertTrue(clusterControllerHosts.containsAll(slobrokHosts), "Slobroks are assigned on cluster controller nodes");
        assertTrue(containerHosts.contains(admin.getLogserver().getHost()), "Logserver is assigned from container nodes");
        assertEquals(0, admin.getConfigservers().size(), "No in-cluster config servers in a hosted environment");
        assertEquals(3, admin.getClusterControllers().getContainers().size());
        // "bar": 9 groups of 3 nodes; distribution keys/config ids run sequentially across groups
        ContentCluster cluster = model.getContentClusters().get("bar");
        List<StorageGroup> subGroups = cluster.getRootGroup().getSubgroups();
        assertEquals(0, cluster.getRootGroup().getNodes().size());
        assertEquals(9, subGroups.size());
        assertEquals("0", subGroups.get(0).getIndex());
        assertEquals(3, subGroups.get(0).getNodes().size());
        assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
        assertEquals("node-1-3-50-57", subGroups.get(0).getNodes().get(0).getHostName());
        assertEquals(1, subGroups.get(0).getNodes().get(1).getDistributionKey());
        assertEquals("bar/storage/1", subGroups.get(0).getNodes().get(1).getConfigId());
        assertEquals(2, subGroups.get(0).getNodes().get(2).getDistributionKey());
        assertEquals("bar/storage/2", subGroups.get(0).getNodes().get(2).getConfigId());
        assertEquals("1", subGroups.get(1).getIndex());
        assertEquals(3, subGroups.get(1).getNodes().size());
        assertEquals(3, subGroups.get(1).getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/3", subGroups.get(1).getNodes().get(0).getConfigId());
        assertEquals("node-1-3-50-54", subGroups.get(1).getNodes().get(0).getHostName());
        assertEquals(4, subGroups.get(1).getNodes().get(1).getDistributionKey());
        assertEquals("bar/storage/4", subGroups.get(1).getNodes().get(1).getConfigId());
        assertEquals(5, subGroups.get(1).getNodes().get(2).getDistributionKey());
        assertEquals("bar/storage/5", subGroups.get(1).getNodes().get(2).getConfigId());
        assertEquals("node-1-3-50-51", subGroups.get(2).getNodes().get(0).getHostName());
        assertEquals("8", subGroups.get(8).getIndex());
        assertEquals(3, subGroups.get(8).getNodes().size());
        assertEquals(24, subGroups.get(8).getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/24", subGroups.get(8).getNodes().get(0).getConfigId());
        assertEquals(25, subGroups.get(8).getNodes().get(1).getDistributionKey());
        assertEquals("bar/storage/25", subGroups.get(8).getNodes().get(1).getConfigId());
        assertEquals(26, subGroups.get(8).getNodes().get(2).getDistributionKey());
        assertEquals("bar/storage/26", subGroups.get(8).getNodes().get(2).getConfigId());
        // "baz": 27 one-node groups; group index equals the single node's distribution key
        cluster = model.getContentClusters().get("baz");
        subGroups = cluster.getRootGroup().getSubgroups();
        assertEquals(0, cluster.getRootGroup().getNodes().size());
        assertEquals(27, subGroups.size());
        assertEquals("0", subGroups.get(0).getIndex());
        assertEquals(1, subGroups.get(0).getNodes().size());
        assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
        assertEquals("baz/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
        assertEquals("node-1-3-50-27", subGroups.get(0).getNodes().get(0).getHostName());
        assertEquals("1", subGroups.get(1).getIndex());
        assertEquals(1, subGroups.get(1).getNodes().size());
        assertEquals(1, subGroups.get(1).getNodes().get(0).getDistributionKey());
        assertEquals("baz/storage/1", subGroups.get(1).getNodes().get(0).getConfigId());
        assertEquals("node-1-3-50-26", subGroups.get(1).getNodes().get(0).getHostName());
        assertEquals("node-1-3-50-25", subGroups.get(2).getNodes().get(0).getHostName());
        assertEquals("26", subGroups.get(26).getIndex());
        assertEquals(1, subGroups.get(26).getNodes().size());
        assertEquals(26, subGroups.get(26).getNodes().get(0).getDistributionKey());
        assertEquals("baz/storage/26", subGroups.get(26).getNodes().get(0).getConfigId());
    }
    @Test
    public void testGroupsOfSize1() {
        // 8 content nodes in 8 one-node groups; verifies cluster controller count/placement,
        // distribution bits, and per-group node assignment.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <admin version='4.0'/>" +
                "  <container version='1.0' id='foo'>" +
                "     <nodes count='10'/>" +
                "  </container>" +
                "  <content version='1.0' id='bar'>" +
                "     <redundancy>1</redundancy>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='8' groups='8'/>" +
                "  </content>" +
                "</services>";
        int numberOfHosts = 21;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(services, true);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        // Expected dedicated cluster controller cluster and its host assignment
        ClusterControllerContainerCluster clusterControllers = model.getAdmin().getClusterControllers();
        assertEquals(3, clusterControllers.getContainers().size());
        assertEquals("cluster-controllers", clusterControllers.getName());
        assertEquals("node-1-3-50-03", clusterControllers.getContainers().get(0).getHostName());
        assertEquals("node-1-3-50-02", clusterControllers.getContainers().get(1).getHostName());
        assertEquals("node-1-3-50-01", clusterControllers.getContainers().get(2).getHostName());
        // All content nodes live in one-node subgroups; group index equals the node's distribution key
        ContentCluster cluster = model.getContentClusters().get("bar");
        List<StorageGroup> subGroups = cluster.getRootGroup().getSubgroups();
        assertEquals(0, cluster.getRootGroup().getNodes().size());
        assertEquals(8, subGroups.size());
        assertEquals(8, cluster.distributionBits());
        assertEquals("0", subGroups.get(0).getIndex());
        assertEquals(1, subGroups.get(0).getNodes().size());
        assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
        assertEquals("node-1-3-50-11", subGroups.get(0).getNodes().get(0).getHostName());
        assertEquals("1", subGroups.get(1).getIndex());
        assertEquals(1, subGroups.get(1).getNodes().size());
        assertEquals(1, subGroups.get(1).getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/1", subGroups.get(1).getNodes().get(0).getConfigId());
        assertEquals("node-1-3-50-10", subGroups.get(1).getNodes().get(0).getHostName());
        assertEquals("7", subGroups.get(7).getIndex());
        assertEquals(1, subGroups.get(7).getNodes().size());
        assertEquals(7, subGroups.get(7).getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/7", subGroups.get(7).getNodes().get(0).getConfigId());
        assertEquals("node-1-3-50-04", subGroups.get(7).getNodes().get(0).getHostName());
    }
    @Test
    public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() {
        // When a node running a slobrok is retired, the slobrok cluster keeps that node
        // AND adds a replacement, growing from 3 to 4 members.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <admin version='4.0'/>" +
                "  <container version='1.0' id='foo'>" +
                "     <nodes count='10'/>" +
                "  </container>" +
                "</services>";
        int numberOfHosts = 11;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        // Third argument marks "node-1-3-50-09" as retired
        VespaModel model = tester.createModel(services, true, "node-1-3-50-09");
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        assertEquals(1+3, model.getAdmin().getSlobroks().size(), "Includes retired node");
        assertEquals("node-1-3-50-11", model.getAdmin().getSlobroks().get(0).getHostName());
        assertEquals("node-1-3-50-10", model.getAdmin().getSlobroks().get(1).getHostName());
        assertEquals("node-1-3-50-08", model.getAdmin().getSlobroks().get(2).getHostName());
        assertEquals("node-1-3-50-09", model.getAdmin().getSlobroks().get(3).getHostName(), "Included in addition because it is retired");
    }
    @Test
    public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() {
        // Two retired nodes: both stay in the slobrok cluster, listed after the 3 active members.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <admin version='4.0'/>" +
                "  <container version='1.0' id='foo'>" +
                "     <nodes count='10'/>" +
                "  </container>" +
                "</services>";
        int numberOfHosts = 12;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        // Trailing arguments mark nodes 03 and 04 as retired
        VespaModel model = tester.createModel(services, true, "node-1-3-50-03", "node-1-3-50-04");
        assertEquals(10+2, model.getRoot().hostSystem().getHosts().size());
        assertEquals(3+2, model.getAdmin().getSlobroks().size(), "Includes retired node");
        assertEquals("node-1-3-50-12", model.getAdmin().getSlobroks().get(0).getHostName());
        assertEquals("node-1-3-50-11", model.getAdmin().getSlobroks().get(1).getHostName());
        assertEquals("node-1-3-50-10", model.getAdmin().getSlobroks().get(2).getHostName());
        assertEquals("node-1-3-50-04", model.getAdmin().getSlobroks().get(3).getHostName(), "Included in addition because it is retired");
        assertEquals("node-1-3-50-03", model.getAdmin().getSlobroks().get(4).getHostName(), "Included in addition because it is retired");
    }
    @Test
    public void testSlobroksAreSpreadOverAllContainerClusters() {
        // Two container clusters with retired nodes in both; the slobrok set spans both clusters
        // and retains retired members in addition to their replacements (7 slobroks in total).
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <admin version='4.0'/>" +
                "  <container version='1.0' id='foo'>" +
                "     <nodes count='10'/>" +
                "  </container>" +
                "  <container version='1.0' id='bar'>" +
                "     <nodes count='3'/>" +
                "  </container>" +
                "</services>";
        int numberOfHosts = 16;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        // Trailing arguments mark nodes 15, 05 and 04 as retired
        VespaModel model = tester.createModel(services, true, "node-1-3-50-15", "node-1-3-50-05", "node-1-3-50-04");
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        assertEquals(7, model.getAdmin().getSlobroks().size(), "Includes retired node");
        assertEquals("node-1-3-50-16", model.getAdmin().getSlobroks().get(0).getHostName());
        assertEquals("node-1-3-50-14", model.getAdmin().getSlobroks().get(1).getHostName());
        assertEquals("node-1-3-50-15", model.getAdmin().getSlobroks().get(2).getHostName(), "Included in addition because it is retired");
        assertEquals("node-1-3-50-03", model.getAdmin().getSlobroks().get(3).getHostName());
        // NOTE(review): index 4's hostname is deliberately not asserted here — confirm whether
        // its placement is non-deterministic or simply left uncovered.
        assertEquals("node-1-3-50-05", model.getAdmin().getSlobroks().get(5).getHostName(), "Included in addition because it is retired");
        assertEquals("node-1-3-50-04", model.getAdmin().getSlobroks().get(6).getHostName(), "Included in addition because it is retired");
    }
@Test
public void testDedicatedClusterControllers() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <content version='1.0' id='foo'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2' />" +
" </content>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2' />" +
" </content>" +
"</services>";
int numberOfHosts = 7;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services);
assertEquals(7, model.getRoot().hostSystem().getHosts().size());
ClusterControllerContainerCluster clusterControllers = model.getAdmin().getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("cluster-controllers", clusterControllers.getName());
clusterControllers.getContainers().stream().map(ClusterControllerContainer::getHost).forEach(host -> {
assertTrue(host.spec().membership().get().cluster().isStateful());
assertEquals(ClusterSpec.Type.admin, host.spec().membership().get().cluster().type());
});
}
    @Test
    public void testLogserverContainerWhenDedicatedLogserver() {
        // Dedicated logserver node declared in the admin section; delegates the actual
        // checks to the shared testContainerOnLogserverHost helper.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <admin version='4.0'>" +
                "    <logservers>" +
                "      <nodes count='1' dedicated='true'/>" +
                "    </logservers>" +
                "  </admin>" +
                "  <container version='1.0' id='foo'>" +
                "     <nodes count='1'/>" +
                "  </container>" +
                "</services>";
        // NOTE(review): the flag is false although services declares dedicated='true' —
        // verify the flag's meaning against testContainerOnLogserverHost (not visible here).
        boolean useDedicatedNodeForLogserver = false;
        testContainerOnLogserverHost(services, useDedicatedNodeForLogserver);
    }
@Test
public void testLogForwarderNotInAdminCluster() {
        // Without include-admin on <logforwarding>, the logforwarder service must run
        // only on container nodes, not on the dedicated logserver host.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <admin version='4.0'>" +
                "    <logservers>" +
                "      <nodes count='1' dedicated='true'/>" +
                "    </logservers>" +
                "    <logforwarding>" +
                "      <splunk deployment-server='bardeplserv:123' client-name='barclinam' phone-home-interval='987' />" +
                "    </logforwarding>" +
                "  </admin>" +
                "  <container version='1.0' id='foo'>" +
                "    <nodes count='1'/>" +
                "  </container>" +
                "</services>";
        int numberOfHosts = 2;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts+1);
        VespaModel model = tester.createModel(Zone.defaultZone(), services, true);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        Admin admin = model.getAdmin();
        Logserver logserver = admin.getLogserver();
        HostResource hostResource = logserver.getHostResource();
        // Logserver host: no container and, crucially, no logforwarder
        assertNotNull(hostResource.getService("logserver"));
        assertNull(hostResource.getService("container"));
        assertNull(hostResource.getService("logforwarder"));
        var clist = model.getContainerClusters().get("foo").getContainers();
        assertEquals(1, clist.size());
        hostResource = clist.get(0).getHostResource();
        // Container host: runs both the container and the logforwarder
        assertNull(hostResource.getService("logserver"));
        assertNotNull(hostResource.getService("container"));
        assertNotNull(hostResource.getService("logforwarder"));
        var lfs = hostResource.getService("logforwarder");
        // The forwarder gets a pre-shutdown command to stop forwarding cleanly
        String shutdown = lfs.getPreShutdownCommand().orElse("<none>");
        assertTrue(shutdown.startsWith("$ROOT/bin/vespa-logforwarder-start -S -c hosts/"));
}
@Test
public void testLogForwarderInAdminCluster() {
        // With include-admin='true', the logforwarder must run on the dedicated
        // logserver host as well as on the container nodes.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <admin version='4.0'>" +
                "    <logservers>" +
                "      <nodes count='1' dedicated='true'/>" +
                "    </logservers>" +
                "    <logforwarding include-admin='true'>" +
                "      <splunk deployment-server='bardeplserv:123' client-name='barclinam' phone-home-interval='987' />" +
                "    </logforwarding>" +
                "  </admin>" +
                "  <container version='1.0' id='foo'>" +
                "    <nodes count='1'/>" +
                "  </container>" +
                "</services>";
        int numberOfHosts = 2;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts+1);
        VespaModel model = tester.createModel(Zone.defaultZone(), services, true);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        Admin admin = model.getAdmin();
        Logserver logserver = admin.getLogserver();
        HostResource hostResource = logserver.getHostResource();
        // Logserver host now also carries the logforwarder (contrast with the
        // testLogForwarderNotInAdminCluster case)
        assertNotNull(hostResource.getService("logserver"));
        assertNull(hostResource.getService("container"));
        assertNotNull(hostResource.getService("logforwarder"));
        var clist = model.getContainerClusters().get("foo").getContainers();
        assertEquals(1, clist.size());
        hostResource = clist.get(0).getHostResource();
        assertNull(hostResource.getService("logserver"));
        assertNotNull(hostResource.getService("container"));
        assertNotNull(hostResource.getService("logforwarder"));
}
@Test
public void testImplicitLogserverContainer() {
        // No <admin> element here: the helper is told to expect a logserver node
        // to be allocated implicitly.
        String servicesXml =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <container version='1.0' id='foo'>" +
                "    <nodes count='1'/>" +
                "  </container>" +
                "</services>";
        testContainerOnLogserverHost(servicesXml, true);
}
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() {
        // Requests 24 nodes in 3 groups but only 6 hosts are available: the model
        // must downscale to 3 groups of 2 nodes and adjust redundancy/copies to
        // fit (the assertions below pin 2 copies per group, 6 total).
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <admin version='3.0'>" +
                "    <nodes count='3'/>" +
                "  </admin>" +
                "  <content version='1.0' id='bar'>" +
                "     <redundancy reply-after='3'>4</redundancy>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='24' groups='3'/>" +
                "     <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
                "  </content>" +
                "</services>";
        int numberOfHosts = 6;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(services, false);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        ContentCluster cluster = model.getContentClusters().get("bar");
        List<StorageGroup> subGroups = cluster.getRootGroup().getSubgroups();
        assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy());
        assertEquals(2*3, cluster.redundancy().effectiveFinalRedundancy());
        assertEquals(2*3, cluster.redundancy().effectiveReadyCopies());
        assertEquals("2|2|*", cluster.getRootGroup().getPartitions().get());
        assertEquals(0, cluster.getRootGroup().getNodes().size());
        assertEquals(3, subGroups.size());
        // Distribution keys and config ids are assigned sequentially across groups
        assertEquals("0", subGroups.get(0).getIndex());
        assertEquals(2, subGroups.get(0).getNodes().size());
        assertEquals(0, subGroups.get(0).getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/0", subGroups.get(0).getNodes().get(0).getConfigId());
        assertEquals(1, subGroups.get(0).getNodes().get(1).getDistributionKey());
        assertEquals("bar/storage/1", subGroups.get(0).getNodes().get(1).getConfigId());
        assertEquals("1", subGroups.get(1).getIndex());
        assertEquals(2, subGroups.get(1).getNodes().size());
        assertEquals(2, subGroups.get(1).getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/2", subGroups.get(1).getNodes().get(0).getConfigId());
        assertEquals(3, subGroups.get(1).getNodes().get(1).getDistributionKey());
        assertEquals("bar/storage/3", subGroups.get(1).getNodes().get(1).getConfigId());
        assertEquals("2", subGroups.get(2).getIndex());
        assertEquals(2, subGroups.get(2).getNodes().size());
        assertEquals(4, subGroups.get(2).getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/4", subGroups.get(2).getNodes().get(0).getConfigId());
        assertEquals(5, subGroups.get(2).getNodes().get(1).getDistributionKey());
        assertEquals("bar/storage/5", subGroups.get(2).getNodes().get(1).getConfigId());
}
@Test
public void testRedundancyWithGroupsTooHighRedundancyAndOneRetiredNode() {
        // Redundancy 2 with two groups of one node each must be rejected: redundancy
        // cannot exceed the minimum number of nodes per group (here 1).
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <content version='1.0' id='bar'>" +
                "     <redundancy>2</redundancy>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='2' groups='2'/>" +
                "  </content>" +
                "</services>";
        int numberOfHosts = 3;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        // assertThrows for consistency with the other failure tests in this class
        IllegalArgumentException e =
                assertThrows(IllegalArgumentException.class,
                             () -> tester.createModel(services, false, "node-1-3-50-03"));
        assertEquals("Cluster 'bar' specifies redundancy 2, but it cannot be higher than the minimum nodes per group, which is 1",
                     Exceptions.toMessageString(e));
}
@Test
public void testRedundancyWithGroupsAndThreeRetiredNodes() {
        // Three hosts are marked retired; the model keeps them (5 hosts total) and the
        // effective redundancy becomes 2 even though 1 is declared — see assertions.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <content version='1.0' id='bar'>" +
                "     <redundancy>1</redundancy>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='2' groups='2'/>" +
                "  </content>" +
                "</services>";
        int numberOfHosts = 5;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(services, false, "node-1-3-50-05", "node-1-3-50-04", "node-1-3-50-03");
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        ContentCluster cluster = model.getContentClusters().get("bar");
        assertEquals(2, cluster.redundancy().effectiveInitialRedundancy());
        assertEquals(2, cluster.redundancy().effectiveFinalRedundancy());
        assertEquals(2, cluster.redundancy().effectiveReadyCopies());
        assertEquals("1|*", cluster.getRootGroup().getPartitions().get());
        assertEquals(0, cluster.getRootGroup().getNodes().size());
        assertEquals(2, cluster.getRootGroup().getSubgroups().size());
}
@Test
public void testRedundancy2DownscaledToOneNodeButOneRetired() {
        // The cluster is downscaled to one active node plus one retired node: the
        // storage cluster keeps both children, but effective redundancy drops to 1.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <content version='1.0' id='bar'>" +
                "     <redundancy>2</redundancy>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='2'/>" +
                "  </content>" +
                "</services>";
        int numberOfHosts = 3;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(services, false, false, true, "node-1-3-50-03");
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        ContentCluster cluster = model.getContentClusters().get("bar");
        assertEquals(2, cluster.getStorageCluster().getChildren().size());
        assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
        assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
        assertEquals(1, cluster.redundancy().effectiveReadyCopies());
        assertEquals(2, cluster.getRootGroup().getNodes().size());
        assertEquals(0, cluster.getRootGroup().getSubgroups().size());
}
@Test
public void testUsingNodesCountAttributesAndGettingTooFewNodes() {
        // Requests 24 content nodes but only 6 hosts exist: the model must downscale
        // to 4 content nodes and cap redundancy, searchable-copies and dispatch
        // groups accordingly. (A duplicated node-count assertion was removed.)
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <admin version='3.0'>" +
                "    <nodes count='3'/>" +
                "  </admin>" +
                "  <container version='1.0' id='container'>" +
                "     <search/>" +
                "     <nodes count='2'/>" +
                "  </container>" +
                "  <content version='1.0' id='bar'>" +
                "     <redundancy reply-after='8'>12</redundancy>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='24'/>" +
                "     <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
                "     <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
                "  </content>" +
                "</services>";
        int numberOfHosts = 6;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(services, false);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        ContentCluster cluster = model.getContentClusters().get("bar");
        assertEquals(4, cluster.redundancy().effectiveInitialRedundancy());
        assertEquals(4, cluster.redundancy().effectiveFinalRedundancy());
        assertEquals(4, cluster.redundancy().effectiveReadyCopies());
        assertEquals(4, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
        assertEquals(4, cluster.getSearch().getIndexed().getSearchableCopies());
        assertFalse(cluster.getRootGroup().getPartitions().isPresent());
        // All 4 nodes end up in one flat group with sequential distribution keys
        assertEquals(4, cluster.getRootGroup().getNodes().size());
        assertEquals(0, cluster.getRootGroup().getSubgroups().size());
        assertEquals(0, cluster.getRootGroup().getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
        assertEquals(1, cluster.getRootGroup().getNodes().get(1).getDistributionKey());
        assertEquals("bar/storage/1", cluster.getRootGroup().getNodes().get(1).getConfigId());
        assertEquals(2, cluster.getRootGroup().getNodes().get(2).getDistributionKey());
        assertEquals("bar/storage/2", cluster.getRootGroup().getNodes().get(2).getConfigId());
        assertEquals(3, cluster.getRootGroup().getNodes().get(3).getDistributionKey());
        assertEquals("bar/storage/3", cluster.getRootGroup().getNodes().get(3).getConfigId());
}
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() {
        // Requests 24 nodes in 3 groups but only a single host exists: the model
        // must collapse to one node / one group with redundancy 1.
        // (A duplicated node-count assertion was removed.)
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <admin version='3.0'>" +
                "    <nodes count='3'/>" +
                "  </admin>" +
                "  <content version='1.0' id='bar'>" +
                "     <redundancy reply-after='3'>4</redundancy>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='24' groups='3'/>" +
                "     <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
                "  </content>" +
                "</services>";
        int numberOfHosts = 1;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(services, false);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        ContentCluster cluster = model.getContentClusters().get("bar");
        assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
        assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
        assertEquals(1, cluster.redundancy().effectiveReadyCopies());
        assertFalse(cluster.getRootGroup().getPartitions().isPresent());
        assertEquals(1, cluster.getRootGroup().getNodes().size());
        assertEquals(0, cluster.getRootGroup().getSubgroups().size());
        assertEquals(0, cluster.getRootGroup().getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
}
@Test
public void testRequiringMoreNodesThanAreAvailable() {
        // required='true' forbids downscaling, so asking for 3 nodes when only
        // 2 hosts exist must fail model building.
        assertThrows(IllegalArgumentException.class, () -> {
            String services =
                    "<?xml version='1.0' encoding='utf-8' ?>\n" +
                    "<services>" +
                    "  <content version='1.0' id='bar'>" +
                    "     <redundancy>1</redundancy>" +
                    "     <documents>" +
                    "       <document type='type1' mode='index'/>" +
                    "     </documents>" +
                    "     <nodes count='3' required='true'/>" +
                    "  </content>" +
                    "</services>";
            int numberOfHosts = 2;
            VespaModelTester tester = new VespaModelTester();
            tester.addHosts(numberOfHosts);
            tester.createModel(services, false);
        });
}
@Test
public void testRequiredNodesAndDedicatedClusterControllers() {
        // With required='true' content nodes plus dedicated cluster controllers,
        // the 4 available hosts are not sufficient, so model building must fail.
        assertThrows(IllegalArgumentException.class, () -> {
            String servicesXml =
                    "<?xml version='1.0' encoding='utf-8' ?>\n" +
                    "<services>" +
                    "  <content version='1.0' id='foo'>" +
                    "     <redundancy>1</redundancy>" +
                    "     <documents>" +
                    "       <document type='type1' mode='index'/>" +
                    "     </documents>" +
                    "     <nodes count='2' required='true'/>" +
                    "  </content>" +
                    "</services>";
            VespaModelTester modelTester = new VespaModelTester();
            modelTester.addHosts(4);
            modelTester.createModel(servicesXml, false);
        });
}
@Test
public void testExclusiveNodes() {
        // exclusive='true' on the nodes elements must propagate to the cluster
        // membership of every provisioned host.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "<container version='1.0' id='container'>" +
                "      <nodes count='2' exclusive='true'/>" +
                "   </container>" +
                "  <content version='1.0' id='bar'>" +
                "     <redundancy>1</redundancy>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='3' exclusive='true'/>" +
                "  </content>" +
                "</services>";
        int numberOfHosts = 5;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(services, false);
        model.hostSystem().getHosts().forEach(host -> assertTrue(host.spec().membership().get().cluster().isExclusive()));
}
@Test
public void testUsingNodesCountAttributesAndGettingJustOneNode() {
        // Requests 24 content nodes but only a single host exists: everything is
        // downscaled to one node, one dispatch group, redundancy 1.
        // (A duplicated node-count assertion was removed.)
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <admin version='3.0'>" +
                "    <nodes count='3'/>" +
                "  </admin>" +
                "  <content version='1.0' id='bar'>" +
                "     <redundancy reply-after='8'>12</redundancy>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='24'/>" +
                "     <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
                "     <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
                "  </content>" +
                "</services>";
        int numberOfHosts = 1;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(services, false);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        ContentCluster cluster = model.getContentClusters().get("bar");
        assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
        assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
        assertEquals(1, cluster.redundancy().effectiveReadyCopies());
        assertEquals(1, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
        assertFalse(cluster.getRootGroup().getPartitions().isPresent());
        assertEquals(1, cluster.getRootGroup().getNodes().size());
        assertEquals(0, cluster.getRootGroup().getSubgroups().size());
        assertEquals(0, cluster.getRootGroup().getNodes().get(0).getDistributionKey());
        assertEquals("bar/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
}
@Test
public void testRequestingRangesMin() {
        // Fix: the @Test annotation was duplicated, which does not compile
        // (org.junit.jupiter.api.Test is not @Repeatable).
        // Host flavors only match the low end of the requested resource/count
        // ranges, so the model must settle at the range minimums.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container'>" +
                "     <nodes count='[4, 6]'>" +
                "       <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" +
                "     </nodes>" +
                "  </container>" +
                "  <content version='1.0' id='foo'>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='[6, 20]' groups='[3,4]'>" +
                "       <resources vcpu='8' memory='200Gb' disk='1Pb'/>" +
                "     </nodes>" +
                "  </content>" +
                "</services>";
        int totalHosts = 10;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(new NodeResources(11.5, 10, 30, 0.3), 6);
        tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20);
        tester.addHosts(new NodeResources( 0.5, 2, 10, 0.3), 3);
        VespaModel model = tester.createModel(services, true);
        // + 3 for the implicitly created, dedicated cluster controller cluster
        assertEquals(totalHosts + 3, model.getRoot().hostSystem().getHosts().size());
}
@Test
public void testRequestingRangesMax() {
        // Counterpart to testRequestingRangesMin: host flavors match the high end of
        // the requested ranges and max resources are requested (last createModel arg),
        // so the model must settle at the range maximums.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container'>" +
                "     <nodes count='[4, 6]'>" +
                "       <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" +
                "     </nodes>" +
                "  </container>" +
                "  <content version='1.0' id='foo'>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='[6, 20]' groups='[3,4]'>" +
                "       <resources vcpu='8' memory='200Gb' disk='1Pb'/>" +
                "     </nodes>" +
                "  </content>" +
                "</services>";
        int totalHosts = 29;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(new NodeResources(13.5, 100, 1000, 0.3), 6);
        tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20);
        tester.addHosts(new NodeResources( 0.5, 2, 10, 0.3), 3);
        VespaModel model = tester.createModel(services, true, true);
        assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size());
}
@Test
public void testUseArm64NodesForAdminCluster() {
        // When the admin cluster architecture is set to arm64, cluster controllers and
        // the dedicated logserver must land on arm64 hosts while all other nodes use
        // the default architecture.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <admin version='4.0'>" +
                "  </admin>" +
                "  <container version='1.0' id='container'>" +
                "     <nodes count='2'>" +
                "       <resources vcpu='2' memory='8Gb' disk='30Gb'/>" +
                "     </nodes>" +
                "  </container>" +
                "  <content version='1.0' id='foo'>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='2'>" +
                "       <resources vcpu='2' memory='8Gb' disk='30Gb'/>" +
                "     </nodes>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.setHosted(true);
        tester.setAdminClusterArchitecture(Architecture.arm64);
        tester.useDedicatedNodeForLogserver(true);
        tester.addHosts(new NodeResources(13.5, 100, 1000, 0.3), 4);
        tester.addHosts(new NodeResources(0.5, 2, 50, 0.3, DiskSpeed.fast, StorageType.any, Architecture.arm64), 4);
        VespaModel model = tester.createModel(services, true, true);
        List<HostResource> hosts = model.getRoot().hostSystem().getHosts();
        assertEquals(8, hosts.size());
        // 3 cluster controllers on arm64
        Set<HostResource> clusterControllerResources = getHostResourcesForService(hosts, "container-clustercontroller");
        assertEquals(3, clusterControllerResources.size());
        assertTrue(clusterControllerResources.stream().allMatch(host -> host.realResources().architecture() == Architecture.arm64));
        // 1 logserver on arm64
        Set<HostResource> logserverResources = getHostResourcesForService(hosts, "logserver-container");
        assertEquals(1, logserverResources.size());
        assertTrue(logserverResources.stream().allMatch(host -> host.realResources().architecture() == Architecture.arm64));
        // Everything else on the default architecture
        assertTrue(hosts.stream()
                        .filter(host -> !clusterControllerResources.contains(host))
                        .filter(host -> !logserverResources.contains(host))
                        .allMatch(host -> host.realResources().architecture() == Architecture.getDefault()));
}
/** Returns the hosts among the given ones that run at least one service of the given service type. */
private Set<HostResource> getHostResourcesForService(List<HostResource> hosts, String service) {
    return hosts.stream()
                .filter(host -> host.getHostInfo().getServices().stream()
                                    .anyMatch(s -> s.getServiceType().equals(service)))
                .collect(Collectors.toSet());
}
@Test
public void testContainerOnly() {
        // A top-level <container> element without a surrounding <services> wrapper:
        // an admin cluster (logserver + slobroks) is still created implicitly.
        String servicesXml =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<container version='1.0'>" +
                "  <search/>" +
                "  <nodes count='3'/>" +
                "</container>";
        VespaModelTester modelTester = new VespaModelTester();
        modelTester.addHosts(3);
        VespaModel vespaModel = modelTester.createModel(servicesXml, true);
        assertEquals(3, vespaModel.getRoot().hostSystem().getHosts().size());
        assertEquals(3, vespaModel.getContainerClusters().get("container").getContainers().size());
        assertNotNull(vespaModel.getAdmin().getLogserver());
        assertEquals(3, vespaModel.getAdmin().getSlobroks().size());
}
@Test
public void testJvmOptions() {
        // A <jvm options=...> element under <nodes> must be passed through to the
        // assigned JVM options of each container.
        String servicesXml =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<container version='1.0'>" +
                "  <search/>" +
                "  <nodes count='3'>" +
                "    <jvm options='-DfooOption=xyz' /> " +
                "  </nodes>" +
                "</container>";
        VespaModelTester modelTester = new VespaModelTester();
        modelTester.addHosts(3);
        VespaModel vespaModel = modelTester.createModel(servicesXml, true);
        assertEquals(3, vespaModel.getRoot().hostSystem().getHosts().size());
        assertEquals("-DfooOption=xyz", vespaModel.getContainerClusters().get("container").getContainers().get(0).getAssignedJvmOptions());
}
@Test
public void testUsingHostaliasWithProvisioner() {
        // Non-hosted mode: explicit hostalias references must work together with a
        // provisioner backing a single host.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "<admin version='2.0'>" +
                "  <adminserver hostalias='node1'/>\n"+
                "</admin>\n" +
                "<container id='mydisc' version='1.0'>" +
                "  <handler id='myHandler'>" +
                "    <component id='injected' />" +
                "  </handler>" +
                "  <nodes>" +
                "    <node hostalias='node1'/>" +
                "  </nodes>" +
                "</container>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.setHosted(false);
        tester.addHosts(1);
        VespaModel model = tester.createModel(services, true);
        assertEquals(1, model.getRoot().hostSystem().getHosts().size());
        assertEquals(1, model.getAdmin().getSlobroks().size());
}
@Test
public void testThatStandaloneSyntaxWorksOnHostedVespa() {
        // Standalone-style container syntax is accepted in hosted mode as long as the
        // http server is declared on the default Vespa web service port.
        String servicesXml =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<container id='foo' version='1.0'>" +
                "  <http>" +
                "    <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" +
                "  </http>" +
                "</container>";
        VespaModelTester modelTester = new VespaModelTester();
        modelTester.addHosts(2);
        VespaModel vespaModel = modelTester.createModel(servicesXml, true);
        assertEquals(2, vespaModel.getHosts().size());
        assertEquals(1, vespaModel.getContainerClusters().size());
        assertEquals(2, vespaModel.getContainerClusters().get("foo").getContainers().size());
}
@Test
public void testThatStandaloneSyntaxOnHostedVespaRequiresDefaultPort() {
        // Declaring an http server on a non-default port in hosted mode must fail.
        // Rewritten with assertThrows for consistency with the other failure tests
        // in this class; also removes the unused local model variable.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<container id='foo' version='1.0'>" +
                "  <http>" +
                "    <server id='server1' port='8095' />" +
                "  </http>" +
                "</container>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(1);
        IllegalArgumentException e =
                assertThrows(IllegalArgumentException.class, () -> tester.createModel(services, true));
        assertEquals("Illegal port 8095 in http server 'server1': Port must be set to " +
                     getDefaults().vespaWebServicePort(), e.getMessage());
}
@Test
public void testThatStandaloneSyntaxWorksOnHostedManuallyDeployed() {
        // Manually deployed (dev) environment: node counts are scaled down to 1 per
        // cluster and the admin node is replaced by a single cluster controller.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <admin version='2.0'>" +
                "    <adminserver hostalias='node1'/>" +
                "  </admin>" +
                "  <container id='foo' version='1.0'>" +
                "    <nodes>" +
                "      <node hostalias='node1'/>" +
                "      <node hostalias='node2'/>" +
                "    </nodes>" +
                "  </container>" +
                "  <content id='bar' version='1.0'>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <redundancy>2</redundancy>" +
                "    <nodes>" +
                "      <group>" +
                "        <node distribution-key='0' hostalias='node3'/>" +
                "        <node distribution-key='1' hostalias='node4'/>" +
                "      </group>" +
                "    </nodes>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.setHosted(true);
        tester.addHosts(4);
        VespaModel model = tester.createModel(new Zone(Environment.dev, RegionName.from("us-central-1")), services, true);
        assertEquals(3, model.getHosts().size(), "We get 1 node per cluster and no admin node apart from the dedicated cluster controller");
        assertEquals(1, model.getContainerClusters().size());
        assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
        assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes(true));
        assertEquals(1, model.getAdmin().getClusterControllers().getContainers().size());
}
@Test
public void testThatStandaloneSyntaxWithClusterControllerWorksOnHostedManuallyDeployed() {
        // In a hosted non-manual environment (staging), content clusters declared with
        // explicit <node> elements instead of <nodes count='N'> must be rejected.
        // Rewritten with assertThrows for consistency with the other failure tests
        // in this class; also removes the unused local model variable.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container id='foo' version='1.0'>" +
                "    <nodes count=\"2\" />" +
                "  </container>" +
                "  <content id='bar' version='1.0'>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <redundancy>1</redundancy>" +
                "    <nodes>" +
                "      <group>" +
                "        <node distribution-key='0' hostalias='node3'/>" +
                "      </group>" +
                "    </nodes>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.setHosted(true);
        tester.addHosts(4);
        IllegalArgumentException e =
                assertThrows(IllegalArgumentException.class,
                             () -> tester.createModel(new Zone(Environment.staging, RegionName.from("us-central-1")), services, true));
        assertTrue(e.getMessage().startsWith("Clusters in hosted environments must have a <nodes count='N'> tag"));
}
/** Deploying an application with "nodes count" standalone should give a single-node deployment */
@Test
public void testThatHostedSyntaxWorksOnStandalone() {
        // Non-hosted mode: hosted-style <nodes count='N'> is accepted but downscaled
        // to a single node per cluster.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container1'>" +
                "     <search/>" +
                "     <nodes count='1'/>" +
                "  </container>" +
                "  <content version='1.0'>" +
                "     <redundancy>2</redundancy>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='2'/>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.setHosted(false);
        tester.addHosts(3);
        VespaModel model = tester.createModel(services, true);
        assertEquals(1,
                     model.getContainerClusters().get("container1").getContainers().size(),
                     "Nodes in container cluster");
        assertEquals(1,
                     model.getContentClusters().get("content").getRootGroup().getNodes().size(),
                     "Nodes in content cluster (downscaled)");
        assertEquals(1, model.getAdmin().getSlobroks().size());
        // Smoke test that config can be produced for the resulting model
        model.getConfig(new StorStatusConfig.Builder(), "default");
        StorageCluster storage = model.getContentClusters().get("content").getStorageCluster();
        StorCommunicationmanagerConfig.Builder builder = new StorCommunicationmanagerConfig.Builder();
        storage.getChildren().get("0").getConfig(builder);
}
/**
* Deploying an application with "nodes count" standalone should give a single-node deployment,
* also if the user has a lingering hosts file from running self-hosted.
*
* NOTE: This does *not* work (but gives an understandable error message),
* but the current code does not get provoke the error that is thrown from HostsXmlProvisioner.prepare
*/
@Test
public void testThatHostedSyntaxWorksOnStandaloneAlsoWithAHostedFile() {
        // Same as testThatHostedSyntaxWorksOnStandalone, but with a lingering
        // hosts.xml present; the hosts file must not disturb the downscaling.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>" +
                "<services>" +
                "  <container version='1.0' id='container1'>" +
                "     <search/>" +
                "     <nodes count='1'/>" +
                "  </container>" +
                "  <content version='1.0'>" +
                "     <redundancy>2</redundancy>" +
                "     <documents>" +
                "       <document type='type1' mode='index'/>" +
                "     </documents>" +
                "     <nodes count='2'/>" +
                "  </content>" +
                "</services>";
        String hosts =
                "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
                "<hosts>\n" +
                "  <host name=\"vespa-1\">\n" +
                "    <alias>vespa-1</alias>\n" +
                "  </host>\n" +
                "  <host name=\"vespa-2\">\n" +
                "    <alias>vespa-2</alias>\n" +
                "  </host>\n" +
                "  <host name=\"vespa-3\">\n" +
                "    <alias>vespa-3</alias>\n" +
                "  </host>\n" +
                "</hosts>";
        VespaModelTester tester = new VespaModelTester();
        tester.setHosted(false);
        tester.addHosts(3);
        VespaModel model = tester.createModel(services, hosts, true);
        assertEquals(1,
                     model.getContainerClusters().get("container1").getContainers().size(),
                     "Nodes in container cluster");
        assertEquals(1,
                     model.getContentClusters().get("content").getRootGroup().getNodes().size(),
                     "Nodes in content cluster (downscaled)");
        assertEquals(1, model.getAdmin().getSlobroks().size());
        // Smoke test that config can be produced for the resulting model
        model.getConfig(new StorStatusConfig.Builder(), "default");
        StorageCluster storage = model.getContentClusters().get("content").getStorageCluster();
        StorCommunicationmanagerConfig.Builder builder = new StorCommunicationmanagerConfig.Builder();
        storage.getChildren().get("0").getConfig(builder);
}
@Test
public void testNoNodeTagMeansTwoNodes() {
        // Hosted mode, no <nodes> tags: the container cluster defaults to 2 nodes,
        // the content cluster to 1, plus implicitly allocated admin nodes (6 total).
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <container id='foo' version='1.0'>" +
                "    <search/>" +
                "    <document-api/>" +
                "  </container>" +
                "  <content version='1.0' id='bar'>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(6);
        VespaModel model = tester.createModel(services, true);
        assertEquals(6, model.getRoot().hostSystem().getHosts().size());
        assertEquals(3, model.getAdmin().getSlobroks().size());
        assertEquals(2, model.getContainerClusters().get("foo").getContainers().size());
        assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes(true));
}
@Test
public void testNoNodeTagMeansTwoNodesNoContent() {
        // Hosted mode, container cluster only and no <nodes> tag: defaults to
        // 2 container nodes, which also host the 2 slobroks.
        String servicesXml =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <container id='foo' version='1.0'>" +
                "    <search/>" +
                "    <document-api/>" +
                "  </container>" +
                "</services>";
        VespaModelTester modelTester = new VespaModelTester();
        modelTester.addHosts(2);
        VespaModel vespaModel = modelTester.createModel(servicesXml, true);
        assertEquals(2, vespaModel.getRoot().hostSystem().getHosts().size());
        assertEquals(2, vespaModel.getAdmin().getSlobroks().size());
        assertEquals(2, vespaModel.getContainerClusters().get("foo").getContainers().size());
}
@Test
public void testNoNodeTagMeans1NodeNonHosted() {
        // Non-hosted mode, no <nodes> tags: everything collapses onto a single node.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <container id='foo' version='1.0'>" +
                "    <search/>" +
                "    <document-api/>" +
                "  </container>" +
                "  <content version='1.0' id='bar'>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.setHosted(false);
        tester.addHosts(1);
        VespaModel model = tester.createModel(services, true);
        assertEquals(1, model.getRoot().hostSystem().getHosts().size());
        assertEquals(1, model.getAdmin().getSlobroks().size());
        assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
        assertEquals(1, model.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size());
}
@Test
public void testSingleNodeNonHosted() {
        // Non-hosted mode with explicit single-node <nodes> elements referencing the
        // same hostalias: all services share that one node.
        String services =
                "<?xml version='1.0' encoding='utf-8' ?>\n" +
                "<services>" +
                "  <container id='foo' version='1.0'>" +
                "    <search/>" +
                "    <document-api/>" +
                "    <nodes><node hostalias='foo'/></nodes>"+
                "  </container>" +
                "  <content version='1.0' id='bar'>" +
                "    <documents>" +
                "      <document type='type1' mode='index'/>" +
                "    </documents>" +
                "    <nodes><node hostalias='foo' distribution-key='0'/></nodes>"+
                "  </content>" +
                "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.setHosted(false);
        tester.addHosts(1);
        VespaModel model = tester.createModel(services, true);
        assertEquals(1, model.getRoot().hostSystem().getHosts().size());
        assertEquals(1, model.getAdmin().getSlobroks().size());
        assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
        assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes(true));
}
/** Recreate the combination used in some factory tests */
@Test
public void testMultitenantButNotHosted() {
        // Multitenant but non-hosted deployment: both content nodes map onto the same
        // host alias, and a single (non-dedicated) cluster controller is created.
        String services =
                "<?xml version='1.0' encoding='UTF-8' ?>" +
                "<services version='1.0'>" +
                "  <admin version='2.0'>" +
                "    <adminserver hostalias='node1'/>" +
                "  </admin>"  +
                "  <container id='default' version='1.0'>" +
                "    <search/>" +
                "    <nodes>" +
                "      <node hostalias='node1'/>" +
                "    </nodes>" +
                "  </container>" +
                "  <content id='storage' version='1.0'>" +
                "    <redundancy>2</redundancy>" +
                "    <group>" +
                "      <node distribution-key='0' hostalias='node1'/>" +
                "      <node distribution-key='1' hostalias='node1'/>" +
                "    </group>" +
                "    <tuning>" +
                "      <cluster-controller>" +
                "        <transition-time>0</transition-time>" +
                "      </cluster-controller>" +
                "    </tuning>" +
                "    <documents>" +
                "      <document mode='store-only' type='type1'/>" +
                "    </documents>" +
                "    <engine>" +
                "      <proton/>" +
                "    </engine>" +
                "  </content>" +
                " </services>";
        VespaModel model = createNonProvisionedMultitenantModel(services);
        assertEquals(1, model.getRoot().hostSystem().getHosts().size());
        ContentCluster content = model.getContentClusters().get("storage");
        assertEquals(2, content.getRootGroup().getNodes().size());
        ContainerCluster<?> controller = model.getAdmin().getClusterControllers();
        assertEquals(1, controller.getContainers().size());
}
// A content cluster referencing an existing container cluster for document
// processing (<document-processing cluster="container"/>) must build on a
// single shared host, with a cluster controller.
@Test
public void testModelWithReferencedIndexingCluster() {
String services =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<services version=\"1.0\">\n" +
"\n" +
" <admin version=\"2.0\">\n" +
" <adminserver hostalias=\"vespa-1\"/>\n" +
" <configservers>\n" +
" <configserver hostalias=\"vespa-1\"/>\n" +
" </configservers>\n" +
" </admin>\n" +
"\n" +
" <container id=\"container\" version=\"1.0\">\n" +
" <document-processing/>\n" +
" <document-api/>\n" +
" <search/>\n" +
" <nodes>\n" +
" <jvm options=\"-Xms512m -Xmx512m\"/>\n" +
" <node hostalias=\"vespa-1\"/>\n" +
" </nodes>\n" +
" </container>\n" +
"\n" +
" <content id=\"storage\" version=\"1.0\">\n" +
" <search>\n" +
" <visibility-delay>1.0</visibility-delay>\n" +
" </search>\n" +
" <redundancy>2</redundancy>\n" +
" <documents>\n" +
" <document type=\"type1\" mode=\"index\"/>\n" +
" <document-processing cluster=\"container\"/>\n" +
" </documents>\n" +
" <nodes>\n" +
" <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
" </nodes>\n" +
" </content>\n" +
"\n" +
"</services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertEquals(1, model.getRoot().hostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(1, content.getRootGroup().getNodes().size());
ContainerCluster<?> controller = model.getAdmin().getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
// Non-hosted with an explicit hosts.xml: three named hosts shared between the
// container and content clusters; all three must appear in the model.
@Test
public void testSharedNodesNotHosted() {
String hosts =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<hosts>\n" +
" <host name=\"vespa-1\">\n" +
" <alias>vespa-1</alias>\n" +
" </host>\n" +
" <host name=\"vespa-2\">\n" +
" <alias>vespa-2</alias>\n" +
" </host>\n" +
" <host name=\"vespa-3\">\n" +
" <alias>vespa-3</alias>\n" +
" </host>\n" +
"</hosts>";
String services =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<services version=\"1.0\">\n" +
"\n" +
" <admin version=\"2.0\">\n" +
" <adminserver hostalias=\"vespa-1\"/>\n" +
" <configservers>\n" +
" <configserver hostalias=\"vespa-1\"/>\n" +
" </configservers>\n" +
" </admin>\n" +
"\n" +
" <container id=\"container\" version=\"1.0\">\n" +
" <document-processing/>\n" +
" <document-api/>\n" +
" <search/>\n" +
" <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" +
" <node hostalias=\"vespa-1\"/>\n" +
" <node hostalias=\"vespa-2\"/>\n" +
" <node hostalias=\"vespa-3\"/>\n" +
" </nodes>\n" +
" </container>\n" +
"\n" +
" <content id=\"storage\" version=\"1.0\">\n" +
" <search>\n" +
" <visibility-delay>1.0</visibility-delay>\n" +
" </search>\n" +
" <redundancy>2</redundancy>\n" +
" <documents>\n" +
" <document type=\"type1\" mode=\"index\"/>\n" +
" <document-processing cluster=\"container\"/>\n" +
" </documents>\n" +
" <nodes>\n" +
" <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
" <node hostalias=\"vespa-2\" distribution-key=\"1\"/>\n" +
" <node hostalias=\"vespa-3\" distribution-key=\"2\"/>\n" +
" </nodes>\n" +
" </content>\n" +
"\n" +
"</services>";
VespaModel model = createNonProvisionedModel(false, hosts, services);
assertEquals(3, model.getRoot().hostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(3, content.getRootGroup().getNodes().size());
}
// Multitenant, non-hosted: two content clusters ('storage' and 'search') share
// the same single host; both must be built and a cluster controller created.
@Test
public void testMultitenantButNotHostedSharedContentNode() {
String services =
"<?xml version='1.0' encoding='UTF-8' ?>" +
"<services version='1.0'>" +
" <admin version='2.0'>" +
" <adminserver hostalias='node1'/>" +
" </admin>" +
" <container id='default' version='1.0'>" +
" <search/>" +
" <nodes>" +
" <node hostalias='node1'/>" +
" </nodes>" +
" </container>" +
" <content id='storage' version='1.0'>" +
" <redundancy>2</redundancy>" +
" <group>" +
" <node distribution-key='0' hostalias='node1'/>" +
" <node distribution-key='1' hostalias='node1'/>" +
" </group>" +
" <tuning>" +
" <cluster-controller>" +
" <transition-time>0</transition-time>" +
" </cluster-controller>" +
" </tuning>" +
" <documents>" +
" <document mode='store-only' type='type1'/>" +
" </documents>" +
" <engine>" +
" <proton/>" +
" </engine>" +
" </content>" +
" <content id='search' version='1.0'>" +
" <redundancy>2</redundancy>" +
" <group>" +
" <node distribution-key='0' hostalias='node1'/>" +
" </group>" +
" <documents>" +
" <document type='type1'/>" +
" </documents>" +
" </content>" +
" </services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertEquals(1, model.getRoot().hostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(2, content.getRootGroup().getNodes().size());
ContainerCluster<?> controller = model.getAdmin().getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
// Verifies the 'stateful' flag on provisioned cluster memberships: plain
// container clusters are stateless, while clusters with ZooKeeper and content
// clusters are stateful.
@Test
public void testStatefulProperty() {
String servicesXml =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='qrs'>" +
" <nodes count='1'/>" +
" </container>" +
" <container version='1.0' id='zk'>" +
" <zookeeper/>" +
" <nodes count='3'/>" +
" </container>" +
" <content version='1.0' id='content'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(9);
VespaModel model = tester.createModel(servicesXml, true);
// Expected statefulness per cluster id.
Map<String, Boolean> tests = Map.of("qrs", false,
"zk", true,
"content", true);
// Group provisioned hosts by the cluster they are a member of.
Map<String, List<HostResource>> hostsByCluster = model.hostSystem().getHosts().stream()
.collect(Collectors.groupingBy(h -> h.spec().membership().get().cluster().id().value()));
tests.forEach((clusterId, stateful) -> {
List<HostResource> hosts = hostsByCluster.getOrDefault(clusterId, List.of());
assertFalse(hosts.isEmpty(), "Hosts are provisioned for '" + clusterId + "'");
assertEquals(stateful,
hosts.stream().allMatch(h -> h.spec().membership().get().cluster().isStateful()),
"Hosts in cluster '" + clusterId + "' are " + (stateful ? "" : "not ") + "stateful");
});
}
// A ZooKeeper container cluster keeps its target node count while one node is
// being retired: 3 active containers plus 1 retired.
@Test
public void containerWithZooKeeperSuboptimalNodeCountDuringRetirement() {
String servicesXml =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='zk'>" +
" <zookeeper/>" +
" <nodes count='3'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(4);
// "node-1-3-50-04" — NOTE(review): presumably marks this host as retired; confirm against VespaModelTester.createModel.
VespaModel model = tester.createModel(servicesXml, true, "node-1-3-50-04");
ApplicationContainerCluster cluster = model.getContainerClusters().get("zk");
assertEquals(1, cluster.getContainers().stream().filter(Container::isRetired).count());
assertEquals(3, cluster.getContainers().stream().filter(c -> !c.isRetired()).count());
}
// Verifies ZookeeperServerConfig across a redeployment that replaces two of
// three nodes: new servers are flagged 'joining', replaced ones 'retired'.
@Test
public void containerWithZooKeeperJoiningServers() {
Function<Integer, String> servicesXml = (nodeCount) -> {
return "<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='zk'>" +
" <zookeeper/>" +
" <nodes count='" + nodeCount + "'/>" +
" </container>" +
"</services>";
};
VespaModelTester tester = new VespaModelTester();
tester.addHosts(5);
VespaModel model = tester.createModel(servicesXml.apply(3), true);
{
// First deployment: no server should be in the joining state.
ApplicationContainerCluster cluster = model.getContainerClusters().get("zk");
ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder();
cluster.getContainers().forEach(c -> c.getConfig(config));
cluster.getConfig(config);
assertTrue(config.build().server().stream().noneMatch(ZookeeperServerConfig.Server::joining), "Initial servers are not joining");
}
{
// Redeploy with the previous model and two hosts taken out of service;
// their replacements (ids 3 and 4) must join, the old ones (1 and 2) retire.
VespaModel nextModel = tester.createModel(Zone.defaultZone(), servicesXml.apply(3), true, false, false, 0, Optional.of(model), new DeployState.Builder(), "node-1-3-50-04", "node-1-3-50-03");
ApplicationContainerCluster cluster = nextModel.getContainerClusters().get("zk");
ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder();
cluster.getContainers().forEach(c -> c.getConfig(config));
cluster.getConfig(config);
assertEquals(Map.of(0, false,
1, false,
2, false,
3, true,
4, true),
config.build().server().stream().collect(Collectors.toMap(ZookeeperServerConfig.Server::id,
ZookeeperServerConfig.Server::joining)),
"New nodes are joining");
assertEquals(Map.of(0, false,
1, true,
2, true,
3, false,
4, false),
config.build().server().stream().collect(Collectors.toMap(ZookeeperServerConfig.Server::id,
ZookeeperServerConfig.Server::retired)),
"Retired nodes are retired");
}
}
// Convenience overload: multitenant model without a hosts.xml.
private VespaModel createNonProvisionedMultitenantModel(String services) {
return createNonProvisionedModel(true, null, services);
}
// Builds a model from a mock application package (schemas for 'type1'),
// bypassing hosted provisioning. 'hosts' may be null when no hosts.xml is needed.
private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) {
VespaModelCreatorWithMockPkg modelCreatorWithMockPkg = new VespaModelCreatorWithMockPkg(hosts, services, generateSchemas("type1"));
ApplicationPackage appPkg = modelCreatorWithMockPkg.appPkg;
DeployState deployState = new DeployState.Builder().applicationPackage(appPkg).
properties((new TestProperties()).setMultitenant(multitenant)).
build();
return modelCreatorWithMockPkg.create(false, deployState);
}
/** Returns the JVM heap size, as a percentage of physical memory, configured for the given cluster. */
private int physicalMemoryPercentage(ContainerCluster<?> cluster) {
    QrStartConfig.Builder builder = new QrStartConfig.Builder();
    cluster.getConfig(builder);
    QrStartConfig config = builder.build();
    return config.jvm().heapSizeAsPercentageOfPhysicalMemory();
}
/** Returns the memory size (bytes) the first search node of the given cluster is configured with. */
private long protonMemorySize(ContentCluster cluster) {
    ProtonConfig.Builder builder = new ProtonConfig.Builder();
    cluster.getSearch().getIndexed().getSearchNode(0).getConfig(builder);
    ProtonConfig config = builder.build();
    return config.hwinfo().memory().size();
}
// Proton config must be derived from the declared node resources: with
// disk-speed 'slow' the disk write speed setting becomes 40 on every node.
@Test
public void require_that_proton_config_is_tuned_based_on_node_resources() {
String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
"<services>",
" <content version='1.0' id='test'>",
" <documents>",
" <document type='type1' mode='index'/>",
" </documents>",
" <nodes count='2'>",
" <resources vcpu='1' memory='3Gb' disk='9Gb' bandwidth='5Gbps' disk-speed='slow'/>",
" </nodes>",
" </content>",
"</services>");
VespaModelTester tester = new VespaModelTester();
tester.addHosts(new NodeResources(1, 3, 10, 5, NodeResources.DiskSpeed.slow), 5);
VespaModel model = tester.createModel(services, true, 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
assertEquals(2, cluster.getSearchNodes().size());
assertEquals(40, getProtonConfig(cluster, 0).hwinfo().disk().writespeed(), 0.001);
assertEquals(40, getProtonConfig(cluster, 1).hwinfo().disk().writespeed(), 0.001);
}
/** Builds the ProtonConfig produced by the search node at the given index of the cluster. */
private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) {
    List<SearchNode> nodes = cluster.getSearchNodes();
    assertTrue(searchNodeIdx < nodes.size());
    ProtonConfig.Builder configBuilder = new ProtonConfig.Builder();
    nodes.get(searchNodeIdx).getConfig(configBuilder);
    return new ProtonConfig(configBuilder);
}
// Precedence check for proton settings: explicit <config> overrides win
// (maxtlssize=2000), explicit <tuning> wins (maxmemorygain=1000), and only
// the remaining values fall back to resource-derived defaults.
@Test
public void require_that_config_override_and_explicit_proton_tuning_and_resource_limits_have_precedence_over_default_node_resource_tuning() {
String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
"<services>",
" <content version='1.0' id='test'>",
" <config name='vespa.config.search.core.proton'>",
" <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>",
" </config>",
" <documents>",
" <document type='type1' mode='index'/>",
" </documents>",
" <nodes count='1'>",
" <resources vcpu='1' memory='128Gb' disk='100Gb'/>",
" </nodes>",
" <engine>",
" <proton>",
" <tuning>",
" <searchnode>",
" <flushstrategy>",
" <native>",
" <total>",
" <maxmemorygain>1000</maxmemorygain>",
" </total>",
" </native>",
" </flushstrategy>",
" </searchnode>",
" </tuning>",
" </proton>",
" </engine>",
" </content>",
"</services>");
VespaModelTester tester = new VespaModelTester();
tester.addHosts(new NodeResources(1, 3, 10, 1), 4);
tester.addHosts(new NodeResources(1, 128, 100, 0.3), 1);
VespaModel model = tester.createModel(services, true, 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId());
assertEquals(2000, cfg.flush().memory().maxtlssize()); // explicit config override
assertEquals(1000, cfg.flush().memory().maxmemory()); // explicit tuning value
// Default resource-derived value: 8% of usable memory (node memory minus reserved).
assertEquals((long) ((128 - reservedMemoryGb) * GB * 0.08), cfg.flush().memory().each().maxmemory());
}
/** Resolves the ProtonConfig served by the model for the given config id. */
private static ProtonConfig getProtonConfig(VespaModel model, String configId) {
    ProtonConfig.Builder configBuilder = new ProtonConfig.Builder();
    model.getConfig(configBuilder, configId);
    ProtonConfig resolved = new ProtonConfig(configBuilder);
    return resolved;
}
// Shared driver: deploys the given services and verifies that the logserver
// host also runs a logserver container, that the container serves application
// metadata config (generation 1), and that logd is configured to use it.
private void testContainerOnLogserverHost(String services, boolean useDedicatedNodeForLogserver) {
int numberOfHosts = 2;
VespaModelTester tester = new VespaModelTester();
tester.useDedicatedNodeForLogserver(useDedicatedNodeForLogserver);
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(Zone.defaultZone(), services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
Admin admin = model.getAdmin();
Logserver logserver = admin.getLogserver();
HostResource hostResource = logserver.getHostResource();
assertNotNull(hostResource.getService("logserver"));
String containerServiceType = ContainerServiceType.LOGSERVER_CONTAINER.serviceName;
assertNotNull(hostResource.getService(containerServiceType));
String configId = admin.getLogserver().getHostResource().getService(containerServiceType).getConfigId();
ApplicationMetadataConfig.Builder builder = new ApplicationMetadataConfig.Builder();
model.getConfig(builder, configId);
ApplicationMetadataConfig cfg = new ApplicationMetadataConfig(builder);
assertEquals(1, cfg.generation());
LogdConfig.Builder logdConfigBuilder = new LogdConfig.Builder();
model.getConfig(logdConfigBuilder, configId);
LogdConfig logdConfig = new LogdConfig(logdConfigBuilder);
assertTrue(logdConfig.logserver().use());
}
// Asserts that exactly nodeCount hosts were provisioned into the cluster with
// the given id, type and (possibly null) combined id.
private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Id combinedId,
ClusterSpec.Type type, VespaModel model) {
assertEquals(nodeCount,
model.hostSystem().getHosts().stream()
.map(h -> h.spec().membership().get().cluster())
.filter(spec -> spec.id().equals(id) && spec.type().equals(type) && spec.combinedId().equals(Optional.ofNullable(combinedId)))
.count(),
"Nodes in cluster " + id + " with type " + type + (combinedId != null ? ", combinedId " + combinedId : ""));
}
// Overload for clusters without a combined id.
private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Type type, VespaModel model) {
assertProvisioned(nodeCount, id, null, type, model);
}
// DeployLogger test double that records every logged (level, message) pair
// for later inspection by tests.
record TestLogger(List<LogMessage> msgs) implements DeployLogger {
public TestLogger() {
this(new ArrayList<>());
}
@Override
public void log(Level level, String message) {
msgs.add(new LogMessage(level, message));
}
record LogMessage(Level level, String message) {}
}
} |
I think it would be nicer to create a sub-object for this. | private static void toSlime(NodeResources resources, Cursor resourcesObject) {
// Scalar resource dimensions are always written.
resourcesObject.setDouble(vcpuKey, resources.vcpu());
resourcesObject.setDouble(memoryKey, resources.memoryGb());
resourcesObject.setDouble(diskKey, resources.diskGb());
resourcesObject.setDouble(bandwidthKey, resources.bandwidthGbps());
resourcesObject.setString(diskSpeedKey, diskSpeedToString(resources.diskSpeed()));
resourcesObject.setString(storageTypeKey, storageTypeToString(resources.storageType()));
resourcesObject.setString(architectureKey, architectureToString(resources.architecture()));
// GPU fields are only written when non-default — NOTE(review): presumably to
// keep the serialized form readable by older parsers; confirm.
if (!resources.gpuResources().isDefault()) {
resourcesObject.setLong(gpuCountKey, resources.gpuResources().count());
resourcesObject.setDouble(gpuMemoryKey, resources.gpuResources().memoryGb());
}
} | if (!resources.gpuResources().isDefault()) { | private static void toSlime(NodeResources resources, Cursor resourcesObject) {
// Scalar resource dimensions are always written.
resourcesObject.setDouble(vcpuKey, resources.vcpu());
resourcesObject.setDouble(memoryKey, resources.memoryGb());
resourcesObject.setDouble(diskKey, resources.diskGb());
resourcesObject.setDouble(bandwidthKey, resources.bandwidthGbps());
resourcesObject.setString(diskSpeedKey, diskSpeedToString(resources.diskSpeed()));
resourcesObject.setString(storageTypeKey, storageTypeToString(resources.storageType()));
resourcesObject.setString(architectureKey, architectureToString(resources.architecture()));
// GPU fields are only written when non-default — NOTE(review): presumably to
// keep the serialized form readable by older parsers; confirm.
if (!resources.gpuResources().isDefault()) {
resourcesObject.setLong(gpuCountKey, resources.gpuResources().count());
resourcesObject.setDouble(gpuMemoryKey, resources.gpuResources().memoryGb());
}
} | class AllocatedHostsSerializer {
// Field names of the serialized AllocatedHosts format. NOTE(review): these
// keys are part of the persisted wire format — renaming any of them breaks
// reading of previously serialized data.
private static final String mappingKey = "mapping";
private static final String hostSpecKey = "hostSpec";
private static final String hostSpecHostNameKey = "hostName";
private static final String hostSpecMembershipKey = "membership";
private static final String realResourcesKey = "realResources";
private static final String advertisedResourcesKey = "advertisedResources";
private static final String requestedResourcesKey = "requestedResources";
private static final String vcpuKey = "vcpu";
private static final String memoryKey = "memory";
private static final String diskKey = "disk";
private static final String bandwidthKey = "bandwidth";
private static final String diskSpeedKey = "diskSpeed";
private static final String storageTypeKey = "storageType";
private static final String architectureKey = "architecture";
private static final String gpuCountKey = "gpuCount";
private static final String gpuMemoryKey = "gpuMemory";
/** Wanted version */
private static final String hostSpecVespaVersionKey = "vespaVersion";
/** Wanted docker image repo */
private static final String hostSpecDockerImageRepoKey = "dockerImageRepo";
/** Current version */
private static final String hostSpecCurrentVespaVersionKey = "currentVespaVersion";
private static final String hostSpecNetworkPortsKey = "ports";
/** Serializes the given allocated hosts to JSON bytes. */
public static byte[] toJson(AllocatedHosts allocatedHosts) throws IOException {
Slime slime = new Slime();
toSlime(allocatedHosts, slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
/** Writes all hosts as an array of host-spec objects under the 'mapping' key. */
public static void toSlime(AllocatedHosts allocatedHosts, Cursor cursor) {
Cursor array = cursor.setArray(mappingKey);
for (HostSpec host : allocatedHosts.getHosts())
toSlime(host, array.addObject().setObject(hostSpecKey));
}
// Writes one host spec; optional parts (membership, requested resources,
// current version, network ports) are omitted when absent.
private static void toSlime(HostSpec host, Cursor object) {
object.setString(hostSpecHostNameKey, host.hostname());
host.membership().ifPresent(membership -> {
object.setString(hostSpecMembershipKey, membership.stringValue());
object.setString(hostSpecVespaVersionKey, membership.cluster().vespaVersion().toFullString());
membership.cluster().dockerImageRepo().ifPresent(repo -> object.setString(hostSpecDockerImageRepoKey, repo.untagged()));
});
toSlime(host.realResources(), object.setObject(realResourcesKey));
toSlime(host.advertisedResources(), object.setObject(advertisedResourcesKey));
host.requestedResources().ifPresent(resources -> toSlime(resources, object.setObject(requestedResourcesKey)));
host.version().ifPresent(version -> object.setString(hostSpecCurrentVespaVersionKey, version.toFullString()));
host.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray(hostSpecNetworkPortsKey)));
}
/** Deserializes hosts from JSON produced by {@link #toJson}. */
public static AllocatedHosts fromJson(byte[] json) {
return fromSlime(SlimeUtils.jsonToSlime(json).get());
}
// Reads all host specs; LinkedHashSet preserves the serialized order.
public static AllocatedHosts fromSlime(Inspector inspector) {
Inspector array = inspector.field(mappingKey);
Set<HostSpec> hosts = new LinkedHashSet<>();
array.traverse((ArrayTraverser)(i, host) -> {
hosts.add(hostFromSlime(host.field(hostSpecKey)));
});
return AllocatedHosts.withHosts(hosts);
}
// A spec with a membership field carries full resource/membership data;
// otherwise only host name and network ports were serialized.
private static HostSpec hostFromSlime(Inspector object) {
if (object.field(hostSpecMembershipKey).valid()) {
return new HostSpec(object.field(hostSpecHostNameKey).asString(),
nodeResourcesFromSlime(object.field(realResourcesKey)),
nodeResourcesFromSlime(object.field(advertisedResourcesKey)),
optionalNodeResourcesFromSlime(object.field(requestedResourcesKey)),
membershipFromSlime(object),
optionalString(object.field(hostSpecCurrentVespaVersionKey)).map(com.yahoo.component.Version::new),
NetworkPortsSerializer.fromSlime(object.field(hostSpecNetworkPortsKey)),
optionalDockerImage(object.field(hostSpecDockerImageRepoKey)));
}
else {
return new HostSpec(object.field(hostSpecHostNameKey).asString(),
NetworkPortsSerializer.fromSlime(object.field(hostSpecNetworkPortsKey)));
}
}
private static NodeResources nodeResourcesFromSlime(Inspector resources) {
return new NodeResources(resources.field(vcpuKey).asDouble(),
resources.field(memoryKey).asDouble(),
resources.field(diskKey).asDouble(),
resources.field(bandwidthKey).asDouble(),
diskSpeedFromSlime(resources.field(diskSpeedKey)),
storageTypeFromSlime(resources.field(storageTypeKey)),
architectureFromSlime(resources.field(architectureKey)),
gpuResourcesFromSlime(resources));
}
// GPU fields are optional in the serialized form; missing fields mean the
// default (no GPU) resources.
private static NodeResources.GpuResources gpuResourcesFromSlime(Inspector resources) {
Inspector gpuCountField = resources.field(gpuCountKey);
Inspector gpuMemoryField = resources.field(gpuMemoryKey);
if (!gpuCountField.valid() || !gpuMemoryField.valid()) return NodeResources.GpuResources.getDefault();
return new NodeResources.GpuResources((int) gpuCountField.asLong(), gpuMemoryField.asDouble());
}
// Missing requested resources deserialize as 'unspecified'.
private static NodeResources optionalNodeResourcesFromSlime(Inspector resources) {
if ( ! resources.valid()) return NodeResources.unspecified();
return nodeResourcesFromSlime(resources);
}
// The *FromSlime/*ToString pairs below must stay in sync: every value the
// ToString variant can write must be readable by its FromSlime counterpart.
private static NodeResources.DiskSpeed diskSpeedFromSlime(Inspector diskSpeed) {
return switch (diskSpeed.asString()) {
case "fast" -> NodeResources.DiskSpeed.fast;
case "slow" -> NodeResources.DiskSpeed.slow;
case "any" -> NodeResources.DiskSpeed.any;
default -> throw new IllegalStateException("Illegal disk-speed value '" + diskSpeed.asString() + "'");
};
}
private static String diskSpeedToString(NodeResources.DiskSpeed diskSpeed) {
return switch (diskSpeed) {
case fast -> "fast";
case slow -> "slow";
case any -> "any";
};
}
private static NodeResources.StorageType storageTypeFromSlime(Inspector storageType) {
return switch (storageType.asString()) {
case "remote" -> NodeResources.StorageType.remote;
case "local" -> NodeResources.StorageType.local;
case "any" -> NodeResources.StorageType.any;
default -> throw new IllegalStateException("Illegal storage-type value '" + storageType.asString() + "'");
};
}
private static String storageTypeToString(NodeResources.StorageType storageType) {
return switch (storageType) {
case remote -> "remote";
case local -> "local";
case any -> "any";
};
}
// A missing architecture field defaults to x86_64 — NOTE(review): presumably
// for data written before the field existed; confirm.
private static NodeResources.Architecture architectureFromSlime(Inspector architecture) {
if ( ! architecture.valid()) return NodeResources.Architecture.x86_64;
return switch (architecture.asString()) {
case "x86_64" -> NodeResources.Architecture.x86_64;
case "arm64" -> NodeResources.Architecture.arm64;
case "any" -> NodeResources.Architecture.any;
default -> throw new IllegalStateException("Illegal architecture value '" + architecture.asString() + "'");
};
}
private static String architectureToString(NodeResources.Architecture architecture) {
return switch (architecture) {
case x86_64 -> "x86_64";
case arm64 -> "arm64";
case any -> "any";
};
}
// Rebuilds the cluster membership; the docker image repo is optional.
private static ClusterMembership membershipFromSlime(Inspector object) {
return ClusterMembership.from(object.field(hostSpecMembershipKey).asString(),
com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersionKey).asString()),
object.field(hostSpecDockerImageRepoKey).valid()
? Optional.of(DockerImage.fromString(object.field(hostSpecDockerImageRepoKey).asString()))
: Optional.empty());
}
private static Optional<String> optionalString(Inspector inspector) {
if ( ! inspector.valid()) return Optional.empty();
return Optional.of(inspector.asString());
}
private static Optional<DockerImage> optionalDockerImage(Inspector inspector) {
if ( ! inspector.valid()) return Optional.empty();
return Optional.of(DockerImage.fromString(inspector.asString()));
}
} | class AllocatedHostsSerializer {
// Field names of the serialized AllocatedHosts format. NOTE(review): these
// keys are part of the persisted wire format — renaming any of them breaks
// reading of previously serialized data.
private static final String mappingKey = "mapping";
private static final String hostSpecKey = "hostSpec";
private static final String hostSpecHostNameKey = "hostName";
private static final String hostSpecMembershipKey = "membership";
private static final String realResourcesKey = "realResources";
private static final String advertisedResourcesKey = "advertisedResources";
private static final String requestedResourcesKey = "requestedResources";
private static final String vcpuKey = "vcpu";
private static final String memoryKey = "memory";
private static final String diskKey = "disk";
private static final String bandwidthKey = "bandwidth";
private static final String diskSpeedKey = "diskSpeed";
private static final String storageTypeKey = "storageType";
private static final String architectureKey = "architecture";
private static final String gpuCountKey = "gpuCount";
private static final String gpuMemoryKey = "gpuMemory";
/** Wanted version */
private static final String hostSpecVespaVersionKey = "vespaVersion";
/** Wanted docker image repo */
private static final String hostSpecDockerImageRepoKey = "dockerImageRepo";
/** Current version */
private static final String hostSpecCurrentVespaVersionKey = "currentVespaVersion";
private static final String hostSpecNetworkPortsKey = "ports";
/** Serializes the given allocated hosts to JSON bytes. */
public static byte[] toJson(AllocatedHosts allocatedHosts) throws IOException {
Slime slime = new Slime();
toSlime(allocatedHosts, slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
/** Writes all hosts as an array of host-spec objects under the 'mapping' key. */
public static void toSlime(AllocatedHosts allocatedHosts, Cursor cursor) {
Cursor array = cursor.setArray(mappingKey);
for (HostSpec host : allocatedHosts.getHosts())
toSlime(host, array.addObject().setObject(hostSpecKey));
}
// Writes one host spec; optional parts (membership, requested resources,
// current version, network ports) are omitted when absent.
private static void toSlime(HostSpec host, Cursor object) {
object.setString(hostSpecHostNameKey, host.hostname());
host.membership().ifPresent(membership -> {
object.setString(hostSpecMembershipKey, membership.stringValue());
object.setString(hostSpecVespaVersionKey, membership.cluster().vespaVersion().toFullString());
membership.cluster().dockerImageRepo().ifPresent(repo -> object.setString(hostSpecDockerImageRepoKey, repo.untagged()));
});
toSlime(host.realResources(), object.setObject(realResourcesKey));
toSlime(host.advertisedResources(), object.setObject(advertisedResourcesKey));
host.requestedResources().ifPresent(resources -> toSlime(resources, object.setObject(requestedResourcesKey)));
host.version().ifPresent(version -> object.setString(hostSpecCurrentVespaVersionKey, version.toFullString()));
host.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray(hostSpecNetworkPortsKey)));
}
/** Deserializes hosts from JSON produced by {@link #toJson}. */
public static AllocatedHosts fromJson(byte[] json) {
return fromSlime(SlimeUtils.jsonToSlime(json).get());
}
// Reads all host specs; LinkedHashSet preserves the serialized order.
public static AllocatedHosts fromSlime(Inspector inspector) {
Inspector array = inspector.field(mappingKey);
Set<HostSpec> hosts = new LinkedHashSet<>();
array.traverse((ArrayTraverser)(i, host) -> {
hosts.add(hostFromSlime(host.field(hostSpecKey)));
});
return AllocatedHosts.withHosts(hosts);
}
// A spec with a membership field carries full resource/membership data;
// otherwise only host name and network ports were serialized.
private static HostSpec hostFromSlime(Inspector object) {
if (object.field(hostSpecMembershipKey).valid()) {
return new HostSpec(object.field(hostSpecHostNameKey).asString(),
nodeResourcesFromSlime(object.field(realResourcesKey)),
nodeResourcesFromSlime(object.field(advertisedResourcesKey)),
optionalNodeResourcesFromSlime(object.field(requestedResourcesKey)),
membershipFromSlime(object),
optionalString(object.field(hostSpecCurrentVespaVersionKey)).map(com.yahoo.component.Version::new),
NetworkPortsSerializer.fromSlime(object.field(hostSpecNetworkPortsKey)),
optionalDockerImage(object.field(hostSpecDockerImageRepoKey)));
}
else {
return new HostSpec(object.field(hostSpecHostNameKey).asString(),
NetworkPortsSerializer.fromSlime(object.field(hostSpecNetworkPortsKey)));
}
}
private static NodeResources nodeResourcesFromSlime(Inspector resources) {
return new NodeResources(resources.field(vcpuKey).asDouble(),
resources.field(memoryKey).asDouble(),
resources.field(diskKey).asDouble(),
resources.field(bandwidthKey).asDouble(),
diskSpeedFromSlime(resources.field(diskSpeedKey)),
storageTypeFromSlime(resources.field(storageTypeKey)),
architectureFromSlime(resources.field(architectureKey)),
gpuResourcesFromSlime(resources));
}
// GPU fields are optional in the serialized form; missing fields mean the
// default (no GPU) resources.
private static NodeResources.GpuResources gpuResourcesFromSlime(Inspector resources) {
Inspector gpuCountField = resources.field(gpuCountKey);
Inspector gpuMemoryField = resources.field(gpuMemoryKey);
if (!gpuCountField.valid() || !gpuMemoryField.valid()) return NodeResources.GpuResources.getDefault();
return new NodeResources.GpuResources((int) gpuCountField.asLong(), gpuMemoryField.asDouble());
}
// Missing requested resources deserialize as 'unspecified'.
private static NodeResources optionalNodeResourcesFromSlime(Inspector resources) {
if ( ! resources.valid()) return NodeResources.unspecified();
return nodeResourcesFromSlime(resources);
}
// The *FromSlime/*ToString pairs below must stay in sync: every value the
// ToString variant can write must be readable by its FromSlime counterpart.
private static NodeResources.DiskSpeed diskSpeedFromSlime(Inspector diskSpeed) {
return switch (diskSpeed.asString()) {
case "fast" -> NodeResources.DiskSpeed.fast;
case "slow" -> NodeResources.DiskSpeed.slow;
case "any" -> NodeResources.DiskSpeed.any;
default -> throw new IllegalStateException("Illegal disk-speed value '" + diskSpeed.asString() + "'");
};
}
private static String diskSpeedToString(NodeResources.DiskSpeed diskSpeed) {
return switch (diskSpeed) {
case fast -> "fast";
case slow -> "slow";
case any -> "any";
};
}
private static NodeResources.StorageType storageTypeFromSlime(Inspector storageType) {
return switch (storageType.asString()) {
case "remote" -> NodeResources.StorageType.remote;
case "local" -> NodeResources.StorageType.local;
case "any" -> NodeResources.StorageType.any;
default -> throw new IllegalStateException("Illegal storage-type value '" + storageType.asString() + "'");
};
}
private static String storageTypeToString(NodeResources.StorageType storageType) {
return switch (storageType) {
case remote -> "remote";
case local -> "local";
case any -> "any";
};
}
// A missing architecture field defaults to x86_64 — NOTE(review): presumably
// for data written before the field existed; confirm.
private static NodeResources.Architecture architectureFromSlime(Inspector architecture) {
if ( ! architecture.valid()) return NodeResources.Architecture.x86_64;
return switch (architecture.asString()) {
case "x86_64" -> NodeResources.Architecture.x86_64;
case "arm64" -> NodeResources.Architecture.arm64;
case "any" -> NodeResources.Architecture.any;
default -> throw new IllegalStateException("Illegal architecture value '" + architecture.asString() + "'");
};
}
private static String architectureToString(NodeResources.Architecture architecture) {
return switch (architecture) {
case x86_64 -> "x86_64";
case arm64 -> "arm64";
case any -> "any";
};
}
// Rebuilds the cluster membership; the docker image repo is optional.
private static ClusterMembership membershipFromSlime(Inspector object) {
return ClusterMembership.from(object.field(hostSpecMembershipKey).asString(),
com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersionKey).asString()),
object.field(hostSpecDockerImageRepoKey).valid()
? Optional.of(DockerImage.fromString(object.field(hostSpecDockerImageRepoKey).asString()))
: Optional.empty());
}
private static Optional<String> optionalString(Inspector inspector) {
if ( ! inspector.valid()) return Optional.empty();
return Optional.of(inspector.asString());
}
private static Optional<DockerImage> optionalDockerImage(Inspector inspector) {
if ( ! inspector.valid()) return Optional.empty();
return Optional.of(DockerImage.fromString(inspector.asString()));
}
} |
I guess we want to avoid the patterns that gives NaN as it does not compare well. There is a suggestion in #24938 after reading the documentation for Double.longBitsToDouble | public double asDouble() {
XXHash64 hasher = XXHashFactory.fastestInstance().hash64();
byte[] data = value.getBytes(StandardCharsets.UTF_8);
return Double.longBitsToDouble(hasher.hash(data, 0, data.length, 0));
} | return Double.longBitsToDouble(hasher.hash(data, 0, data.length, 0)); | public double asDouble() {
byte[] data = value.getBytes(StandardCharsets.UTF_8);
long h = LongHashFunction.xx3().hashBytes(data);
if ((h & 0x7ff0000000000000L) == 0x7ff0000000000000L) {
h = h & 0xffefffffffffffffL;
}
return Double.longBitsToDouble(h);
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} |
`analyzeUdf` could be removed. | public void analyze(Analyzer analyzer) throws UserException {
super.analyze(analyzer);
analyzeCommon(analyzer);
if (isAggregate) {
analyzeUda();
} else {
if (isStarrocksJar) {
analyzeStarrocksJarUdf();
} else {
analyzeUdf();
}
}
} | analyzeUdf(); | public void analyze(Analyzer analyzer) throws UserException {
super.analyze(analyzer);
analyzeCommon(analyzer);
if (isAggregate) {
analyzeUda();
} else {
Preconditions.checkArgument(isStarrocksJar);
analyzeStarrocksJarUdf();
}
} | class CreateFunctionStmt extends DdlStmt {
public static final String OBJECT_FILE_KEY = "object_file";
public static final String SYMBOL_KEY = "symbol";
public static final String PREPARE_SYMBOL_KEY = "prepare_fn";
public static final String CLOSE_SYMBOL_KEY = "close_fn";
public static final String MD5_CHECKSUM = "md5";
public static final String INIT_KEY = "init_fn";
public static final String UPDATE_KEY = "update_fn";
public static final String MERGE_KEY = "merge_fn";
public static final String SERIALIZE_KEY = "serialize_fn";
public static final String FINALIZE_KEY = "finalize_fn";
public static final String GET_VALUE_KEY = "get_value_fn";
public static final String REMOVE_KEY = "remove_fn";
public static final String TYPE_KEY = "type";
public static final String OBJECT_FORMAT_STARROCKS_JAR = "StarrocksJar";
public static final String EVAL_METHOD_NAME = "evaluate";
private final FunctionName functionName;
private final boolean isAggregate;
private final FunctionArgsDef argsDef;
private final TypeDef returnType;
private TypeDef intermediateType;
private final Map<String, String> properties;
private boolean isStarrocksJar = false;
private String objectFile;
private Function function;
private String checksum;
private Class udfClass;
private static final ImmutableMap<PrimitiveType, Class> PrimitiveTypeToJavaClassType = new ImmutableMap.Builder<PrimitiveType, Class>()
.put(PrimitiveType.BOOLEAN, Boolean.class)
.put(PrimitiveType.TINYINT, Byte.class)
.put(PrimitiveType.SMALLINT, Short.class)
.put(PrimitiveType.INT, Integer.class)
.put(PrimitiveType.FLOAT, Float.class)
.put(PrimitiveType.DOUBLE, Double.class)
.put(PrimitiveType.BIGINT, Long.class)
.put(PrimitiveType.CHAR, String.class)
.put(PrimitiveType.VARCHAR, String.class)
.build();
public CreateFunctionStmt(boolean isAggregate, FunctionName functionName, FunctionArgsDef argsDef,
TypeDef returnType, TypeDef intermediateType, Map<String, String> properties) {
this.functionName = functionName;
this.isAggregate = isAggregate;
this.argsDef = argsDef;
this.returnType = returnType;
this.intermediateType = intermediateType;
if (properties == null) {
this.properties = ImmutableSortedMap.of();
} else {
this.properties = ImmutableSortedMap.copyOf(properties, String.CASE_INSENSITIVE_ORDER);
}
}
public FunctionName getFunctionName() {
return functionName;
}
public Function getFunction() {
return function;
}
@Override
private void analyzeCommon(Analyzer analyzer) throws AnalysisException {
functionName.analyze(analyzer);
if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN)) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN");
}
argsDef.analyze(analyzer);
returnType.analyze(analyzer);
if (intermediateType != null) {
intermediateType.analyze(analyzer);
} else {
intermediateType = returnType;
}
String object_format = properties.get(TYPE_KEY);
if (OBJECT_FORMAT_STARROCKS_JAR.equals(object_format)) {
isStarrocksJar = true;
}
objectFile = properties.get(OBJECT_FILE_KEY);
if (Strings.isNullOrEmpty(objectFile)) {
throw new AnalysisException("No 'object_file' in properties");
}
try {
computeObjectChecksum();
} catch (IOException | NoSuchAlgorithmException e) {
throw new AnalysisException("cannot to compute object's checksum");
}
String md5sum = properties.get(MD5_CHECKSUM);
if (md5sum != null && !md5sum.equalsIgnoreCase(checksum)) {
throw new AnalysisException("library's checksum is not equal with input, checksum=" + checksum);
}
if (isStarrocksJar) {
analyzeUdfClassInStarrocksJar();
}
}
private void analyzeUdfClassInStarrocksJar() throws AnalysisException {
String class_name = properties.get(SYMBOL_KEY);
if (Strings.isNullOrEmpty(class_name)) {
throw new AnalysisException("No '" + SYMBOL_KEY + "' in properties");
}
try {
URL[] urls = {new URL("jar:" + objectFile + "!/")};
URLClassLoader cl = URLClassLoader.newInstance(urls);
udfClass = cl.loadClass(class_name);
} catch (MalformedURLException e) {
throw new AnalysisException("failed to load object_file: " + objectFile);
} catch (ClassNotFoundException e) {
throw new AnalysisException("class '" + class_name + "' not found in object_file :" + objectFile);
}
}
private void computeObjectChecksum() throws IOException, NoSuchAlgorithmException {
if (FeConstants.runningUnitTest) {
checksum = "";
return;
}
URL url = new URL(objectFile);
URLConnection urlConnection = url.openConnection();
InputStream inputStream = urlConnection.getInputStream();
MessageDigest digest = MessageDigest.getInstance("MD5");
byte[] buf = new byte[4096];
int bytesRead = 0;
do {
bytesRead = inputStream.read(buf);
if (bytesRead < 0) {
break;
}
digest.update(buf, 0, bytesRead);
} while (true);
checksum = Hex.encodeHexString(digest.digest());
}
private void analyzeUda() throws AnalysisException {
AggregateFunction.AggregateFunctionBuilder builder =
AggregateFunction.AggregateFunctionBuilder.createUdfBuilder();
builder.name(functionName).argsType(argsDef.getArgTypes()).retType(returnType.getType()).
hasVarArgs(argsDef.isVariadic()).intermediateType(intermediateType.getType()).objectFile(objectFile);
String initFnSymbol = properties.get(INIT_KEY);
if (initFnSymbol == null) {
throw new AnalysisException("No 'init_fn' in properties");
}
String updateFnSymbol = properties.get(UPDATE_KEY);
if (updateFnSymbol == null) {
throw new AnalysisException("No 'update_fn' in properties");
}
String mergeFnSymbol = properties.get(MERGE_KEY);
if (mergeFnSymbol == null) {
throw new AnalysisException("No 'merge_fn' in properties");
}
function = builder.build();
function.setChecksum(checksum);
}
private void analyzeUdf() throws AnalysisException {
String symbol = properties.get(SYMBOL_KEY);
if (Strings.isNullOrEmpty(symbol)) {
throw new AnalysisException("No 'symbol' in properties");
}
String prepareFnSymbol = properties.get(PREPARE_SYMBOL_KEY);
String closeFnSymbol = properties.get(CLOSE_SYMBOL_KEY);
function = ScalarFunction.createUdf(
functionName, argsDef.getArgTypes(),
returnType.getType(), argsDef.isVariadic(), TFunctionBinaryType.HIVE,
objectFile, symbol, prepareFnSymbol, closeFnSymbol);
function.setChecksum(checksum);
}
private void checkStarrocksJarUdfType(Type type, Class ptype, String pname) throws AnalysisException {
if (!(type instanceof ScalarType)) {
throw new AnalysisException("UDF does not support non-scalar type: " + type);
}
ScalarType scalarType = (ScalarType) type;
Class cls = PrimitiveTypeToJavaClassType.get(scalarType.getPrimitiveType());
if (cls == null) {
throw new AnalysisException("UDF does not support type: " + scalarType);
}
if (!cls.equals(ptype)) {
throw new AnalysisException(String.format("UDF %s[%s] type does not match %s", pname,
ptype.getCanonicalName(), cls.getCanonicalName()));
}
}
private void checkStarrocksJarUdfMethod(Method method) throws AnalysisException {
String name = method.getName();
boolean checked = true;
if (EVAL_METHOD_NAME.equals(name)) {
Class retType = method.getReturnType();
checkStarrocksJarUdfType(returnType.getType(), retType, "Return");
if (method.getParameters().length != argsDef.getArgTypes().length) {
throw new AnalysisException(String.format("UDF '%s' parameter count does not match", name));
}
for (int i = 0; i < method.getParameters().length; i++) {
Parameter p = method.getParameters()[i];
checkStarrocksJarUdfType(argsDef.getArgTypes()[i], p.getType(), p.getName());
}
} else {
checked = false;
}
if (checked) {
if (Modifier.isStatic(method.getModifiers())) {
throw new AnalysisException(String.format("UDF '%s' should be non-static method", name));
}
}
}
private void checkStarrocksJarUdfClass() throws AnalysisException {
int evalMethodCount = 0;
for (Method m : udfClass.getMethods()) {
if (EVAL_METHOD_NAME.equals(m.getName())) {
evalMethodCount += 1;
}
checkStarrocksJarUdfMethod(m);
}
if (evalMethodCount != 1) {
throw new AnalysisException(String.format("UDF should have only one '%s' method", EVAL_METHOD_NAME));
}
}
private void analyzeStarrocksJarUdf() throws AnalysisException {
checkStarrocksJarUdfClass();
function = ScalarFunction.createUdf(
functionName, argsDef.getArgTypes(),
returnType.getType(), argsDef.isVariadic(), TFunctionBinaryType.SRJAR,
objectFile, udfClass.getCanonicalName(), "", "");
function.setChecksum(checksum);
}
@Override
public String toSql() {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("CREATE ");
if (isAggregate) {
stringBuilder.append("AGGREGATE ");
}
stringBuilder.append("FUNCTION ");
stringBuilder.append(functionName.toString());
stringBuilder.append(argsDef.toSql());
stringBuilder.append(" RETURNS ");
stringBuilder.append(returnType.toString());
if (properties.size() > 0) {
stringBuilder.append(" PROPERTIES (");
int i = 0;
for (Map.Entry<String, String> entry : properties.entrySet()) {
if (i != 0) {
stringBuilder.append(", ");
}
stringBuilder.append('"').append(entry.getKey()).append('"');
stringBuilder.append("=");
stringBuilder.append('"').append(entry.getValue()).append('"');
i++;
}
stringBuilder.append(")");
}
return stringBuilder.toString();
}
@Override
public RedirectStatus getRedirectStatus() {
return RedirectStatus.FORWARD_WITH_SYNC;
}
} | class CreateFunctionStmt extends DdlStmt {
public static final String FILE_KEY = "file";
public static final String SYMBOL_KEY = "symbol";
public static final String MD5_CHECKSUM = "md5";
public static final String INIT_KEY = "init_fn";
public static final String UPDATE_KEY = "update_fn";
public static final String MERGE_KEY = "merge_fn";
public static final String TYPE_KEY = "type";
public static final String TYPE_STARROCKS_JAR = "StarrocksJar";
public static final String EVAL_METHOD_NAME = "evaluate";
private final FunctionName functionName;
private final boolean isAggregate;
private final FunctionArgsDef argsDef;
private final TypeDef returnType;
private TypeDef intermediateType;
private final Map<String, String> properties;
private boolean isStarrocksJar = false;
private String objectFile;
private Function function;
private String checksum;
private Class udfClass;
private static final ImmutableMap<PrimitiveType, Class> PrimitiveTypeToJavaClassType = new ImmutableMap.Builder<PrimitiveType, Class>()
.put(PrimitiveType.BOOLEAN, Boolean.class)
.put(PrimitiveType.TINYINT, Byte.class)
.put(PrimitiveType.SMALLINT, Short.class)
.put(PrimitiveType.INT, Integer.class)
.put(PrimitiveType.FLOAT, Float.class)
.put(PrimitiveType.DOUBLE, Double.class)
.put(PrimitiveType.BIGINT, Long.class)
.put(PrimitiveType.CHAR, String.class)
.put(PrimitiveType.VARCHAR, String.class)
.build();
public CreateFunctionStmt(boolean isAggregate, FunctionName functionName, FunctionArgsDef argsDef,
TypeDef returnType, TypeDef intermediateType, Map<String, String> properties) {
this.functionName = functionName;
this.isAggregate = isAggregate;
this.argsDef = argsDef;
this.returnType = returnType;
this.intermediateType = intermediateType;
if (properties == null) {
this.properties = ImmutableSortedMap.of();
} else {
this.properties = ImmutableSortedMap.copyOf(properties, String.CASE_INSENSITIVE_ORDER);
}
}
public FunctionName getFunctionName() {
return functionName;
}
public Function getFunction() {
return function;
}
@Override
private void analyzeCommon(Analyzer analyzer) throws AnalysisException {
functionName.analyze(analyzer);
if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN)) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN");
}
argsDef.analyze(analyzer);
returnType.analyze(analyzer);
if (intermediateType != null) {
intermediateType.analyze(analyzer);
} else {
intermediateType = returnType;
}
String type = properties.get(TYPE_KEY);
if (TYPE_STARROCKS_JAR.equals(type)) {
isStarrocksJar = true;
}
objectFile = properties.get(FILE_KEY);
if (Strings.isNullOrEmpty(objectFile)) {
throw new AnalysisException("No 'object_file' in properties");
}
try {
computeObjectChecksum();
} catch (IOException | NoSuchAlgorithmException e) {
throw new AnalysisException("cannot to compute object's checksum");
}
String md5sum = properties.get(MD5_CHECKSUM);
if (md5sum != null && !md5sum.equalsIgnoreCase(checksum)) {
throw new AnalysisException("library's checksum is not equal with input, checksum=" + checksum);
}
if (isStarrocksJar) {
analyzeUdfClassInStarrocksJar();
}
}
private void analyzeUdfClassInStarrocksJar() throws AnalysisException {
String class_name = properties.get(SYMBOL_KEY);
if (Strings.isNullOrEmpty(class_name)) {
throw new AnalysisException("No '" + SYMBOL_KEY + "' in properties");
}
try {
URL[] urls = {new URL("jar:" + objectFile + "!/")};
URLClassLoader cl = URLClassLoader.newInstance(urls);
udfClass = cl.loadClass(class_name);
} catch (MalformedURLException e) {
throw new AnalysisException("failed to load object_file: " + objectFile);
} catch (ClassNotFoundException e) {
throw new AnalysisException("class '" + class_name + "' not found in object_file :" + objectFile);
}
}
private void computeObjectChecksum() throws IOException, NoSuchAlgorithmException {
if (FeConstants.runningUnitTest) {
checksum = "";
return;
}
URL url = new URL(objectFile);
URLConnection urlConnection = url.openConnection();
InputStream inputStream = urlConnection.getInputStream();
MessageDigest digest = MessageDigest.getInstance("MD5");
byte[] buf = new byte[4096];
int bytesRead = 0;
do {
bytesRead = inputStream.read(buf);
if (bytesRead < 0) {
break;
}
digest.update(buf, 0, bytesRead);
} while (true);
checksum = Hex.encodeHexString(digest.digest());
}
private void analyzeUda() throws AnalysisException {
AggregateFunction.AggregateFunctionBuilder builder =
AggregateFunction.AggregateFunctionBuilder.createUdfBuilder();
builder.name(functionName).argsType(argsDef.getArgTypes()).retType(returnType.getType()).
hasVarArgs(argsDef.isVariadic()).intermediateType(intermediateType.getType()).objectFile(objectFile);
String initFnSymbol = properties.get(INIT_KEY);
if (initFnSymbol == null) {
throw new AnalysisException("No 'init_fn' in properties");
}
String updateFnSymbol = properties.get(UPDATE_KEY);
if (updateFnSymbol == null) {
throw new AnalysisException("No 'update_fn' in properties");
}
String mergeFnSymbol = properties.get(MERGE_KEY);
if (mergeFnSymbol == null) {
throw new AnalysisException("No 'merge_fn' in properties");
}
function = builder.build();
function.setChecksum(checksum);
}
private void checkStarrocksJarUdfType(Type type, Class ptype, String pname) throws AnalysisException {
if (!(type instanceof ScalarType)) {
throw new AnalysisException("UDF does not support non-scalar type: " + type);
}
ScalarType scalarType = (ScalarType) type;
Class cls = PrimitiveTypeToJavaClassType.get(scalarType.getPrimitiveType());
if (cls == null) {
throw new AnalysisException("UDF does not support type: " + scalarType);
}
if (!cls.equals(ptype)) {
throw new AnalysisException(String.format("UDF %s[%s] type does not match %s", pname,
ptype.getCanonicalName(), cls.getCanonicalName()));
}
}
private void checkStarrocksJarUdfMethod(Method method) throws AnalysisException {
String name = method.getName();
boolean checked = true;
if (EVAL_METHOD_NAME.equals(name)) {
Class retType = method.getReturnType();
checkStarrocksJarUdfType(returnType.getType(), retType, "Return");
if (method.getParameters().length != argsDef.getArgTypes().length) {
throw new AnalysisException(String.format("UDF '%s' parameter count does not match", name));
}
for (int i = 0; i < method.getParameters().length; i++) {
Parameter p = method.getParameters()[i];
checkStarrocksJarUdfType(argsDef.getArgTypes()[i], p.getType(), p.getName());
}
} else {
checked = false;
}
if (checked) {
if (Modifier.isStatic(method.getModifiers())) {
throw new AnalysisException(String.format("UDF '%s' should be non-static method", name));
}
if (!Modifier.isPublic(method.getModifiers())) {
throw new AnalysisException(String.format("UDF '%s' should be public method", name));
}
}
}
private void checkStarrocksJarUdfClass() throws AnalysisException {
int evalMethodCount = 0;
for (Method m : udfClass.getMethods()) {
if (EVAL_METHOD_NAME.equals(m.getName())) {
evalMethodCount += 1;
}
checkStarrocksJarUdfMethod(m);
}
if (evalMethodCount != 1) {
throw new AnalysisException(String.format("UDF should have only one '%s' method", EVAL_METHOD_NAME));
}
}
private void analyzeStarrocksJarUdf() throws AnalysisException {
checkStarrocksJarUdfClass();
function = ScalarFunction.createUdf(
functionName, argsDef.getArgTypes(),
returnType.getType(), argsDef.isVariadic(), TFunctionBinaryType.SRJAR,
objectFile, udfClass.getCanonicalName(), "", "");
function.setChecksum(checksum);
}
@Override
public String toSql() {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("CREATE ");
if (isAggregate) {
stringBuilder.append("AGGREGATE ");
}
stringBuilder.append("FUNCTION ");
stringBuilder.append(functionName.toString());
stringBuilder.append(argsDef.toSql());
stringBuilder.append(" RETURNS ");
stringBuilder.append(returnType.toString());
if (properties.size() > 0) {
stringBuilder.append(" PROPERTIES (");
int i = 0;
for (Map.Entry<String, String> entry : properties.entrySet()) {
if (i != 0) {
stringBuilder.append(", ");
}
stringBuilder.append('"').append(entry.getKey()).append('"');
stringBuilder.append("=");
stringBuilder.append('"').append(entry.getValue()).append('"');
i++;
}
stringBuilder.append(")");
}
return stringBuilder.toString();
}
@Override
public RedirectStatus getRedirectStatus() {
return RedirectStatus.FORWARD_WITH_SYNC;
}
} |
Does the hash function need to be the same in both java and c++. We are using the faster XXH3_64 as default for all hashing in the backend. | public double asDouble() {
XXHash64 hasher = XXHashFactory.fastestInstance().hash64();
byte[] data = value.getBytes(StandardCharsets.UTF_8);
return Double.longBitsToDouble(hasher.hash(data, 0, data.length, 0));
} | XXHash64 hasher = XXHashFactory.fastestInstance().hash64(); | public double asDouble() {
byte[] data = value.getBytes(StandardCharsets.UTF_8);
long h = LongHashFunction.xx3().hashBytes(data);
if ((h & 0x7ff0000000000000L) == 0x7ff0000000000000L) {
h = h & 0xffefffffffffffffL;
}
return Double.longBitsToDouble(h);
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} |
Ugh, ok I'll do it | public double asDouble() {
XXHash64 hasher = XXHashFactory.fastestInstance().hash64();
byte[] data = value.getBytes(StandardCharsets.UTF_8);
return Double.longBitsToDouble(hasher.hash(data, 0, data.length, 0));
} | return Double.longBitsToDouble(hasher.hash(data, 0, data.length, 0)); | public double asDouble() {
byte[] data = value.getBytes(StandardCharsets.UTF_8);
long h = LongHashFunction.xx3().hashBytes(data);
if ((h & 0x7ff0000000000000L) == 0x7ff0000000000000L) {
h = h & 0xffefffffffffffffL;
}
return Double.longBitsToDouble(h);
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} |
I don't think we strictly depend on that any more, but it would feel safer to keep them the same ... | public double asDouble() {
XXHash64 hasher = XXHashFactory.fastestInstance().hash64();
byte[] data = value.getBytes(StandardCharsets.UTF_8);
return Double.longBitsToDouble(hasher.hash(data, 0, data.length, 0));
} | XXHash64 hasher = XXHashFactory.fastestInstance().hash64(); | public double asDouble() {
byte[] data = value.getBytes(StandardCharsets.UTF_8);
long h = LongHashFunction.xx3().hashBytes(data);
if ((h & 0x7ff0000000000000L) == 0x7ff0000000000000L) {
h = h & 0xffefffffffffffffL;
}
return Double.longBitsToDouble(h);
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} |
I applied the same trick here as in the backend. | public double asDouble() {
XXHash64 hasher = XXHashFactory.fastestInstance().hash64();
byte[] data = value.getBytes(StandardCharsets.UTF_8);
return Double.longBitsToDouble(hasher.hash(data, 0, data.length, 0));
} | return Double.longBitsToDouble(hasher.hash(data, 0, data.length, 0)); | public double asDouble() {
byte[] data = value.getBytes(StandardCharsets.UTF_8);
long h = LongHashFunction.xx3().hashBytes(data);
if ((h & 0x7ff0000000000000L) == 0x7ff0000000000000L) {
h = h & 0xffefffffffffffffL;
}
return Double.longBitsToDouble(h);
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} |
Yes, file distribution I think. | public double asDouble() {
XXHash64 hasher = XXHashFactory.fastestInstance().hash64();
byte[] data = value.getBytes(StandardCharsets.UTF_8);
return Double.longBitsToDouble(hasher.hash(data, 0, data.length, 0));
} | XXHash64 hasher = XXHashFactory.fastestInstance().hash64(); | public double asDouble() {
byte[] data = value.getBytes(StandardCharsets.UTF_8);
long h = LongHashFunction.xx3().hashBytes(data);
if ((h & 0x7ff0000000000000L) == 0x7ff0000000000000L) {
h = h & 0xffefffffffffffffL;
}
return Double.longBitsToDouble(h);
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} |
We have no tests verifying equal hashing so I think we are fine there. The backend have standardized on XXH3, and are using that for the string repo used for tensors and various other use cases. We probably want to use this for string attributes too in the not too distant future. So it is better that we just break it now. Once XXH3 is available for java too we can start using it there too. | public double asDouble() {
XXHash64 hasher = XXHashFactory.fastestInstance().hash64();
byte[] data = value.getBytes(StandardCharsets.UTF_8);
return Double.longBitsToDouble(hasher.hash(data, 0, data.length, 0));
} | XXHash64 hasher = XXHashFactory.fastestInstance().hash64(); | public double asDouble() {
byte[] data = value.getBytes(StandardCharsets.UTF_8);
long h = LongHashFunction.xx3().hashBytes(data);
if ((h & 0x7ff0000000000000L) == 0x7ff0000000000000L) {
h = h & 0xffefffffffffffffL;
}
return Double.longBitsToDouble(h);
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} |
XXH3 is available not just i the jpountz implementation ... trying it now. | public double asDouble() {
XXHash64 hasher = XXHashFactory.fastestInstance().hash64();
byte[] data = value.getBytes(StandardCharsets.UTF_8);
return Double.longBitsToDouble(hasher.hash(data, 0, data.length, 0));
} | XXHash64 hasher = XXHashFactory.fastestInstance().hash64(); | public double asDouble() {
byte[] data = value.getBytes(StandardCharsets.UTF_8);
long h = LongHashFunction.xx3().hashBytes(data);
if ((h & 0x7ff0000000000000L) == 0x7ff0000000000000L) {
h = h & 0xffefffffffffffffL;
}
return Double.longBitsToDouble(h);
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} |
consider also mentioning -inf/inf | public double asDouble() {
byte[] data = value.getBytes(StandardCharsets.UTF_8);
long h = LongHashFunction.xx3().hashBytes(data);
if ((h & 0x7ff0000000000000L) == 0x7ff0000000000000L) {
h = h & 0xffefffffffffffffL;
}
return Double.longBitsToDouble(h);
} | public double asDouble() {
byte[] data = value.getBytes(StandardCharsets.UTF_8);
long h = LongHashFunction.xx3().hashBytes(data);
if ((h & 0x7ff0000000000000L) == 0x7ff0000000000000L) {
h = h & 0xffefffffffffffffL;
}
return Double.longBitsToDouble(h);
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} | |
I guess we should see where we are using jpountz library and unify usage - later. | public double asDouble() {
XXHash64 hasher = XXHashFactory.fastestInstance().hash64();
byte[] data = value.getBytes(StandardCharsets.UTF_8);
return Double.longBitsToDouble(hasher.hash(data, 0, data.length, 0));
} | XXHash64 hasher = XXHashFactory.fastestInstance().hash64(); | public double asDouble() {
byte[] data = value.getBytes(StandardCharsets.UTF_8);
long h = LongHashFunction.xx3().hashBytes(data);
if ((h & 0x7ff0000000000000L) == 0x7ff0000000000000L) {
h = h & 0xffefffffffffffffL;
}
return Double.longBitsToDouble(h);
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} |
Consider using a `HashMap` + `Map.copyOf()` | private static SearchGroupsImpl toGroups(Collection<Node> nodes, double minActivedocsPercentage) {
ImmutableMap.Builder<Integer, Group> groupsBuilder = new ImmutableMap.Builder<>();
for (Map.Entry<Integer, List<Node>> group : nodes.stream().collect(Collectors.groupingBy(Node::group)).entrySet()) {
Group g = new Group(group.getKey(), group.getValue());
groupsBuilder.put(group.getKey(), g);
}
return new SearchGroupsImpl(groupsBuilder.build(), minActivedocsPercentage);
} | ImmutableMap.Builder<Integer, Group> groupsBuilder = new ImmutableMap.Builder<>(); | private static SearchGroupsImpl toGroups(Collection<Node> nodes, double minActivedocsPercentage) {
Map<Integer, Group> groups = new HashMap<>();
for (Map.Entry<Integer, List<Node>> group : nodes.stream().collect(Collectors.groupingBy(Node::group)).entrySet()) {
Group g = new Group(group.getKey(), group.getValue());
groups.put(group.getKey(), g);
}
return new SearchGroupsImpl(Map.copyOf(groups), minActivedocsPercentage);
} | class SearchCluster implements NodeManager<Node> {
private static final Logger log = Logger.getLogger(SearchCluster.class.getName());
private final String clusterId;
private final VipStatus vipStatus;
private final PingFactory pingFactory;
private final SearchGroupsImpl groups;
private long nextLogTime = 0;
/**
* A search node on this local machine having the entire corpus, which we therefore
* should prefer to dispatch directly to, or empty if there is no such local search node.
* If there is one, we also maintain the VIP status of this container based on the availability
* of the corpus on this local node (up + has coverage), such that this node is taken out of rotation
* if it only queries this cluster when the local node cannot be used, to avoid unnecessary
* cross-node network traffic.
*/
private final Node localCorpusDispatchTarget;
public SearchCluster(String clusterId, double minActivedocsPercentage,
DispatchNodesConfig nodesConfig,
VipStatus vipStatus, PingFactory pingFactory) {
this(clusterId, minActivedocsPercentage, toNodes(nodesConfig), vipStatus, pingFactory);
}
public SearchCluster(String clusterId, double minActivedocsPercentage, List<Node> nodes,
VipStatus vipStatus, PingFactory pingFactory) {
this(clusterId, toGroups(nodes, minActivedocsPercentage), vipStatus, pingFactory);
}
public SearchCluster(String clusterId, SearchGroupsImpl groups, VipStatus vipStatus, PingFactory pingFactory) {
this.clusterId = clusterId;
this.vipStatus = vipStatus;
this.pingFactory = pingFactory;
this.groups = groups;
this.localCorpusDispatchTarget = findLocalCorpusDispatchTarget(HostName.getLocalhost(), groups);
}
@Override
public String name() { return clusterId; }
public void addMonitoring(ClusterMonitor<Node> clusterMonitor) {
for (var group : groups()) {
for (var node : group.nodes())
clusterMonitor.add(node, true);
}
}
private static Node findLocalCorpusDispatchTarget(String selfHostname, SearchGroups groups) {
List<Node> localSearchNodes = groups.groups().stream().flatMap(g -> g.nodes().stream())
.filter(node -> node.hostname().equals(selfHostname))
.toList();
if (localSearchNodes.size() != 1) return null;
Node localSearchNode = localSearchNodes.iterator().next();
Group localSearchGroup = groups.get(localSearchNode.group());
if (localSearchGroup.nodes().size() != 1) return null;
return localSearchNode;
}
private static List<Node> toNodes(DispatchNodesConfig nodesConfig) {
return nodesConfig.node().stream()
.map(n -> new Node(n.key(), n.host(), n.group()))
.toList();
}
public SearchGroups groupList() { return groups; }
public Group group(int id) { return groups.get(id); }
private Collection<Group> groups() { return groups.groups(); }
public int groupsWithSufficientCoverage() {
return (int)groups().stream().filter(Group::hasSufficientCoverage).count();
}
/**
* Returns the single, local node we should dispatch queries directly to,
* or empty if we should not dispatch directly.
*/
public Optional<Node> localCorpusDispatchTarget() {
if ( localCorpusDispatchTarget == null) return Optional.empty();
Group localSearchGroup = groups.get(localCorpusDispatchTarget.group());
if ( ! localSearchGroup.hasSufficientCoverage()) return Optional.empty();
if ( localCorpusDispatchTarget.isWorking() == Boolean.FALSE) return Optional.empty();
return Optional.of(localCorpusDispatchTarget);
}
private void updateWorkingState(Node node, boolean isWorking) {
node.setWorking(isWorking);
updateVipStatusOnNodeChange(node, isWorking);
}
/** Called by the cluster monitor when node state changes to working */
@Override
public void working(Node node) {
updateWorkingState(node, true);
}
/** Called by the cluster monitor when node state changes to failed */
@Override
public void failed(Node node) {
updateWorkingState(node, false);
}
private void updateSufficientCoverage(Group group, boolean sufficientCoverage) {
if (sufficientCoverage == group.hasSufficientCoverage()) return;
group.setHasSufficientCoverage(sufficientCoverage);
updateVipStatusOnCoverageChange(group, sufficientCoverage);
}
private void updateVipStatusOnNodeChange(Node node, boolean nodeIsWorking) {
if (localCorpusDispatchTarget == null) {
if (hasInformationAboutAllNodes())
setInRotationOnlyIf(hasWorkingNodes());
}
else if (usesLocalCorpusIn(node)) {
if (nodeIsWorking || groups().stream().map(Group::nodes).count() > 1)
setInRotationOnlyIf(nodeIsWorking);
}
}
private void updateVipStatusOnCoverageChange(Group group, boolean sufficientCoverage) {
if ( localCorpusDispatchTarget == null) {
}
else if (usesLocalCorpusIn(group)) {
setInRotationOnlyIf(sufficientCoverage);
}
}
private void setInRotationOnlyIf(boolean inRotation) {
if (inRotation)
vipStatus.addToRotation(clusterId);
else
vipStatus.removeFromRotation(clusterId);
}
public boolean hasInformationAboutAllNodes() {
return groups().stream().allMatch(g -> g.nodes().stream().allMatch(node -> node.isWorking() != null));
}
private boolean hasWorkingNodes() {
return groups().stream().anyMatch(g -> g.nodes().stream().anyMatch(node -> node.isWorking() != Boolean.FALSE));
}
private boolean usesLocalCorpusIn(Node node) {
return node.equals(localCorpusDispatchTarget);
}
private boolean usesLocalCorpusIn(Group group) {
return (localCorpusDispatchTarget != null) && localCorpusDispatchTarget.group() == group.id();
}
/** Used by the cluster monitor to manage node status */
@Override
public void ping(ClusterMonitor clusterMonitor, Node node, Executor executor) {
Pinger pinger = pingFactory.createPinger(node, clusterMonitor, new PongCallback(node, clusterMonitor));
pinger.ping();
}
private void pingIterationCompletedSingleGroup() {
Group group = groups().iterator().next();
group.aggregateNodeValues();
updateSufficientCoverage(group, true);
boolean sufficientCoverage = groups.isGroupCoverageSufficient(group.activeDocuments(), group.activeDocuments());
trackGroupCoverageChanges(group, sufficientCoverage, group.activeDocuments());
}
private void pingIterationCompletedMultipleGroups() {
groups().forEach(Group::aggregateNodeValues);
long medianDocuments = groups.medianDocumentsPerGroup();
for (Group group : groups()) {
boolean sufficientCoverage = groups.isGroupCoverageSufficient(group.activeDocuments(), medianDocuments);
updateSufficientCoverage(group, sufficientCoverage);
trackGroupCoverageChanges(group, sufficientCoverage, medianDocuments);
}
}
/**
* Update statistics after a round of issuing pings.
* Note that this doesn't wait for pings to return, so it will typically accumulate data from
* last rounds pinging, or potentially (although unlikely) some combination of new and old data.
*/
@Override
public void pingIterationCompleted() {
if (groups.size() == 1) {
pingIterationCompletedSingleGroup();
} else {
pingIterationCompletedMultipleGroups();
}
}
/**
* Calculate whether a subset of nodes in a group has enough coverage
*/
private void trackGroupCoverageChanges(Group group, boolean fullCoverage, long medianDocuments) {
if ( ! hasInformationAboutAllNodes()) return;
boolean changed = group.fullCoverageStatusChanged(fullCoverage);
if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) {
nextLogTime = System.currentTimeMillis() + 30 * 1000;
if (fullCoverage) {
log.info("Cluster " + clusterId + ": " + group + " has full coverage. " +
"Active documents: " + group.activeDocuments() + "/" + medianDocuments + ", " +
"Target active documents: " + group.targetActiveDocuments() + ", " +
"working nodes: " + group.workingNodes() + "/" + group.nodes().size());
} else {
StringBuilder unresponsive = new StringBuilder();
for (var node : group.nodes()) {
if (node.isWorking() != Boolean.TRUE)
unresponsive.append('\n').append(node);
}
log.warning("Cluster " + clusterId + ": " + group + " has reduced coverage: " +
"Active documents: " + group.activeDocuments() + "/" + medianDocuments + ", " +
"Target active documents: " + group.targetActiveDocuments() + ", " +
"working nodes: " + group.workingNodes() + "/" + group.nodes().size() +
", unresponsive nodes: " + (unresponsive.toString().isEmpty() ? " none" : unresponsive));
}
}
}
private static class PongCallback implements PongHandler {
private final ClusterMonitor<Node> clusterMonitor;
private final Node node;
PongCallback(Node node, ClusterMonitor<Node> clusterMonitor) {
this.node = node;
this.clusterMonitor = clusterMonitor;
}
@Override
public void handle(Pong pong) {
if (pong.badResponse()) {
clusterMonitor.failed(node, pong.error().get());
} else {
if (pong.activeDocuments().isPresent()) {
node.setActiveDocuments(pong.activeDocuments().get());
node.setTargetActiveDocuments(pong.targetActiveDocuments().get());
node.setBlockingWrites(pong.isBlockingWrites());
}
clusterMonitor.responded(node);
}
}
}
} | class SearchCluster implements NodeManager<Node> {
private static final Logger log = Logger.getLogger(SearchCluster.class.getName());
private final String clusterId;
private final VipStatus vipStatus;
private final PingFactory pingFactory;
private final SearchGroupsImpl groups;
private volatile long nextLogTime = 0;
/**
* A search node on this local machine having the entire corpus, which we therefore
* should prefer to dispatch directly to, or empty if there is no such local search node.
* If there is one, we also maintain the VIP status of this container based on the availability
* of the corpus on this local node (up + has coverage), such that this node is taken out of rotation
* if it only queries this cluster when the local node cannot be used, to avoid unnecessary
* cross-node network traffic.
*/
private final Node localCorpusDispatchTarget;
public SearchCluster(String clusterId, double minActivedocsPercentage,
DispatchNodesConfig nodesConfig,
VipStatus vipStatus, PingFactory pingFactory) {
this(clusterId, minActivedocsPercentage, toNodes(nodesConfig), vipStatus, pingFactory);
}
public SearchCluster(String clusterId, double minActivedocsPercentage, List<Node> nodes,
VipStatus vipStatus, PingFactory pingFactory) {
this(clusterId, toGroups(nodes, minActivedocsPercentage), vipStatus, pingFactory);
}
public SearchCluster(String clusterId, SearchGroupsImpl groups, VipStatus vipStatus, PingFactory pingFactory) {
this.clusterId = clusterId;
this.vipStatus = vipStatus;
this.pingFactory = pingFactory;
this.groups = groups;
this.localCorpusDispatchTarget = findLocalCorpusDispatchTarget(HostName.getLocalhost(), groups);
}
@Override
public String name() { return clusterId; }
public void addMonitoring(ClusterMonitor<Node> clusterMonitor) {
for (var group : groups()) {
for (var node : group.nodes())
clusterMonitor.add(node, true);
}
}
private static Node findLocalCorpusDispatchTarget(String selfHostname, SearchGroups groups) {
List<Node> localSearchNodes = groups.groups().stream().flatMap(g -> g.nodes().stream())
.filter(node -> node.hostname().equals(selfHostname))
.toList();
if (localSearchNodes.size() != 1) return null;
Node localSearchNode = localSearchNodes.iterator().next();
Group localSearchGroup = groups.get(localSearchNode.group());
if (localSearchGroup.nodes().size() != 1) return null;
return localSearchNode;
}
private static List<Node> toNodes(DispatchNodesConfig nodesConfig) {
return nodesConfig.node().stream()
.map(n -> new Node(n.key(), n.host(), n.group()))
.toList();
}
public SearchGroups groupList() { return groups; }
public Group group(int id) { return groups.get(id); }
private Collection<Group> groups() { return groups.groups(); }
public int groupsWithSufficientCoverage() {
return (int)groups().stream().filter(Group::hasSufficientCoverage).count();
}
/**
* Returns the single, local node we should dispatch queries directly to,
* or empty if we should not dispatch directly.
*/
public Optional<Node> localCorpusDispatchTarget() {
if ( localCorpusDispatchTarget == null) return Optional.empty();
Group localSearchGroup = groups.get(localCorpusDispatchTarget.group());
if ( ! localSearchGroup.hasSufficientCoverage()) return Optional.empty();
if ( localCorpusDispatchTarget.isWorking() == Boolean.FALSE) return Optional.empty();
return Optional.of(localCorpusDispatchTarget);
}
private void updateWorkingState(Node node, boolean isWorking) {
node.setWorking(isWorking);
updateVipStatusOnNodeChange(node, isWorking);
}
/** Called by the cluster monitor when node state changes to working */
@Override
public void working(Node node) {
updateWorkingState(node, true);
}
/** Called by the cluster monitor when node state changes to failed */
@Override
public void failed(Node node) {
updateWorkingState(node, false);
}
private void updateSufficientCoverage(Group group, boolean sufficientCoverage) {
if (sufficientCoverage == group.hasSufficientCoverage()) return;
group.setHasSufficientCoverage(sufficientCoverage);
updateVipStatusOnCoverageChange(group, sufficientCoverage);
}
private void updateVipStatusOnNodeChange(Node node, boolean nodeIsWorking) {
if (localCorpusDispatchTarget == null) {
if (hasInformationAboutAllNodes())
setInRotationOnlyIf(hasWorkingNodes());
}
else if (usesLocalCorpusIn(node)) {
if (nodeIsWorking || groups().stream().map(Group::nodes).count() > 1)
setInRotationOnlyIf(nodeIsWorking);
}
}
private void updateVipStatusOnCoverageChange(Group group, boolean sufficientCoverage) {
if ( localCorpusDispatchTarget == null) {
}
else if (usesLocalCorpusIn(group)) {
setInRotationOnlyIf(sufficientCoverage);
}
}
private void setInRotationOnlyIf(boolean inRotation) {
if (inRotation)
vipStatus.addToRotation(clusterId);
else
vipStatus.removeFromRotation(clusterId);
}
public boolean hasInformationAboutAllNodes() {
return groups().stream().allMatch(g -> g.nodes().stream().allMatch(node -> node.isWorking() != null));
}
private boolean hasWorkingNodes() {
return groups().stream().anyMatch(g -> g.nodes().stream().anyMatch(node -> node.isWorking() != Boolean.FALSE));
}
private boolean usesLocalCorpusIn(Node node) {
return node.equals(localCorpusDispatchTarget);
}
private boolean usesLocalCorpusIn(Group group) {
return (localCorpusDispatchTarget != null) && localCorpusDispatchTarget.group() == group.id();
}
/** Used by the cluster monitor to manage node status */
@Override
public void ping(ClusterMonitor clusterMonitor, Node node, Executor executor) {
Pinger pinger = pingFactory.createPinger(node, clusterMonitor, new PongCallback(node, clusterMonitor));
pinger.ping();
}
private void pingIterationCompletedSingleGroup() {
Group group = groups().iterator().next();
group.aggregateNodeValues();
updateSufficientCoverage(group, true);
boolean sufficientCoverage = groups.isGroupCoverageSufficient(group.activeDocuments(), group.activeDocuments());
trackGroupCoverageChanges(group, sufficientCoverage, group.activeDocuments());
}
private void pingIterationCompletedMultipleGroups() {
groups().forEach(Group::aggregateNodeValues);
long medianDocuments = groups.medianDocumentsPerGroup();
for (Group group : groups()) {
boolean sufficientCoverage = groups.isGroupCoverageSufficient(group.activeDocuments(), medianDocuments);
updateSufficientCoverage(group, sufficientCoverage);
trackGroupCoverageChanges(group, sufficientCoverage, medianDocuments);
}
}
/**
* Update statistics after a round of issuing pings.
* Note that this doesn't wait for pings to return, so it will typically accumulate data from
* last rounds pinging, or potentially (although unlikely) some combination of new and old data.
*/
@Override
public void pingIterationCompleted() {
if (groups.size() == 1) {
pingIterationCompletedSingleGroup();
} else {
pingIterationCompletedMultipleGroups();
}
}
/**
* Calculate whether a subset of nodes in a group has enough coverage
*/
private void trackGroupCoverageChanges(Group group, boolean fullCoverage, long medianDocuments) {
if ( ! hasInformationAboutAllNodes()) return;
boolean changed = group.fullCoverageStatusChanged(fullCoverage);
if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) {
nextLogTime = System.currentTimeMillis() + 30 * 1000;
if (fullCoverage) {
log.info("Cluster " + clusterId + ": " + group + " has full coverage. " +
"Active documents: " + group.activeDocuments() + "/" + medianDocuments + ", " +
"Target active documents: " + group.targetActiveDocuments() + ", " +
"working nodes: " + group.workingNodes() + "/" + group.nodes().size());
} else {
StringBuilder unresponsive = new StringBuilder();
for (var node : group.nodes()) {
if (node.isWorking() != Boolean.TRUE)
unresponsive.append('\n').append(node);
}
log.warning("Cluster " + clusterId + ": " + group + " has reduced coverage: " +
"Active documents: " + group.activeDocuments() + "/" + medianDocuments + ", " +
"Target active documents: " + group.targetActiveDocuments() + ", " +
"working nodes: " + group.workingNodes() + "/" + group.nodes().size() +
", unresponsive nodes: " + (unresponsive.toString().isEmpty() ? " none" : unresponsive));
}
}
}
private static class PongCallback implements PongHandler {
private final ClusterMonitor<Node> clusterMonitor;
private final Node node;
PongCallback(Node node, ClusterMonitor<Node> clusterMonitor) {
this.node = node;
this.clusterMonitor = clusterMonitor;
}
@Override
public void handle(Pong pong) {
if (pong.badResponse()) {
clusterMonitor.failed(node, pong.error().get());
} else {
if (pong.activeDocuments().isPresent()) {
node.setActiveDocuments(pong.activeDocuments().get());
node.setTargetActiveDocuments(pong.targetActiveDocuments().get());
node.setBlockingWrites(pong.isBlockingWrites());
}
clusterMonitor.responded(node);
}
}
}
} |
Comment updated in #24951 | public double asDouble() {
byte[] data = value.getBytes(StandardCharsets.UTF_8);
long h = LongHashFunction.xx3().hashBytes(data);
if ((h & 0x7ff0000000000000L) == 0x7ff0000000000000L) {
h = h & 0xffefffffffffffffL;
}
return Double.longBitsToDouble(h);
} | public double asDouble() {
byte[] data = value.getBytes(StandardCharsets.UTF_8);
long h = LongHashFunction.xx3().hashBytes(data);
if ((h & 0x7ff0000000000000L) == 0x7ff0000000000000L) {
h = h & 0xffefffffffffffffL;
}
return Double.longBitsToDouble(h);
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} | class StringValue extends Value {
private final String value;
/**
* Create a string value which is frozen at the outset.
*/
public static StringValue frozen(String value) {
StringValue stringValue=new StringValue(value);
stringValue.freeze();
return stringValue;
}
public StringValue(String value) {
this.value = UnicodeUtilities.unquote(value);
}
@Override
public TensorType type() { return TensorType.empty; }
/**
* Returns the XXHash hashcode of this, to enable strings to be encoded (with reasonable safely)
* as doubles for optimization.
*/
@Override
@Override
public Tensor asTensor() {
return doubleAsTensor(asDouble());
}
@Override
public boolean hasDouble() { return true; }
@Override
public boolean asBoolean() {
throw new UnsupportedOperationException("A string value ('" + value + "') does not have a boolean value");
}
@Override
public Value negate() {
throw new UnsupportedOperationException("A string value ('" + value + "') cannot be negated");
}
@Override
public Value not() {
throw new UnsupportedOperationException("String values ('" + value + "') do not support not");
}
@Override
public Value or(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support or");
}
@Override
public Value and(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support and");
}
@Override
public Value largerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greaterEqual");
}
@Override
public Value larger(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support greater");
}
@Override
public Value smallerOrEqual(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support lessEqual");
}
@Override
public Value smaller(Value argument) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support less");
}
@Override
public Value approxEqual(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value notEqual(Value argument) {
return new BooleanValue(this.asDouble() != argument.asDouble());
}
@Override
public Value equal(Value argument) {
return new BooleanValue(this.asDouble() == argument.asDouble());
}
@Override
public Value add(Value value) {
return new StringValue(value + value.toString());
}
@Override
public Value subtract(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support subtraction");
}
@Override
public Value multiply(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support multiplication");
}
@Override
public Value divide(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support division");
}
@Override
public Value modulo(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support modulo");
}
@Override
public Value power(Value value) {
throw new UnsupportedOperationException("String values ('" + value + "') do not support ^");
}
@Override
public Value function(Function function, Value value) {
throw new UnsupportedOperationException("Mathematical functions cannot be applied on strings ('" + value + "')");
}
@Override
public Value asMutable() {
if ( ! isFrozen()) return this;
return new StringValue(value);
}
@Override
public String toString() {
return UnicodeUtilities.quote(value, '"');
}
@Override
public boolean equals(Object other) {
if (this==other) return true;
if ( ! (other instanceof StringValue)) return false;
return ((StringValue)other).value.equals(this.value);
}
@Override
public int hashCode() {
return value.hashCode();
}
/** Returns the value of this as a string */
public String asString() { return value; }
} | |
Use dashboardUri? | public Mail mailOf(FormattedNotification content, Collection<String> recipients) {
var notification = content.notification();
var subject = Text.format("[%s] %s Vespa Notification for %s", notification.level().toString().toUpperCase(), content.prettyType(), applicationIdSource(notification.source()));
var template = uncheck(() -> Notifier.class.getResourceAsStream("/mail/mail-notification.tmpl").readAllBytes());
var notificationsUri = new UriBuilder(dashboardUri)
.append("tenant/")
.append(content.notification().source().tenant().value())
.append("account/notifications")
.toURI()
.toString();
var html = new String(template)
.replace("[[NOTIFICATION_HEADER]]", content.messagePrefix())
.replace("[[NOTIFICATION_ITEMS]]", notification.messages().stream()
.map(Notifier::linkify)
.map(m -> "<li>" + m + "</li>")
.collect(Collectors.joining()))
.replace("[[LINK_TO_ACCOUNT_NOTIFICATIONS]]", notificationsUri)
.replace("[[LINK_TO_PRIVACY_POLICY]]", "https:
.replace("[[LINK_TO_TERMS_OF_SERVICE]]", "https:
.replace("[[LINK_TO_SUPPORT]]", "https:
return new Mail(recipients, subject, "", html);
} | .replace("[[LINK_TO_TERMS_OF_SERVICE]]", "https: | public Mail mailOf(FormattedNotification content, Collection<String> recipients) {
var notification = content.notification();
var subject = Text.format("[%s] %s Vespa Notification for %s", notification.level().toString().toUpperCase(), content.prettyType(), applicationIdSource(notification.source()));
var template = uncheck(() -> Notifier.class.getResourceAsStream("/mail/mail-notification.tmpl").readAllBytes());
var html = new String(template)
.replace("[[NOTIFICATION_HEADER]]", content.messagePrefix())
.replace("[[NOTIFICATION_ITEMS]]", notification.messages().stream()
.map(Notifier::linkify)
.map(m -> "<li>" + m + "</li>")
.collect(Collectors.joining()))
.replace("[[LINK_TO_ACCOUNT_NOTIFICATIONS]]", accountNotificationsUri(content.notification().source().tenant()))
.replace("[[LINK_TO_PRIVACY_POLICY]]", "https:
.replace("[[LINK_TO_TERMS_OF_SERVICE]]", consoleUri("terms-of-service-trial.html"))
.replace("[[LINK_TO_SUPPORT]]", consoleUri("support"));
return new Mail(recipients, subject, "", html);
} | class Notifier {
private static final String header = """
<div style="background:
<img
src="https:
style="width: auto; height: 34px; margin: 10px"
/>
</div>
<br>
""";
private final CuratorDb curatorDb;
private final Mailer mailer;
private final FlagSource flagSource;
private final NotificationFormatter formatter;
private final URI dashboardUri;
private static final Logger log = Logger.getLogger(Notifier.class.getName());
private static final Pattern urlPattern = Pattern.compile("https:
public Notifier(CuratorDb curatorDb, ZoneRegistry zoneRegistry, Mailer mailer, FlagSource flagSource) {
this.curatorDb = Objects.requireNonNull(curatorDb);
this.mailer = Objects.requireNonNull(mailer);
this.flagSource = Objects.requireNonNull(flagSource);
this.formatter = new NotificationFormatter(zoneRegistry);
this.dashboardUri = zoneRegistry.dashboardUrl();
}
public void dispatch(List<Notification> notifications, NotificationSource source) {
if (!dispatchEnabled(source) || skipSource(source)) {
return;
}
if (notifications.isEmpty()) {
return;
}
var tenant = curatorDb.readTenant(source.tenant());
tenant.stream().forEach(t -> {
if (t instanceof CloudTenant) {
var ct = (CloudTenant) t;
ct.info().contacts().all().stream()
.filter(c -> c.audiences().contains(TenantContacts.Audience.NOTIFICATIONS))
.collect(Collectors.groupingBy(TenantContacts.Contact::type, Collectors.toList()))
.entrySet()
.forEach(e -> notifications.forEach(n -> dispatch(n, e.getKey(), e.getValue())));
}
});
}
public void dispatch(Notification notification) {
dispatch(List.of(notification), notification.source());
}
private boolean dispatchEnabled(NotificationSource source) {
return Flags.NOTIFICATION_DISPATCH_FLAG.bindTo(flagSource)
.with(FetchVector.Dimension.TENANT_ID, source.tenant().value())
.value();
}
private boolean skipSource(NotificationSource source) {
if (source.zoneId()
.map(z -> z.environment())
.map(e -> e == Environment.dev || e == Environment.perf)
.orElse(false)) {
return true;
}
return false;
}
private void dispatch(Notification notification, TenantContacts.Type type, Collection<? extends TenantContacts.Contact> contacts) {
switch (type) {
case EMAIL:
dispatch(notification, contacts.stream().map(c -> (TenantContacts.EmailContact) c).collect(Collectors.toList()));
break;
default:
throw new IllegalArgumentException("Unknown TenantContacts type " + type.name());
}
}
private void dispatch(Notification notification, Collection<TenantContacts.EmailContact> contacts) {
try {
var content = formatter.format(notification);
mailer.send(mailOf(content, contacts.stream()
.filter(c -> c.email().isVerified())
.map(c -> c.email().getEmailAddress())
.collect(Collectors.toList())));
} catch (MailerException e) {
log.log(Level.SEVERE, "Failed sending email", e);
} catch (MissingOptionalException e) {
log.log(Level.WARNING, "Missing value in required field '" + e.field() + "' for notification type: " + notification.type(), e);
}
}
@VisibleForTesting
static String linkify(String text) {
return urlPattern.matcher(text).replaceAll((res) -> String.format("<a href=\"%s\">%s</a>", res.group(), res.group()));
}
private String applicationIdSource(NotificationSource source) {
StringBuilder sb = new StringBuilder();
sb.append(source.tenant().value());
source.application().ifPresent(applicationName -> sb.append(".").append(applicationName.value()));
source.instance().ifPresent(instanceName -> sb.append(".").append(instanceName.value()));
return sb.toString();
}
} | class Notifier {
private static final String header = """
<div style="background:
<img
src="https:
style="width: auto; height: 34px; margin: 10px"
/>
</div>
<br>
""";
private final CuratorDb curatorDb;
private final Mailer mailer;
private final FlagSource flagSource;
private final NotificationFormatter formatter;
private final URI dashboardUri;
private static final Logger log = Logger.getLogger(Notifier.class.getName());
private static final Pattern urlPattern = Pattern.compile("https:
public Notifier(CuratorDb curatorDb, ZoneRegistry zoneRegistry, Mailer mailer, FlagSource flagSource) {
this.curatorDb = Objects.requireNonNull(curatorDb);
this.mailer = Objects.requireNonNull(mailer);
this.flagSource = Objects.requireNonNull(flagSource);
this.formatter = new NotificationFormatter(zoneRegistry);
this.dashboardUri = zoneRegistry.dashboardUrl();
}
public void dispatch(List<Notification> notifications, NotificationSource source) {
if (!dispatchEnabled(source) || skipSource(source)) {
return;
}
if (notifications.isEmpty()) {
return;
}
var tenant = curatorDb.readTenant(source.tenant());
tenant.stream().forEach(t -> {
if (t instanceof CloudTenant) {
var ct = (CloudTenant) t;
ct.info().contacts().all().stream()
.filter(c -> c.audiences().contains(TenantContacts.Audience.NOTIFICATIONS))
.collect(Collectors.groupingBy(TenantContacts.Contact::type, Collectors.toList()))
.entrySet()
.forEach(e -> notifications.forEach(n -> dispatch(n, e.getKey(), e.getValue())));
}
});
}
public void dispatch(Notification notification) {
dispatch(List.of(notification), notification.source());
}
private boolean dispatchEnabled(NotificationSource source) {
return Flags.NOTIFICATION_DISPATCH_FLAG.bindTo(flagSource)
.with(FetchVector.Dimension.TENANT_ID, source.tenant().value())
.value();
}
private boolean skipSource(NotificationSource source) {
if (source.zoneId()
.map(z -> z.environment())
.map(e -> e == Environment.dev || e == Environment.perf)
.orElse(false)) {
return true;
}
return false;
}
private void dispatch(Notification notification, TenantContacts.Type type, Collection<? extends TenantContacts.Contact> contacts) {
switch (type) {
case EMAIL:
dispatch(notification, contacts.stream().map(c -> (TenantContacts.EmailContact) c).collect(Collectors.toList()));
break;
default:
throw new IllegalArgumentException("Unknown TenantContacts type " + type.name());
}
}
private void dispatch(Notification notification, Collection<TenantContacts.EmailContact> contacts) {
try {
var content = formatter.format(notification);
mailer.send(mailOf(content, contacts.stream()
.filter(c -> c.email().isVerified())
.map(c -> c.email().getEmailAddress())
.collect(Collectors.toList())));
} catch (MailerException e) {
log.log(Level.SEVERE, "Failed sending email", e);
} catch (MissingOptionalException e) {
log.log(Level.WARNING, "Missing value in required field '" + e.field() + "' for notification type: " + notification.type(), e);
}
}
@VisibleForTesting
static String linkify(String text) {
return urlPattern.matcher(text).replaceAll((res) -> String.format("<a href=\"%s\">%s</a>", res.group(), res.group()));
}
private String applicationIdSource(NotificationSource source) {
StringBuilder sb = new StringBuilder();
sb.append(source.tenant().value());
source.application().ifPresent(applicationName -> sb.append(".").append(applicationName.value()));
source.instance().ifPresent(instanceName -> sb.append(".").append(instanceName.value()));
return sb.toString();
}
private String accountNotificationsUri(TenantName tenant) {
return new UriBuilder(dashboardUri)
.append("tenant/")
.append(tenant.value())
.append("account/notifications")
.toString();
}
private String consoleUri(String path) {
return new UriBuilder(dashboardUri).append(path).toString();
}
} |
Nice catch | public Mail mailOf(FormattedNotification content, Collection<String> recipients) {
var notification = content.notification();
var subject = Text.format("[%s] %s Vespa Notification for %s", notification.level().toString().toUpperCase(), content.prettyType(), applicationIdSource(notification.source()));
var template = uncheck(() -> Notifier.class.getResourceAsStream("/mail/mail-notification.tmpl").readAllBytes());
var notificationsUri = new UriBuilder(dashboardUri)
.append("tenant/")
.append(content.notification().source().tenant().value())
.append("account/notifications")
.toURI()
.toString();
var html = new String(template)
.replace("[[NOTIFICATION_HEADER]]", content.messagePrefix())
.replace("[[NOTIFICATION_ITEMS]]", notification.messages().stream()
.map(Notifier::linkify)
.map(m -> "<li>" + m + "</li>")
.collect(Collectors.joining()))
.replace("[[LINK_TO_ACCOUNT_NOTIFICATIONS]]", notificationsUri)
.replace("[[LINK_TO_PRIVACY_POLICY]]", "https:
.replace("[[LINK_TO_TERMS_OF_SERVICE]]", "https:
.replace("[[LINK_TO_SUPPORT]]", "https:
return new Mail(recipients, subject, "", html);
} | .replace("[[LINK_TO_TERMS_OF_SERVICE]]", "https: | public Mail mailOf(FormattedNotification content, Collection<String> recipients) {
var notification = content.notification();
var subject = Text.format("[%s] %s Vespa Notification for %s", notification.level().toString().toUpperCase(), content.prettyType(), applicationIdSource(notification.source()));
var template = uncheck(() -> Notifier.class.getResourceAsStream("/mail/mail-notification.tmpl").readAllBytes());
var html = new String(template)
.replace("[[NOTIFICATION_HEADER]]", content.messagePrefix())
.replace("[[NOTIFICATION_ITEMS]]", notification.messages().stream()
.map(Notifier::linkify)
.map(m -> "<li>" + m + "</li>")
.collect(Collectors.joining()))
.replace("[[LINK_TO_ACCOUNT_NOTIFICATIONS]]", accountNotificationsUri(content.notification().source().tenant()))
.replace("[[LINK_TO_PRIVACY_POLICY]]", "https:
.replace("[[LINK_TO_TERMS_OF_SERVICE]]", consoleUri("terms-of-service-trial.html"))
.replace("[[LINK_TO_SUPPORT]]", consoleUri("support"));
return new Mail(recipients, subject, "", html);
} | class Notifier {
private static final String header = """
<div style="background:
<img
src="https:
style="width: auto; height: 34px; margin: 10px"
/>
</div>
<br>
""";
private final CuratorDb curatorDb;
private final Mailer mailer;
private final FlagSource flagSource;
private final NotificationFormatter formatter;
private final URI dashboardUri;
private static final Logger log = Logger.getLogger(Notifier.class.getName());
private static final Pattern urlPattern = Pattern.compile("https:
public Notifier(CuratorDb curatorDb, ZoneRegistry zoneRegistry, Mailer mailer, FlagSource flagSource) {
this.curatorDb = Objects.requireNonNull(curatorDb);
this.mailer = Objects.requireNonNull(mailer);
this.flagSource = Objects.requireNonNull(flagSource);
this.formatter = new NotificationFormatter(zoneRegistry);
this.dashboardUri = zoneRegistry.dashboardUrl();
}
public void dispatch(List<Notification> notifications, NotificationSource source) {
if (!dispatchEnabled(source) || skipSource(source)) {
return;
}
if (notifications.isEmpty()) {
return;
}
var tenant = curatorDb.readTenant(source.tenant());
tenant.stream().forEach(t -> {
if (t instanceof CloudTenant) {
var ct = (CloudTenant) t;
ct.info().contacts().all().stream()
.filter(c -> c.audiences().contains(TenantContacts.Audience.NOTIFICATIONS))
.collect(Collectors.groupingBy(TenantContacts.Contact::type, Collectors.toList()))
.entrySet()
.forEach(e -> notifications.forEach(n -> dispatch(n, e.getKey(), e.getValue())));
}
});
}
public void dispatch(Notification notification) {
dispatch(List.of(notification), notification.source());
}
private boolean dispatchEnabled(NotificationSource source) {
return Flags.NOTIFICATION_DISPATCH_FLAG.bindTo(flagSource)
.with(FetchVector.Dimension.TENANT_ID, source.tenant().value())
.value();
}
private boolean skipSource(NotificationSource source) {
if (source.zoneId()
.map(z -> z.environment())
.map(e -> e == Environment.dev || e == Environment.perf)
.orElse(false)) {
return true;
}
return false;
}
private void dispatch(Notification notification, TenantContacts.Type type, Collection<? extends TenantContacts.Contact> contacts) {
switch (type) {
case EMAIL:
dispatch(notification, contacts.stream().map(c -> (TenantContacts.EmailContact) c).collect(Collectors.toList()));
break;
default:
throw new IllegalArgumentException("Unknown TenantContacts type " + type.name());
}
}
private void dispatch(Notification notification, Collection<TenantContacts.EmailContact> contacts) {
try {
var content = formatter.format(notification);
mailer.send(mailOf(content, contacts.stream()
.filter(c -> c.email().isVerified())
.map(c -> c.email().getEmailAddress())
.collect(Collectors.toList())));
} catch (MailerException e) {
log.log(Level.SEVERE, "Failed sending email", e);
} catch (MissingOptionalException e) {
log.log(Level.WARNING, "Missing value in required field '" + e.field() + "' for notification type: " + notification.type(), e);
}
}
@VisibleForTesting
static String linkify(String text) {
return urlPattern.matcher(text).replaceAll((res) -> String.format("<a href=\"%s\">%s</a>", res.group(), res.group()));
}
private String applicationIdSource(NotificationSource source) {
StringBuilder sb = new StringBuilder();
sb.append(source.tenant().value());
source.application().ifPresent(applicationName -> sb.append(".").append(applicationName.value()));
source.instance().ifPresent(instanceName -> sb.append(".").append(instanceName.value()));
return sb.toString();
}
} | class Notifier {
private static final String header = """
<div style="background:
<img
src="https:
style="width: auto; height: 34px; margin: 10px"
/>
</div>
<br>
""";
private final CuratorDb curatorDb;
private final Mailer mailer;
private final FlagSource flagSource;
private final NotificationFormatter formatter;
private final URI dashboardUri;
private static final Logger log = Logger.getLogger(Notifier.class.getName());
private static final Pattern urlPattern = Pattern.compile("https:
public Notifier(CuratorDb curatorDb, ZoneRegistry zoneRegistry, Mailer mailer, FlagSource flagSource) {
this.curatorDb = Objects.requireNonNull(curatorDb);
this.mailer = Objects.requireNonNull(mailer);
this.flagSource = Objects.requireNonNull(flagSource);
this.formatter = new NotificationFormatter(zoneRegistry);
this.dashboardUri = zoneRegistry.dashboardUrl();
}
public void dispatch(List<Notification> notifications, NotificationSource source) {
if (!dispatchEnabled(source) || skipSource(source)) {
return;
}
if (notifications.isEmpty()) {
return;
}
var tenant = curatorDb.readTenant(source.tenant());
tenant.stream().forEach(t -> {
if (t instanceof CloudTenant) {
var ct = (CloudTenant) t;
ct.info().contacts().all().stream()
.filter(c -> c.audiences().contains(TenantContacts.Audience.NOTIFICATIONS))
.collect(Collectors.groupingBy(TenantContacts.Contact::type, Collectors.toList()))
.entrySet()
.forEach(e -> notifications.forEach(n -> dispatch(n, e.getKey(), e.getValue())));
}
});
}
public void dispatch(Notification notification) {
dispatch(List.of(notification), notification.source());
}
private boolean dispatchEnabled(NotificationSource source) {
return Flags.NOTIFICATION_DISPATCH_FLAG.bindTo(flagSource)
.with(FetchVector.Dimension.TENANT_ID, source.tenant().value())
.value();
}
private boolean skipSource(NotificationSource source) {
if (source.zoneId()
.map(z -> z.environment())
.map(e -> e == Environment.dev || e == Environment.perf)
.orElse(false)) {
return true;
}
return false;
}
private void dispatch(Notification notification, TenantContacts.Type type, Collection<? extends TenantContacts.Contact> contacts) {
switch (type) {
case EMAIL:
dispatch(notification, contacts.stream().map(c -> (TenantContacts.EmailContact) c).collect(Collectors.toList()));
break;
default:
throw new IllegalArgumentException("Unknown TenantContacts type " + type.name());
}
}
private void dispatch(Notification notification, Collection<TenantContacts.EmailContact> contacts) {
try {
var content = formatter.format(notification);
mailer.send(mailOf(content, contacts.stream()
.filter(c -> c.email().isVerified())
.map(c -> c.email().getEmailAddress())
.collect(Collectors.toList())));
} catch (MailerException e) {
log.log(Level.SEVERE, "Failed sending email", e);
} catch (MissingOptionalException e) {
log.log(Level.WARNING, "Missing value in required field '" + e.field() + "' for notification type: " + notification.type(), e);
}
}
@VisibleForTesting
static String linkify(String text) {
return urlPattern.matcher(text).replaceAll((res) -> String.format("<a href=\"%s\">%s</a>", res.group(), res.group()));
}
private String applicationIdSource(NotificationSource source) {
StringBuilder sb = new StringBuilder();
sb.append(source.tenant().value());
source.application().ifPresent(applicationName -> sb.append(".").append(applicationName.value()));
source.instance().ifPresent(instanceName -> sb.append(".").append(instanceName.value()));
return sb.toString();
}
private String accountNotificationsUri(TenantName tenant) {
return new UriBuilder(dashboardUri)
.append("tenant/")
.append(tenant.value())
.append("account/notifications")
.toString();
}
private String consoleUri(String path) {
return new UriBuilder(dashboardUri).append(path).toString();
}
} |
Division on zero possible if `isEmpty()` returns `true`? | public boolean isGroupCoverageSufficient(long activeDocuments, long medianDocuments) {
double documentCoverage = 100.0 * (double) activeDocuments / medianDocuments;
return ! (medianDocuments > 0 && documentCoverage < minActivedocsPercentage);
} | double documentCoverage = 100.0 * (double) activeDocuments / medianDocuments; | public boolean isGroupCoverageSufficient(long activeDocuments, long medianDocuments) {
if (medianDocuments <= 0) return true;
double documentCoverage = 100.0 * (double) activeDocuments / medianDocuments;
return documentCoverage >= minActivedocsPercentage;
} | class SearchGroupsImpl implements SearchGroups {
private final Map<Integer, Group> groups;
private final double minActivedocsPercentage;
public SearchGroupsImpl(Map<Integer, Group> groups, double minActivedocsPercentage) {
this.groups = Map.copyOf(groups);
this.minActivedocsPercentage = minActivedocsPercentage;
}
@Override public Group get(int id) { return groups.get(id); }
@Override public Set<Integer> keys() { return groups.keySet();}
@Override public Collection<Group> groups() { return groups.values(); }
@Override public int size() { return groups.size(); }
@Override
public boolean isPartialGroupCoverageSufficient(Collection<Node> nodes) {
if (size() == 1)
return true;
long activeDocuments = nodes.stream().mapToLong(Node::getActiveDocuments).sum();
return isGroupCoverageSufficient(activeDocuments, medianDocumentsPerGroup());
}
public long medianDocumentsPerGroup() {
if (isEmpty()) return 0;
var activeDocuments = groups().stream().map(Group::activeDocuments).collect(Collectors.toList());
return (long) Quantiles.median().compute(activeDocuments);
}
public static SearchGroupsImpl buildGroupListForTest(int numGroups, int nodesPerGroup, double minActivedocsPercentage) {
return new SearchGroupsImpl(buildGroupMapForTest(numGroups, nodesPerGroup), minActivedocsPercentage);
}
public static Map<Integer, Group> buildGroupMapForTest(int numGroups, int nodesPerGroup) {
Map<Integer, Group> groups = new HashMap<>();
int distributionKey = 0;
for (int group = 0; group < numGroups; group++) {
List<Node> groupNodes = new ArrayList<>();
for (int i = 0; i < nodesPerGroup; i++) {
Node node = new Node(distributionKey, "host" + distributionKey, group);
node.setWorking(true);
groupNodes.add(node);
distributionKey++;
}
Group g = new Group(group, groupNodes);
groups.put(group, g);
}
return Map.copyOf(groups);
}
} | class SearchGroupsImpl implements SearchGroups {
private final Map<Integer, Group> groups;
private final double minActivedocsPercentage;
public SearchGroupsImpl(Map<Integer, Group> groups, double minActivedocsPercentage) {
this.groups = Map.copyOf(groups);
this.minActivedocsPercentage = minActivedocsPercentage;
}
@Override public Group get(int id) { return groups.get(id); }
@Override public Set<Integer> keys() { return groups.keySet();}
@Override public Collection<Group> groups() { return groups.values(); }
@Override public int size() { return groups.size(); }
@Override
public boolean isPartialGroupCoverageSufficient(Collection<Node> nodes) {
if (size() == 1)
return true;
long activeDocuments = nodes.stream().mapToLong(Node::getActiveDocuments).sum();
return isGroupCoverageSufficient(activeDocuments, medianDocumentsPerGroup());
}
public long medianDocumentsPerGroup() {
if (isEmpty()) return 0;
var activeDocuments = groups().stream().map(Group::activeDocuments).collect(Collectors.toList());
return (long) Quantiles.median().compute(activeDocuments);
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.