comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
Yes. | private void confirmApplicationOwnerships() {
ApplicationList.from(controller().applications().asList())
.withProjectId()
.hasProductionDeployment()
.asList()
.stream()
.filter(application -> application.createdAt().isBefore(controller().clock().instant().minus(Duration.ofDays(90))))
.forEach(application -> {
try {
Tenant tenant = tenantOf(application.id());
tenant.contact().ifPresent(contact -> {
ownershipIssues.confirmOwnership(application.ownershipIssueId(),
application.id(),
determineAssignee(tenant, application),
contact)
.ifPresent(newIssueId -> store(newIssueId, application.id()));
});
}
catch (RuntimeException e) {
log.log(Level.INFO, "Exception caught when attempting to file an issue for '" + application.id() + "': " + Exceptions.toMessageString(e));
}
});
} | tenant.contact().ifPresent(contact -> { | private void confirmApplicationOwnerships() {
ApplicationList.from(controller().applications().asList())
.withProjectId()
.hasProductionDeployment()
.asList()
.stream()
.filter(application -> application.createdAt().isBefore(controller().clock().instant().minus(Duration.ofDays(90))))
.forEach(application -> {
try {
Tenant tenant = tenantOf(application.id());
tenant.contact().ifPresent(contact -> {
ownershipIssues.confirmOwnership(application.ownershipIssueId(),
application.id(),
determineAssignee(tenant, application),
contact)
.ifPresent(newIssueId -> store(newIssueId, application.id()));
});
}
catch (RuntimeException e) {
log.log(Level.INFO, "Exception caught when attempting to file an issue for '" + application.id() + "': " + Exceptions.toMessageString(e));
}
});
} | class ApplicationOwnershipConfirmer extends Maintainer {
private final OwnershipIssues ownershipIssues;
public ApplicationOwnershipConfirmer(Controller controller, Duration interval, JobControl jobControl, OwnershipIssues ownershipIssues) {
super(controller, interval, jobControl);
this.ownershipIssues = ownershipIssues;
}
@Override
protected void maintain() {
confirmApplicationOwnerships();
ensureConfirmationResponses();
updateConfirmedApplicationOwners();
}
/** File an ownership issue with the owners of all applications we know about. */
/** Escalate ownership issues which have not been closed before a defined amount of time has passed. */
private void ensureConfirmationResponses() {
for (Application application : controller().applications().asList())
application.ownershipIssueId().ifPresent(issueId -> {
try {
Tenant tenant = tenantOf(application.id());
ownershipIssues.ensureResponse(issueId, tenant.type() == Tenant.Type.athenz ? tenant.contact() : Optional.empty());
}
catch (RuntimeException e) {
log.log(Level.INFO, "Exception caught when attempting to escalate issue with id '" + issueId + "': " + Exceptions.toMessageString(e));
}
});
}
private void updateConfirmedApplicationOwners() {
ApplicationList.from(controller().applications().asList())
.withProjectId()
.hasProductionDeployment()
.asList()
.stream()
.filter(application -> application.ownershipIssueId().isPresent())
.forEach(application -> {
IssueId ownershipIssueId = application.ownershipIssueId().get();
ownershipIssues.getConfirmedOwner(ownershipIssueId).ifPresent(owner -> {
controller().applications().lockIfPresent(application.id(), lockedApplication ->
controller().applications().store(lockedApplication.withOwner(owner)));
});
});
}
private User determineAssignee(Tenant tenant, Application application) {
return application.owner().orElse(
tenant instanceof UserTenant ? userFor(tenant) : null
);
}
private Tenant tenantOf(ApplicationId applicationId) {
return controller().tenants().get(applicationId.tenant())
.orElseThrow(() -> new IllegalStateException("No tenant found for application " + applicationId));
}
protected User userFor(Tenant tenant) {
return User.from(tenant.name().value().replaceFirst(Tenant.userPrefix, ""));
}
protected void store(IssueId issueId, ApplicationId applicationId) {
controller().applications().lockIfPresent(applicationId, application ->
controller().applications().store(application.withOwnershipIssueId(issueId)));
}
} | class ApplicationOwnershipConfirmer extends Maintainer {
private final OwnershipIssues ownershipIssues;
public ApplicationOwnershipConfirmer(Controller controller, Duration interval, JobControl jobControl, OwnershipIssues ownershipIssues) {
super(controller, interval, jobControl);
this.ownershipIssues = ownershipIssues;
}
@Override
protected void maintain() {
confirmApplicationOwnerships();
ensureConfirmationResponses();
updateConfirmedApplicationOwners();
}
/** File an ownership issue with the owners of all applications we know about. */
/** Escalate ownership issues which have not been closed before a defined amount of time has passed. */
private void ensureConfirmationResponses() {
for (Application application : controller().applications().asList())
application.ownershipIssueId().ifPresent(issueId -> {
try {
Tenant tenant = tenantOf(application.id());
ownershipIssues.ensureResponse(issueId, tenant.type() == Tenant.Type.athenz ? tenant.contact() : Optional.empty());
}
catch (RuntimeException e) {
log.log(Level.INFO, "Exception caught when attempting to escalate issue with id '" + issueId + "': " + Exceptions.toMessageString(e));
}
});
}
private void updateConfirmedApplicationOwners() {
ApplicationList.from(controller().applications().asList())
.withProjectId()
.hasProductionDeployment()
.asList()
.stream()
.filter(application -> application.ownershipIssueId().isPresent())
.forEach(application -> {
IssueId ownershipIssueId = application.ownershipIssueId().get();
ownershipIssues.getConfirmedOwner(ownershipIssueId).ifPresent(owner -> {
controller().applications().lockIfPresent(application.id(), lockedApplication ->
controller().applications().store(lockedApplication.withOwner(owner)));
});
});
}
private User determineAssignee(Tenant tenant, Application application) {
return application.owner().orElse(
tenant instanceof UserTenant ? userFor(tenant) : null
);
}
private Tenant tenantOf(ApplicationId applicationId) {
return controller().tenants().get(applicationId.tenant())
.orElseThrow(() -> new IllegalStateException("No tenant found for application " + applicationId));
}
protected User userFor(Tenant tenant) {
return User.from(tenant.name().value().replaceFirst(Tenant.userPrefix, ""));
}
protected void store(IssueId issueId, ApplicationId applicationId) {
controller().applications().lockIfPresent(applicationId, application ->
controller().applications().store(application.withOwnershipIssueId(issueId)));
}
} |
Doesn't this cover both cases? In any case, creating the exception only when needed is probably preferable. | private static String requireName(String name) {
IllegalArgumentException e = new IllegalArgumentException("Unexpected non-normalized path found in zip content");
if (Arrays.asList(name.split("/")).contains("..")) throw e;
if (!name.equals(Path.of(name).normalize().toString())) throw e;
return name;
} | if (!name.equals(Path.of(name).normalize().toString())) throw e; | private static String requireName(String name) {
IllegalArgumentException e = new IllegalArgumentException("Unexpected non-normalized path found in zip content");
if (Arrays.asList(name.split("/")).contains("..")) throw e;
if (!name.equals(Path.of(name).normalize().toString())) throw e;
return name;
} | class ZipStreamReader {
private final ImmutableList<ZipEntryWithContent> entries;
private final int maxEntrySizeInBytes;
public ZipStreamReader(InputStream input, Predicate<String> entryNameMatcher, int maxEntrySizeInBytes) {
this.maxEntrySizeInBytes = maxEntrySizeInBytes;
try (ZipInputStream zipInput = new ZipInputStream(input)) {
ImmutableList.Builder<ZipEntryWithContent> builder = new ImmutableList.Builder<>();
ZipEntry zipEntry;
while (null != (zipEntry = zipInput.getNextEntry())) {
if (!entryNameMatcher.test(requireName(zipEntry.getName()))) continue;
builder.add(new ZipEntryWithContent(zipEntry, readContent(zipInput)));
}
entries = builder.build();
} catch (IOException e) {
throw new UncheckedIOException("IO error reading zip content", e);
}
}
private byte[] readContent(ZipInputStream zipInput) {
try (ByteArrayOutputStream bis = new ByteArrayOutputStream()) {
byte[] buffer = new byte[2048];
int read;
long size = 0;
while ( -1 != (read = zipInput.read(buffer))) {
size += read;
if (size > maxEntrySizeInBytes) {
throw new IllegalArgumentException("Entry in zip content exceeded size limit of " +
maxEntrySizeInBytes + " bytes");
}
bis.write(buffer, 0, read);
}
return bis.toByteArray();
} catch (IOException e) {
throw new UncheckedIOException("Failed reading from zipped content", e);
}
}
public List<ZipEntryWithContent> entries() { return entries; }
public static class ZipEntryWithContent {
private final ZipEntry zipEntry;
private final byte[] content;
public ZipEntryWithContent(ZipEntry zipEntry, byte[] content) {
this.zipEntry = zipEntry;
this.content = content;
}
public ZipEntry zipEntry() { return zipEntry; }
public byte[] content() { return content; }
}
} | class ZipStreamReader {
private final ImmutableList<ZipEntryWithContent> entries;
private final int maxEntrySizeInBytes;
public ZipStreamReader(InputStream input, Predicate<String> entryNameMatcher, int maxEntrySizeInBytes) {
this.maxEntrySizeInBytes = maxEntrySizeInBytes;
try (ZipInputStream zipInput = new ZipInputStream(input)) {
ImmutableList.Builder<ZipEntryWithContent> builder = new ImmutableList.Builder<>();
ZipEntry zipEntry;
while (null != (zipEntry = zipInput.getNextEntry())) {
if (!entryNameMatcher.test(requireName(zipEntry.getName()))) continue;
builder.add(new ZipEntryWithContent(zipEntry, readContent(zipInput)));
}
entries = builder.build();
} catch (IOException e) {
throw new UncheckedIOException("IO error reading zip content", e);
}
}
private byte[] readContent(ZipInputStream zipInput) {
try (ByteArrayOutputStream bis = new ByteArrayOutputStream()) {
byte[] buffer = new byte[2048];
int read;
long size = 0;
while ( -1 != (read = zipInput.read(buffer))) {
size += read;
if (size > maxEntrySizeInBytes) {
throw new IllegalArgumentException("Entry in zip content exceeded size limit of " +
maxEntrySizeInBytes + " bytes");
}
bis.write(buffer, 0, read);
}
return bis.toByteArray();
} catch (IOException e) {
throw new UncheckedIOException("Failed reading from zipped content", e);
}
}
public List<ZipEntryWithContent> entries() { return entries; }
public static class ZipEntryWithContent {
private final ZipEntry zipEntry;
private final byte[] content;
public ZipEntryWithContent(ZipEntry zipEntry, byte[] content) {
this.zipEntry = zipEntry;
this.content = content;
}
public ZipEntry zipEntry() { return zipEntry; }
public byte[] content() { return content; }
}
} |
Unfortunately not, `normalize` cannot normalize relative paths. E.g it can normalize `/foo/bar/../../services.xml`, but not `../../services.xml`. | private static String requireName(String name) {
IllegalArgumentException e = new IllegalArgumentException("Unexpected non-normalized path found in zip content");
if (Arrays.asList(name.split("/")).contains("..")) throw e;
if (!name.equals(Path.of(name).normalize().toString())) throw e;
return name;
} | if (!name.equals(Path.of(name).normalize().toString())) throw e; | private static String requireName(String name) {
IllegalArgumentException e = new IllegalArgumentException("Unexpected non-normalized path found in zip content");
if (Arrays.asList(name.split("/")).contains("..")) throw e;
if (!name.equals(Path.of(name).normalize().toString())) throw e;
return name;
} | class ZipStreamReader {
private final ImmutableList<ZipEntryWithContent> entries;
private final int maxEntrySizeInBytes;
public ZipStreamReader(InputStream input, Predicate<String> entryNameMatcher, int maxEntrySizeInBytes) {
this.maxEntrySizeInBytes = maxEntrySizeInBytes;
try (ZipInputStream zipInput = new ZipInputStream(input)) {
ImmutableList.Builder<ZipEntryWithContent> builder = new ImmutableList.Builder<>();
ZipEntry zipEntry;
while (null != (zipEntry = zipInput.getNextEntry())) {
if (!entryNameMatcher.test(requireName(zipEntry.getName()))) continue;
builder.add(new ZipEntryWithContent(zipEntry, readContent(zipInput)));
}
entries = builder.build();
} catch (IOException e) {
throw new UncheckedIOException("IO error reading zip content", e);
}
}
private byte[] readContent(ZipInputStream zipInput) {
try (ByteArrayOutputStream bis = new ByteArrayOutputStream()) {
byte[] buffer = new byte[2048];
int read;
long size = 0;
while ( -1 != (read = zipInput.read(buffer))) {
size += read;
if (size > maxEntrySizeInBytes) {
throw new IllegalArgumentException("Entry in zip content exceeded size limit of " +
maxEntrySizeInBytes + " bytes");
}
bis.write(buffer, 0, read);
}
return bis.toByteArray();
} catch (IOException e) {
throw new UncheckedIOException("Failed reading from zipped content", e);
}
}
public List<ZipEntryWithContent> entries() { return entries; }
public static class ZipEntryWithContent {
private final ZipEntry zipEntry;
private final byte[] content;
public ZipEntryWithContent(ZipEntry zipEntry, byte[] content) {
this.zipEntry = zipEntry;
this.content = content;
}
public ZipEntry zipEntry() { return zipEntry; }
public byte[] content() { return content; }
}
} | class ZipStreamReader {
private final ImmutableList<ZipEntryWithContent> entries;
private final int maxEntrySizeInBytes;
public ZipStreamReader(InputStream input, Predicate<String> entryNameMatcher, int maxEntrySizeInBytes) {
this.maxEntrySizeInBytes = maxEntrySizeInBytes;
try (ZipInputStream zipInput = new ZipInputStream(input)) {
ImmutableList.Builder<ZipEntryWithContent> builder = new ImmutableList.Builder<>();
ZipEntry zipEntry;
while (null != (zipEntry = zipInput.getNextEntry())) {
if (!entryNameMatcher.test(requireName(zipEntry.getName()))) continue;
builder.add(new ZipEntryWithContent(zipEntry, readContent(zipInput)));
}
entries = builder.build();
} catch (IOException e) {
throw new UncheckedIOException("IO error reading zip content", e);
}
}
private byte[] readContent(ZipInputStream zipInput) {
try (ByteArrayOutputStream bis = new ByteArrayOutputStream()) {
byte[] buffer = new byte[2048];
int read;
long size = 0;
while ( -1 != (read = zipInput.read(buffer))) {
size += read;
if (size > maxEntrySizeInBytes) {
throw new IllegalArgumentException("Entry in zip content exceeded size limit of " +
maxEntrySizeInBytes + " bytes");
}
bis.write(buffer, 0, read);
}
return bis.toByteArray();
} catch (IOException e) {
throw new UncheckedIOException("Failed reading from zipped content", e);
}
}
public List<ZipEntryWithContent> entries() { return entries; }
public static class ZipEntryWithContent {
private final ZipEntry zipEntry;
private final byte[] content;
public ZipEntryWithContent(ZipEntry zipEntry, byte[] content) {
this.zipEntry = zipEntry;
this.content = content;
}
public ZipEntry zipEntry() { return zipEntry; }
public byte[] content() { return content; }
}
} |
With multiple non-gzip files, this gzip an already gzipped stream? | void writeLogs(OutputStream outputStream, Instant earliestLogThreshold, Instant latestLogThreshold) {
try {
for (Path file : getMatchingFiles(earliestLogThreshold, latestLogThreshold)) {
if (!file.toString().endsWith(".gz")) {
outputStream = new GZIPOutputStream(outputStream);
}
Files.copy(file, outputStream);
}
outputStream.close();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
} | outputStream = new GZIPOutputStream(outputStream); | void writeLogs(OutputStream outputStream, Instant earliestLogThreshold, Instant latestLogThreshold) {
try {
for (Path file : getMatchingFiles(earliestLogThreshold, latestLogThreshold)) {
if (file.toString().endsWith(".gz")) {
Files.copy(file, outputStream);
} else {
OutputStream zip = new GZIPOutputStream(outputStream);
Files.copy(file, zip);
zip.close();
}
}
outputStream.close();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
} | class LogReader {
private final Path logDirectory;
private final Pattern logFilePattern;
LogReader(String logDirectory, String logFilePattern) {
this(Paths.get(Defaults.getDefaults().underVespaHome(logDirectory)), Pattern.compile(logFilePattern));
}
LogReader(Path logDirectory, Pattern logFilePattern) {
this.logDirectory = logDirectory;
this.logFilePattern = logFilePattern;
}
JSONObject readLogs(Instant earliestLogThreshold, Instant latestLogThreshold) throws IOException, JSONException {
JSONObject json = new JSONObject();
latestLogThreshold = latestLogThreshold.plus(Duration.ofMinutes(5));
for (Path file : getMatchingFiles(earliestLogThreshold, latestLogThreshold)) {
StringBuilder filenameBuilder = new StringBuilder();
logDirectory.relativize(file).iterator().forEachRemaining(p -> filenameBuilder.append("-").append(p.getFileName().toString()));
byte[] fileData = file.toString().endsWith(".gz") ? new GZIPInputStream(new ByteArrayInputStream(Files.readAllBytes(file))).readAllBytes() : Files.readAllBytes(file);
json.put(filenameBuilder.substring(1), Base64.getEncoder().encodeToString(fileData));
}
return json;
}
private List<Path> getMatchingFiles(Instant earliestLogThreshold, Instant latestLogThreshold) {
final List<Pair<Path, Instant>> paths = new LinkedList<>();
try {
Files.walkFileTree(logDirectory, new SimpleFileVisitor<>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
Instant lastModified = attrs.lastModifiedTime().toInstant();
if (lastModified.isAfter(earliestLogThreshold) &&
lastModified.isBefore(latestLogThreshold) &&
logFilePattern.matcher(file.getFileName().toString()).matches()) {
paths.add(new Pair<>(file, lastModified));
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
return FileVisitResult.CONTINUE;
}
});
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return paths.stream()
.sorted(Comparator.comparing(Pair::getSecond))
.map(Pair::getFirst)
.collect(Collectors.toList());
}
} | class LogReader {
private final Path logDirectory;
private final Pattern logFilePattern;
LogReader(String logDirectory, String logFilePattern) {
this(Paths.get(Defaults.getDefaults().underVespaHome(logDirectory)), Pattern.compile(logFilePattern));
}
LogReader(Path logDirectory, Pattern logFilePattern) {
this.logDirectory = logDirectory;
this.logFilePattern = logFilePattern;
}
JSONObject readLogs(Instant earliestLogThreshold, Instant latestLogThreshold) throws IOException, JSONException {
JSONObject json = new JSONObject();
latestLogThreshold = latestLogThreshold.plus(Duration.ofMinutes(5));
for (Path file : getMatchingFiles(earliestLogThreshold, latestLogThreshold)) {
StringBuilder filenameBuilder = new StringBuilder();
logDirectory.relativize(file).iterator().forEachRemaining(p -> filenameBuilder.append("-").append(p.getFileName().toString()));
byte[] fileData = file.toString().endsWith(".gz") ? new GZIPInputStream(new ByteArrayInputStream(Files.readAllBytes(file))).readAllBytes() : Files.readAllBytes(file);
json.put(filenameBuilder.substring(1), Base64.getEncoder().encodeToString(fileData));
}
return json;
}
private List<Path> getMatchingFiles(Instant earliestLogThreshold, Instant latestLogThreshold) {
final List<Pair<Path, Instant>> paths = new LinkedList<>();
try {
Files.walkFileTree(logDirectory, new SimpleFileVisitor<>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
Instant lastModified = attrs.lastModifiedTime().toInstant();
if (lastModified.isAfter(earliestLogThreshold) &&
lastModified.isBefore(latestLogThreshold) &&
logFilePattern.matcher(file.getFileName().toString()).matches()) {
paths.add(new Pair<>(file, lastModified));
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
return FileVisitResult.CONTINUE;
}
});
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return paths.stream()
.sorted(Comparator.comparing(Pair::getSecond))
.map(Pair::getFirst)
.collect(Collectors.toList());
}
} |
To make it more explicit: AthenzUser.fromUserId(userName) | private AthenzDomain addTenantAthenzDomain(String domainName, String userName) {
AthenzClientFactoryMock mock = (AthenzClientFactoryMock) containerTester.container().components()
.getComponent(AthenzClientFactoryMock.class.getName());
AthenzDomain athensDomain = new AthenzDomain(domainName);
AthenzDbMock.Domain domain = new AthenzDbMock.Domain(athensDomain);
domain.markAsVespaTenant();
domain.admin(AthenzIdentities.from(new AthenzDomain("user"), userName));
mock.getSetup().addDomain(domain);
return athensDomain;
} | domain.admin(AthenzIdentities.from(new AthenzDomain("user"), userName)); | private AthenzDomain addTenantAthenzDomain(String domainName, String userName) {
AthenzClientFactoryMock mock = (AthenzClientFactoryMock) containerTester.container().components()
.getComponent(AthenzClientFactoryMock.class.getName());
AthenzDomain athensDomain = new AthenzDomain(domainName);
AthenzDbMock.Domain domain = new AthenzDbMock.Domain(athensDomain);
domain.markAsVespaTenant();
domain.admin(new AthenzUser(userName));
mock.getSetup().addDomain(domain);
return athensDomain;
} | class ContainerControllerTester {
private final ContainerTester containerTester;
private final Upgrader upgrader;
public ContainerControllerTester(JDisc container, String responseFilePath) {
containerTester = new ContainerTester(container, responseFilePath);
CuratorDb curatorDb = new MockCuratorDb();
curatorDb.writeUpgradesPerMinute(100);
upgrader = new Upgrader(controller(), Duration.ofDays(1), new JobControl(curatorDb), curatorDb);
}
public Controller controller() { return containerTester.controller(); }
public ArtifactRepositoryMock artifactRepository() {
return (ArtifactRepositoryMock) containerTester.container().components()
.getComponent(ArtifactRepositoryMock.class.getName());
}
public Upgrader upgrader() { return upgrader; }
/** Returns the wrapped generic container tester */
public ContainerTester containerTester() { return containerTester; }
public Application createApplication() {
return createApplication("domain1","tenant1",
"application1");
}
public Application createApplication(String athensDomain, String tenant, String application) {
AthenzDomain domain1 = addTenantAthenzDomain(athensDomain, "user");
AthenzTenantPermit tenantPermit = new AthenzTenantPermit(TenantName.from(tenant),
new AthenzPrincipal(new AthenzUser("user")),
Optional.of(domain1),
Optional.of(new Property("property1")),
Optional.of(new PropertyId("1234")),
new OktaAccessToken("okta-token"));
controller().tenants().create(tenantPermit);
ApplicationId app = ApplicationId.from(tenant, application, "default");
AthenzApplicationPermit applicationPermit = new AthenzApplicationPermit(app,
domain1,
new OktaAccessToken("okta-token"));
return controller().applications().createApplication(app, Optional.of(applicationPermit));
}
public Application deploy(Application application, ApplicationPackage applicationPackage, ZoneId zone) {
controller().applications().deploy(application.id(), zone, Optional.of(applicationPackage),
new DeployOptions(false, Optional.empty(), false, false));
return application;
}
/** Notify the controller about a job completing */
public BuildJob jobCompletion(JobType job) {
return new BuildJob(this::notifyJobCompletion, artifactRepository()).type(job);
}
public void assertResponse(Request request, File expectedResponse) {
containerTester.assertResponse(request, expectedResponse);
}
public void assertResponse(Request request, String expectedResponse, int expectedStatusCode) {
containerTester.assertResponse(request, expectedResponse, expectedStatusCode);
}
/*
* Authorize action on tenantDomain/application for a given screwdriverId
*/
public void authorize(AthenzDomain tenantDomain, ScrewdriverId screwdriverId, ApplicationAction action, Application application) {
AthenzClientFactoryMock mock = (AthenzClientFactoryMock) containerTester.container().components()
.getComponent(AthenzClientFactoryMock.class.getName());
mock.getSetup()
.domains.get(tenantDomain)
.applications.get(new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(application.id().application().value()))
.addRoleMember(action, HostedAthenzIdentities.from(screwdriverId));
}
private void notifyJobCompletion(DeploymentJobs.JobReport report) {
MockBuildService buildService = (MockBuildService) containerTester.container().components().getComponent(MockBuildService.class.getName());
if (report.jobType() != component && ! buildService.remove(report.buildJob()))
throw new IllegalArgumentException(report.jobType() + " is not running for " + report.applicationId());
assertFalse("Unexpected entry '" + report.jobType() + "@" + report.projectId() + " in: " + buildService.jobs(),
buildService.remove(report.buildJob()));
controller().applications().deploymentTrigger().notifyOfCompletion(report);
controller().applications().deploymentTrigger().triggerReadyJobs();
}
} | class ContainerControllerTester {
private final ContainerTester containerTester;
private final Upgrader upgrader;
public ContainerControllerTester(JDisc container, String responseFilePath) {
containerTester = new ContainerTester(container, responseFilePath);
CuratorDb curatorDb = new MockCuratorDb();
curatorDb.writeUpgradesPerMinute(100);
upgrader = new Upgrader(controller(), Duration.ofDays(1), new JobControl(curatorDb), curatorDb);
}
public Controller controller() { return containerTester.controller(); }
public ArtifactRepositoryMock artifactRepository() {
return (ArtifactRepositoryMock) containerTester.container().components()
.getComponent(ArtifactRepositoryMock.class.getName());
}
public Upgrader upgrader() { return upgrader; }
/** Returns the wrapped generic container tester */
public ContainerTester containerTester() { return containerTester; }
public Application createApplication() {
return createApplication("domain1","tenant1",
"application1");
}
public Application createApplication(String athensDomain, String tenant, String application) {
AthenzDomain domain1 = addTenantAthenzDomain(athensDomain, "user");
AthenzTenantPermit tenantPermit = new AthenzTenantPermit(TenantName.from(tenant),
new AthenzPrincipal(new AthenzUser("user")),
Optional.of(domain1),
Optional.of(new Property("property1")),
Optional.of(new PropertyId("1234")),
new OktaAccessToken("okta-token"));
controller().tenants().create(tenantPermit);
ApplicationId app = ApplicationId.from(tenant, application, "default");
AthenzApplicationPermit applicationPermit = new AthenzApplicationPermit(app,
domain1,
new OktaAccessToken("okta-token"));
return controller().applications().createApplication(app, Optional.of(applicationPermit));
}
public Application deploy(Application application, ApplicationPackage applicationPackage, ZoneId zone) {
controller().applications().deploy(application.id(), zone, Optional.of(applicationPackage),
new DeployOptions(false, Optional.empty(), false, false));
return application;
}
/** Notify the controller about a job completing */
public BuildJob jobCompletion(JobType job) {
return new BuildJob(this::notifyJobCompletion, artifactRepository()).type(job);
}
public void assertResponse(Request request, File expectedResponse) {
containerTester.assertResponse(request, expectedResponse);
}
public void assertResponse(Request request, String expectedResponse, int expectedStatusCode) {
containerTester.assertResponse(request, expectedResponse, expectedStatusCode);
}
/*
* Authorize action on tenantDomain/application for a given screwdriverId
*/
public void authorize(AthenzDomain tenantDomain, ScrewdriverId screwdriverId, ApplicationAction action, Application application) {
AthenzClientFactoryMock mock = (AthenzClientFactoryMock) containerTester.container().components()
.getComponent(AthenzClientFactoryMock.class.getName());
mock.getSetup()
.domains.get(tenantDomain)
.applications.get(new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(application.id().application().value()))
.addRoleMember(action, HostedAthenzIdentities.from(screwdriverId));
}
private void notifyJobCompletion(DeploymentJobs.JobReport report) {
MockBuildService buildService = (MockBuildService) containerTester.container().components().getComponent(MockBuildService.class.getName());
if (report.jobType() != component && ! buildService.remove(report.buildJob()))
throw new IllegalArgumentException(report.jobType() + " is not running for " + report.applicationId());
assertFalse("Unexpected entry '" + report.jobType() + "@" + report.projectId() + " in: " + buildService.jobs(),
buildService.remove(report.buildJob()));
controller().applications().deploymentTrigger().notifyOfCompletion(report);
controller().applications().deploymentTrigger().triggerReadyJobs();
}
} |
Done. | private AthenzDomain addTenantAthenzDomain(String domainName, String userName) {
AthenzClientFactoryMock mock = (AthenzClientFactoryMock) containerTester.container().components()
.getComponent(AthenzClientFactoryMock.class.getName());
AthenzDomain athensDomain = new AthenzDomain(domainName);
AthenzDbMock.Domain domain = new AthenzDbMock.Domain(athensDomain);
domain.markAsVespaTenant();
domain.admin(AthenzIdentities.from(new AthenzDomain("user"), userName));
mock.getSetup().addDomain(domain);
return athensDomain;
} | domain.admin(AthenzIdentities.from(new AthenzDomain("user"), userName)); | private AthenzDomain addTenantAthenzDomain(String domainName, String userName) {
AthenzClientFactoryMock mock = (AthenzClientFactoryMock) containerTester.container().components()
.getComponent(AthenzClientFactoryMock.class.getName());
AthenzDomain athensDomain = new AthenzDomain(domainName);
AthenzDbMock.Domain domain = new AthenzDbMock.Domain(athensDomain);
domain.markAsVespaTenant();
domain.admin(new AthenzUser(userName));
mock.getSetup().addDomain(domain);
return athensDomain;
} | class ContainerControllerTester {
private final ContainerTester containerTester;
private final Upgrader upgrader;
public ContainerControllerTester(JDisc container, String responseFilePath) {
containerTester = new ContainerTester(container, responseFilePath);
CuratorDb curatorDb = new MockCuratorDb();
curatorDb.writeUpgradesPerMinute(100);
upgrader = new Upgrader(controller(), Duration.ofDays(1), new JobControl(curatorDb), curatorDb);
}
public Controller controller() { return containerTester.controller(); }
public ArtifactRepositoryMock artifactRepository() {
return (ArtifactRepositoryMock) containerTester.container().components()
.getComponent(ArtifactRepositoryMock.class.getName());
}
public Upgrader upgrader() { return upgrader; }
/** Returns the wrapped generic container tester */
public ContainerTester containerTester() { return containerTester; }
public Application createApplication() {
return createApplication("domain1","tenant1",
"application1");
}
public Application createApplication(String athensDomain, String tenant, String application) {
AthenzDomain domain1 = addTenantAthenzDomain(athensDomain, "user");
AthenzTenantPermit tenantPermit = new AthenzTenantPermit(TenantName.from(tenant),
new AthenzPrincipal(new AthenzUser("user")),
Optional.of(domain1),
Optional.of(new Property("property1")),
Optional.of(new PropertyId("1234")),
new OktaAccessToken("okta-token"));
controller().tenants().create(tenantPermit);
ApplicationId app = ApplicationId.from(tenant, application, "default");
AthenzApplicationPermit applicationPermit = new AthenzApplicationPermit(app,
domain1,
new OktaAccessToken("okta-token"));
return controller().applications().createApplication(app, Optional.of(applicationPermit));
}
public Application deploy(Application application, ApplicationPackage applicationPackage, ZoneId zone) {
controller().applications().deploy(application.id(), zone, Optional.of(applicationPackage),
new DeployOptions(false, Optional.empty(), false, false));
return application;
}
/** Notify the controller about a job completing */
public BuildJob jobCompletion(JobType job) {
return new BuildJob(this::notifyJobCompletion, artifactRepository()).type(job);
}
public void assertResponse(Request request, File expectedResponse) {
containerTester.assertResponse(request, expectedResponse);
}
public void assertResponse(Request request, String expectedResponse, int expectedStatusCode) {
containerTester.assertResponse(request, expectedResponse, expectedStatusCode);
}
/*
* Authorize action on tenantDomain/application for a given screwdriverId
*/
public void authorize(AthenzDomain tenantDomain, ScrewdriverId screwdriverId, ApplicationAction action, Application application) {
AthenzClientFactoryMock mock = (AthenzClientFactoryMock) containerTester.container().components()
.getComponent(AthenzClientFactoryMock.class.getName());
mock.getSetup()
.domains.get(tenantDomain)
.applications.get(new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(application.id().application().value()))
.addRoleMember(action, HostedAthenzIdentities.from(screwdriverId));
}
private void notifyJobCompletion(DeploymentJobs.JobReport report) {
MockBuildService buildService = (MockBuildService) containerTester.container().components().getComponent(MockBuildService.class.getName());
if (report.jobType() != component && ! buildService.remove(report.buildJob()))
throw new IllegalArgumentException(report.jobType() + " is not running for " + report.applicationId());
assertFalse("Unexpected entry '" + report.jobType() + "@" + report.projectId() + " in: " + buildService.jobs(),
buildService.remove(report.buildJob()));
controller().applications().deploymentTrigger().notifyOfCompletion(report);
controller().applications().deploymentTrigger().triggerReadyJobs();
}
} | class ContainerControllerTester {
private final ContainerTester containerTester;
private final Upgrader upgrader;
public ContainerControllerTester(JDisc container, String responseFilePath) {
containerTester = new ContainerTester(container, responseFilePath);
CuratorDb curatorDb = new MockCuratorDb();
curatorDb.writeUpgradesPerMinute(100);
upgrader = new Upgrader(controller(), Duration.ofDays(1), new JobControl(curatorDb), curatorDb);
}
public Controller controller() { return containerTester.controller(); }
public ArtifactRepositoryMock artifactRepository() {
return (ArtifactRepositoryMock) containerTester.container().components()
.getComponent(ArtifactRepositoryMock.class.getName());
}
public Upgrader upgrader() { return upgrader; }
/** Returns the wrapped generic container tester */
public ContainerTester containerTester() { return containerTester; }
public Application createApplication() {
return createApplication("domain1","tenant1",
"application1");
}
public Application createApplication(String athensDomain, String tenant, String application) {
AthenzDomain domain1 = addTenantAthenzDomain(athensDomain, "user");
AthenzTenantPermit tenantPermit = new AthenzTenantPermit(TenantName.from(tenant),
new AthenzPrincipal(new AthenzUser("user")),
Optional.of(domain1),
Optional.of(new Property("property1")),
Optional.of(new PropertyId("1234")),
new OktaAccessToken("okta-token"));
controller().tenants().create(tenantPermit);
ApplicationId app = ApplicationId.from(tenant, application, "default");
AthenzApplicationPermit applicationPermit = new AthenzApplicationPermit(app,
domain1,
new OktaAccessToken("okta-token"));
return controller().applications().createApplication(app, Optional.of(applicationPermit));
}
public Application deploy(Application application, ApplicationPackage applicationPackage, ZoneId zone) {
controller().applications().deploy(application.id(), zone, Optional.of(applicationPackage),
new DeployOptions(false, Optional.empty(), false, false));
return application;
}
/** Notify the controller about a job completing */
public BuildJob jobCompletion(JobType job) {
return new BuildJob(this::notifyJobCompletion, artifactRepository()).type(job);
}
public void assertResponse(Request request, File expectedResponse) {
containerTester.assertResponse(request, expectedResponse);
}
public void assertResponse(Request request, String expectedResponse, int expectedStatusCode) {
containerTester.assertResponse(request, expectedResponse, expectedStatusCode);
}
/*
* Authorize action on tenantDomain/application for a given screwdriverId
*/
public void authorize(AthenzDomain tenantDomain, ScrewdriverId screwdriverId, ApplicationAction action, Application application) {
AthenzClientFactoryMock mock = (AthenzClientFactoryMock) containerTester.container().components()
.getComponent(AthenzClientFactoryMock.class.getName());
mock.getSetup()
.domains.get(tenantDomain)
.applications.get(new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(application.id().application().value()))
.addRoleMember(action, HostedAthenzIdentities.from(screwdriverId));
}
private void notifyJobCompletion(DeploymentJobs.JobReport report) {
MockBuildService buildService = (MockBuildService) containerTester.container().components().getComponent(MockBuildService.class.getName());
if (report.jobType() != component && ! buildService.remove(report.buildJob()))
throw new IllegalArgumentException(report.jobType() + " is not running for " + report.applicationId());
assertFalse("Unexpected entry '" + report.jobType() + "@" + report.projectId() + " in: " + buildService.jobs(),
buildService.remove(report.buildJob()));
controller().applications().deploymentTrigger().notifyOfCompletion(report);
controller().applications().deploymentTrigger().triggerReadyJobs();
}
} |
Use single binding `*://*/logs`. | private void addLogHandler() {
Handler<?> logHandler = Handler.fromClassName(ContainerCluster.LOG_HANDLER_CLASS);
logHandler.addServerBindings("http:
addComponent(logHandler);
} | logHandler.addServerBindings("http: | private void addLogHandler() {
Handler<?> logHandler = Handler.fromClassName(ContainerCluster.LOG_HANDLER_CLASS);
logHandler.addServerBindings("*:
addComponent(logHandler);
} | class LogserverContainerCluster extends ContainerCluster<LogserverContainer> {
public LogserverContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) {
super(parent, subId, name, deployState);
addDefaultHandlersWithVip();
addLogHandler();
}
@Override
protected void myPrepare(DeployState deployState) { }
} | class LogserverContainerCluster extends ContainerCluster<LogserverContainer> {
public LogserverContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) {
super(parent, subId, name, deployState);
addDefaultHandlersWithVip();
addLogHandler();
}
@Override
protected void doPrepare(DeployState deployState) { }
} |
If the timeout is irrelevant when graceful=false, it should not be printed. | public void close() {
try {
log.log(Level.INFO, String.format("Shutting down server (graceful=%b, timeout=%.1fs)", isGracefulShutdownEnabled(), server.getStopTimeout()/1000d));
server.stop();
log.log(Level.INFO, "Server shutdown completed");
} catch (final Exception e) {
log.log(Level.SEVERE, "Server shutdown threw an unexpected exception.", e);
}
metricReporterExecutor.shutdown();
janitor.shutdown();
} | log.log(Level.INFO, String.format("Shutting down server (graceful=%b, timeout=%.1fs)", isGracefulShutdownEnabled(), server.getStopTimeout()/1000d)); | public void close() {
try {
log.log(Level.INFO, String.format("Shutting down server (graceful=%b, timeout=%.1fs)", isGracefulShutdownEnabled(), server.getStopTimeout()/1000d));
server.stop();
log.log(Level.INFO, "Server shutdown completed");
} catch (final Exception e) {
log.log(Level.SEVERE, "Server shutdown threw an unexpected exception.", e);
}
metricReporterExecutor.shutdown();
janitor.shutdown();
} | class + " port " + listenPort + ".");
}
ServiceReference<ServerSocketChannel> ref = refs.iterator().next();
return bundleContext.getService(ref);
}
private static ExecutorService newJanitor(ThreadFactory factory) {
int threadPoolSize = Runtime.getRuntime().availableProcessors();
log.info("Creating janitor executor with " + threadPoolSize + " threads");
return Executors.newFixedThreadPool(
threadPoolSize,
new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat(JettyHttpServer.class.getName() + "-Janitor-%d")
.setThreadFactory(factory)
.build()
);
} | class + " port " + listenPort + ".");
}
ServiceReference<ServerSocketChannel> ref = refs.iterator().next();
return bundleContext.getService(ref);
}
private static ExecutorService newJanitor(ThreadFactory factory) {
int threadPoolSize = Runtime.getRuntime().availableProcessors();
log.info("Creating janitor executor with " + threadPoolSize + " threads");
return Executors.newFixedThreadPool(
threadPoolSize,
new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat(JettyHttpServer.class.getName() + "-Janitor-%d")
.setThreadFactory(factory)
.build()
);
} |
The timeout still impacts other parts of Jetty (stopTimeout is generic concept for all `LifeCycle` objects in Jetty). | public void close() {
try {
log.log(Level.INFO, String.format("Shutting down server (graceful=%b, timeout=%.1fs)", isGracefulShutdownEnabled(), server.getStopTimeout()/1000d));
server.stop();
log.log(Level.INFO, "Server shutdown completed");
} catch (final Exception e) {
log.log(Level.SEVERE, "Server shutdown threw an unexpected exception.", e);
}
metricReporterExecutor.shutdown();
janitor.shutdown();
} | log.log(Level.INFO, String.format("Shutting down server (graceful=%b, timeout=%.1fs)", isGracefulShutdownEnabled(), server.getStopTimeout()/1000d)); | public void close() {
try {
log.log(Level.INFO, String.format("Shutting down server (graceful=%b, timeout=%.1fs)", isGracefulShutdownEnabled(), server.getStopTimeout()/1000d));
server.stop();
log.log(Level.INFO, "Server shutdown completed");
} catch (final Exception e) {
log.log(Level.SEVERE, "Server shutdown threw an unexpected exception.", e);
}
metricReporterExecutor.shutdown();
janitor.shutdown();
} | class + " port " + listenPort + ".");
}
ServiceReference<ServerSocketChannel> ref = refs.iterator().next();
return bundleContext.getService(ref);
}
private static ExecutorService newJanitor(ThreadFactory factory) {
int threadPoolSize = Runtime.getRuntime().availableProcessors();
log.info("Creating janitor executor with " + threadPoolSize + " threads");
return Executors.newFixedThreadPool(
threadPoolSize,
new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat(JettyHttpServer.class.getName() + "-Janitor-%d")
.setThreadFactory(factory)
.build()
);
} | class + " port " + listenPort + ".");
}
ServiceReference<ServerSocketChannel> ref = refs.iterator().next();
return bundleContext.getService(ref);
}
private static ExecutorService newJanitor(ThreadFactory factory) {
int threadPoolSize = Runtime.getRuntime().availableProcessors();
log.info("Creating janitor executor with " + threadPoolSize + " threads");
return Executors.newFixedThreadPool(
threadPoolSize,
new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat(JettyHttpServer.class.getName() + "-Janitor-%d")
.setThreadFactory(factory)
.build()
);
} |
Is this used by any clients? | private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/user")) return authenticatedUser(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
if (path.matches("/application/v4/property")) return properties();
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
} | if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines(); | private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/user")) return authenticatedUser(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
if (path.matches("/application/v4/property")) return properties();
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
} | class ApplicationApiHandler extends LoggingRequestHandler {
private final Controller controller;
private final PermitExtractor permits;
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
PermitExtractor permits) {
super(parentCtx);
this.controller = controller;
this.permits = permits;
}
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case PUT: return handlePUT(request);
case POST: return handlePOST(request);
case PATCH: return handlePATCH(request);
case DELETE: return handleDELETE(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
return ErrorResponse.from(e);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handlePUT(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/user")) return createUser(request);
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePOST(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePATCH(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}"))
return setMajorVersion(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleDELETE(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleOPTIONS() {
EmptyJsonResponse response = new EmptyJsonResponse();
response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
return response;
}
private HttpResponse recursiveRoot(HttpRequest request) {
Slime slime = new Slime();
Cursor tenantArray = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
toSlime(tenantArray.addObject(), tenant, request);
return new SlimeJsonResponse(slime);
}
private HttpResponse root(HttpRequest request) {
return recurseOverTenants(request)
? recursiveRoot(request)
: new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property");
}
private HttpResponse authenticatedUser(HttpRequest request) {
AthenzPrincipal user = getUserPrincipal(request);
if (user == null)
throw new NotAuthorizedException("You must be authenticated.");
List<Tenant> tenants = controller.tenants().asList(user);
Slime slime = new Slime();
Cursor response = slime.setObject();
response.setString("user", user.getIdentity().getName());
Cursor tenantsArray = response.setArray("tenants");
for (Tenant tenant : tenants)
tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant.type() == Type.user &&
((UserTenant) tenant).is(user.getIdentity().getName())));
return new SlimeJsonResponse(slime);
}
private HttpResponse tenants(HttpRequest request) {
Slime slime = new Slime();
Cursor response = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
return new SlimeJsonResponse(slime);
}
/** Lists the screwdriver project id for each application */
private HttpResponse tenantPipelines() {
Slime slime = new Slime();
Cursor response = slime.setObject();
Cursor pipelinesArray = response.setArray("tenantPipelines");
for (Application application : controller.applications().asList()) {
if ( ! application.deploymentJobs().projectId().isPresent()) continue;
Cursor pipelineObject = pipelinesArray.addObject();
pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().getAsLong()));
pipelineObject.setString("tenant", application.id().tenant().value());
pipelineObject.setString("application", application.id().application().value());
pipelineObject.setString("instance", application.id().instance().value());
}
response.setArray("brokenTenantPipelines");
return new SlimeJsonResponse(slime);
}
private HttpResponse properties() {
Slime slime = new Slime();
Cursor response = slime.setObject();
Cursor array = response.setArray("properties");
for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) {
Cursor propertyObject = array.addObject();
propertyObject.setString("propertyid", entry.getKey().id());
propertyObject.setString("property", entry.getValue().id());
}
return new SlimeJsonResponse(slime);
}
private HttpResponse tenant(String tenantName, HttpRequest request) {
return controller.tenants().get(TenantName.from(tenantName))
.map(tenant -> tenant(tenant, request))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
}
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), tenant, request);
return new SlimeJsonResponse(slime);
}
private HttpResponse applications(String tenantName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
Slime slime = new Slime();
Cursor array = slime.setArray();
for (Application application : controller.applications().asList(tenant))
toSlime(application, array.addObject(), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse setMajorVersion(String tenantName, String applicationName, HttpRequest request) {
Application application = getApplication(tenantName, applicationName);
Inspector majorVersionField = toSlime(request.getData()).get().field("majorVersion");
if ( ! majorVersionField.valid())
throw new IllegalArgumentException("Request body must contain a majorVersion field");
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int)majorVersionField.asLong();
controller.applications().lockIfPresent(application.id(),
a -> controller.applications().store(a.withMajorVersion(majorVersion)));
return new MessageResponse("Set major version to " + ( majorVersion == null ? "empty" : majorVersion));
}
private Application getApplication(String tenantName, String applicationName) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
return controller.applications().get(applicationId)
.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
/**
 * Returns logs for the given deployment. With a "streaming" query parameter the config server
 * log stream is piped directly to the client; otherwise the logs are returned as a JSON object
 * of name → content entries.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
        ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = ZoneId.from(environment, region);
        DeploymentId deployment = new DeploymentId(application, zone);
        if (queryParameters.containsKey("streaming")) {
            InputStream logStream = controller.configServer().getLogStream(deployment, queryParameters);
            return new HttpResponse(200) {
                @Override
                public void render(OutputStream outputStream) throws IOException {
                    logStream.transferTo(outputStream);
                }
            };
        }
        Slime slime = new Slime();
        Cursor object = slime.setObject();
        // Copy each log entry into the response object; absent logs yield an empty object.
        controller.configServer().getLogs(deployment, queryParameters)
                  .ifPresent(logs -> logs.logs().forEach(object::setString));
        return new SlimeJsonResponse(slime);
}
/** Force-triggers the given job for the given application, reporting which jobs were actually triggered. */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
        String requester = request.getJDiscRequest().getUserPrincipal().getName();
        String triggered = controller.applications().deploymentTrigger()
                                     .forceTrigger(id, type, requester)
                                     .stream().map(JobType::jobName).collect(joining(", "));
        if (triggered.isEmpty())
            return new MessageResponse("Job " + type.jobName() + " for " + id + " not triggered");
        return new MessageResponse("Triggered " + triggered + " for " + id);
}
/** Pauses the given job for the given application for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
        Instant pauseUntil = controller.clock().instant().plus(DeploymentTrigger.maxPause);
        controller.applications().deploymentTrigger().pauseJob(id, type, pauseUntil);
        return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/**
 * Serializes the given application to the given JSON object: id, source revision, project id,
 * ongoing and outstanding changes, deployment job status, change blockers, versions, rotations,
 * deployments (recursed into when requested), metrics, activity and ownership info.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
        object.setString("application", application.id().application().value());
        object.setString("instance", application.id().instance().value());
        // Link to the job listing for this application.
        object.setString("deployments", withPath("/application/v4" +
                                                 "/tenant/" + application.id().tenant().value() +
                                                 "/application/" + application.id().application().value() +
                                                 "/instance/" + application.id().instance().value() + "/job/",
                                                 request.getUri()).toString());
        // Source revision of the last successful component build, if any.
        application.deploymentJobs().statusOf(JobType.component)
                   .flatMap(JobStatus::lastSuccess)
                   .map(run -> run.application().source())
                   .ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source")));
        application.deploymentJobs().projectId()
                   .ifPresent(id -> object.setLong("projectId", id));
        if ( ! application.change().isEmpty()) {
            toSlime(object.setObject("deploying"), application.change());
        }
        if ( ! application.outstandingChange().isEmpty()) {
            toSlime(object.setObject("outstandingChange"), application.outstandingChange());
        }
        // Job statuses, in deployment-spec order.
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                              .steps(application.deploymentSpec())
                                              .sortedJobs(application.deploymentJobs().jobStatus().values());
        object.setBool("deployedInternally", application.deploymentJobs().deployedInternally());
        Cursor deploymentsArray = object.setArray("deploymentJobs");
        for (JobStatus job : jobStatus) {
            Cursor jobObject = deploymentsArray.addObject();
            jobObject.setString("type", job.type().jobName());
            jobObject.setBool("success", job.isSuccess());
            job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
            job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
            job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
            job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
        }
        // Time windows in which version and/or revision changes are blocked.
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        });
        object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString());
        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
        // Global rotation URLs, when this application has a global rotation.
        Cursor globalRotationsArray = object.setArray("globalRotations");
        application.globalDnsName(controller.system()).ifPresent(rotation -> {
            globalRotationsArray.addString(rotation.url().toString());
            globalRotationsArray.addString(rotation.secureUrl().toString());
            globalRotationsArray.addString(rotation.oathUrl().toString());
            // NOTE(review): unchecked rotation().get() — presumably always present when globalDnsName is; verify.
            object.setString("rotationId", application.rotation().get().asString());
        });
        // Additional rotation names from routing policies.
        Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies(application.id());
        for (RoutingPolicy policy : routingPolicies) {
            for (RotationName rotation : policy.rotations()) {
                GlobalDnsName dnsName = new GlobalDnsName(application.id(), controller.system(), rotation);
                globalRotationsArray.addString(dnsName.oathUrl().toString());
            }
        }
        // Deployments, in deployment-spec order.
        List<Deployment> deployments = controller.applications().deploymentTrigger()
                                                 .steps(application.deploymentSpec())
                                                 .sortedDeployments(application.deployments().values());
        Cursor instancesArray = object.setArray("instances");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = instancesArray.addObject();
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", application.id().instance().value());
            if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) {
                toSlime(application.rotationStatus(deployment), deploymentObject);
            }
            // Either inline the full deployment or just link to it, depending on the recursion parameter.
            if (recurseOverDeployments(request))
                toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
            else
                deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                           "/environment/" + deployment.zone().environment().value() +
                                                           "/region/" + deployment.zone().region().value() +
                                                           "/instance/" + application.id().instance().value(),
                                                           request.getUri()).toString());
        }
        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns details about a single deployment of the given application instance, or throws if absent. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Optional<Application> application = controller.applications().get(id);
        if ( ! application.isPresent())
            throw new NotExistsException(id + " not found");
        DeploymentId deploymentId = new DeploymentId(application.get().id(), ZoneId.from(environment, region));
        Deployment deployment = application.get().deployments().get(deploymentId.zoneId());
        if (deployment == null)
            throw new NotExistsException(application.get() + " is not deployed in " + deploymentId.zoneId());
        Slime root = new Slime();
        toSlime(root.setObject(), deploymentId, deployment, request);
        return new SlimeJsonResponse(root);
}
/** Serializes the platform version and (known) application revision of the given change. */
private void toSlime(Cursor object, Change change) {
        if (change.platform().isPresent())
            object.setString("version", change.platform().get().toString());
        change.application().ifPresent(version -> {
            if ( ! version.isUnknown())
                toSlime(version, object.setObject("revision"));
        });
}
/**
 * Serializes the given deployment to the given JSON object: service and node URLs, log and
 * monitoring links, versions, timestamps, source revision, activity, cost and metrics.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        Cursor serviceUrlArray = response.setArray("serviceUrls");
        controller.applications().getDeploymentEndpoints(deploymentId)
                .ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())));
        // Link to the node repository listing for this deployment's nodes.
        response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
        controller.zoneRegistry().getLogServerUri(deploymentId)
                .ifPresent(elkUrl -> response.setString("elkUrl", elkUrl.toString()));
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", deployment.applicationVersion().id());
        response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
        // Expiry only applies to zones with a deployment time-to-live (e.g. dev/test).
        controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
                .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
        controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId()
                .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
        sourceRevisionToSlime(deployment.applicationVersion().source(), response);
        Cursor activity = response.setObject("activity");
        deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                                  instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                                  instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
        // Cost
        DeploymentCost appCost = deployment.calculateCost();
        Cursor costObject = response.setObject("cost");
        toSlime(appCost, costObject);
        // Metrics
        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
        metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes the given application version, unless it is unknown. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
        if (applicationVersion.isUnknown()) return;
        object.setString("hash", applicationVersion.id());
        sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
}
/** Serializes the given source revision to the given object, when present. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
        revision.ifPresent(source -> {
            object.setString("gitRepository", source.repository());
            object.setString("gitBranch", source.branch());
            object.setString("gitCommit", source.commit());
        });
}
/** Serializes the given rotation status under a "bcpStatus" object. */
private void toSlime(RotationStatus status, Cursor object) {
        object.setObject("bcpStatus").setString("rotationStatus", status.name().toUpperCase());
}
/** Returns the monitoring system (Yamas) URI for the given deployment. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Sets this deployment in or out of its global rotation, recording who did it and why.
 * The request body must contain a "reason" field.
 */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
        Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
        ZoneId zone = ZoneId.from(environment, region);
        Deployment deployment = application.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(application + " has no deployment in " + zone);
        }
        // Audit info: who changed the status, why, and when.
        Inspector requestData = toSlime(request.getData()).get();
        String reason = mandatory("reason", requestData).asString();
        String agent = getUserPrincipal(request).getIdentity().getFullName();
        long timestamp = controller.clock().instant().getEpochSecond();
        EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
        EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent, timestamp);
        controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()),
                                                          endpointStatus);
        return new MessageResponse(String.format("Successfully set %s in %s.%s %s service",
                                                 application.id().toShortString(),
                                                 deployment.zone().environment().value(),
                                                 deployment.zone().region().value(),
                                                 inService ? "in" : "out of"));
}
/**
 * Returns the global rotation override status for each routing endpoint of the given deployment:
 * alternating upstream names and status objects (status, reason, agent, timestamp).
 */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     ZoneId.from(environment, region));
        Slime slime = new Slime();
        Cursor array = slime.setObject().setArray("globalrotationoverride");
        Map<RoutingEndpoint, EndpointStatus> status = controller.applications().globalRotationStatus(deploymentId);
        // Iterate entries directly instead of keySet() + get(), avoiding a second lookup per endpoint.
        for (Map.Entry<RoutingEndpoint, EndpointStatus> entry : status.entrySet()) {
            EndpointStatus currentStatus = entry.getValue();
            array.addString(entry.getKey().upstreamName());
            Cursor statusObject = array.addObject();
            statusObject.setString("status", currentStatus.getStatus().name());
            statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
            statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
            statusObject.setLong("timestamp", currentStatus.getEpoch());
        }
        return new SlimeJsonResponse(slime);
}
/** Returns the global rotation status of the given deployment; 404 when rotation or deployment is missing. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Application application = controller.applications().require(applicationId);
        ZoneId zone = ZoneId.from(environment, region);
        if ( ! application.rotation().isPresent())
            throw new NotExistsException("global rotation does not exist for " + application);
        Deployment deployment = application.deployments().get(zone);
        if (deployment == null)
            throw new NotExistsException(application + " has no deployment in " + zone);
        Slime root = new Slime();
        toSlime(application.rotationStatus(deployment), root.setObject());
        return new SlimeJsonResponse(root);
}
/** Returns the change currently deploying for the given application, or an empty object if none. */
private HttpResponse deploying(String tenant, String application, HttpRequest request) {
        Application app = controller.applications().require(ApplicationId.from(tenant, application, "default"));
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        Change change = app.change();
        if ( ! change.isEmpty()) {
            change.platform().ifPresent(version -> root.setString("platform", version.toString()));
            change.application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
            root.setBool("pinned", change.isPinned());
        }
        return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended (out of service). */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     ZoneId.from(environment, region));
        Slime slime = new Slime();
        slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
        return new SlimeJsonResponse(slime);
}
/** Lists the services of the given deployment, as reported by the config server. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
        ZoneId zone = ZoneId.from(environment, region);
        ApplicationId id = new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build();
        ServiceApiResponse response = new ServiceApiResponse(zone,
                                                             id,
                                                             controller.zoneRegistry().getConfigServerApiUris(zone),
                                                             request.getUri());
        response.setResponse(applicationView);
        return response;
}
/** Proxies a request for a single service's API under the given deployment. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
        Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath);
        ZoneId zone = ZoneId.from(environment, region);
        ApplicationId id = new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build();
        ServiceApiResponse response = new ServiceApiResponse(zone,
                                                             id,
                                                             controller.zoneRegistry().getConfigServerApiUris(zone),
                                                             request.getUri());
        response.setResponse(result, serviceName, restPath);
        return response;
}
/** Creates a user tenant for the authenticated user, or reports that it already exists. */
private HttpResponse createUser(HttpRequest request) {
        UserId user = getUserId(request)
                .orElseThrow(() -> new ForbiddenException("Not authenticated or not a user."));
        String username = UserTenant.normalizeUser(user.id());
        try {
            controller.tenants().createUser(UserTenant.create(username));
            return new MessageResponse("Created user '" + username + "'");
        } catch (AlreadyExistsException e) {
            // Creating an already-existing user tenant is not an error.
            return new MessageResponse("User '" + username + "' already exists");
        }
}
/** Updates the tenant with the given name from the request body, returning the updated tenant. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
        getTenantOrThrow(tenantName); // 404 if the tenant does not exist
        TenantName name = TenantName.from(tenantName);
        controller.tenants().update(permits.getTenantPermit(name, request));
        return tenant(controller.tenants().require(name), request);
}
/** Creates a tenant with the given name from the request body, returning the new tenant. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
        TenantName name = TenantName.from(tenantName);
        controller.tenants().create(permits.getTenantPermit(name, request));
        return tenant(controller.tenants().require(name), request);
}
/**
 * Creates the default-instance application with the given names. Non-user tenants require an
 * application permit from the request; Athenz authorization failures become 403 responses.
 */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        try {
            // User tenants need no permit; all others must supply one.
            Optional<ApplicationPermit> permit = controller.tenants().require(id.tenant()).type() != Tenant.Type.user
                    ? Optional.of(permits.getApplicationPermit(id, request)) : Optional.empty();
            Application application = controller.applications().createApplication(id, permit);
            Slime slime = new Slime();
            toSlime(application, slime.setObject(), request);
            return new SlimeJsonResponse(slime);
        }
        catch (ZmsClientException e) { // Athenz refused the operation
            if (e.getErrorCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN)
                throw new ForbiddenException("Not authorized to create application", e);
            else
                throw e;
        }
}
/**
 * Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9".
 * An empty version means the current system version. Rejects versions not active in this system.
 * When {@code pin} is true, the change is pinned to the given version.
 */
private HttpResponse deployPlatform(String tenantName, String applicationName, boolean pin, HttpRequest request) {
        request = controller.auditLogger().log(request);
        String versionString = readToString(request.getData());
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        StringBuilder response = new StringBuilder();
        controller.applications().lockOrThrow(id, application -> {
            Version version = Version.fromString(versionString);
            if (version.equals(Version.emptyVersion))
                version = controller.systemVersion(); // empty version means "current system version"
            if ( ! systemHasVersion(version))
                throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                                   "Version is not active in this system. " +
                                                   "Active versions: " + controller.versionStatus().versions()
                                                                                   .stream()
                                                                                   .map(VespaVersion::versionNumber)
                                                                                   .map(Version::toString)
                                                                                   .collect(joining(", ")));
            Change change = Change.of(version);
            if (pin)
                change = change.withPin();
            controller.applications().deploymentTrigger().forceChange(id, change);
            response.append("Triggered " + change + " for " + id);
        });
        return new MessageResponse(response.toString());
}
/**
 * Trigger deployment to the last known application package for the given application.
 *
 * @throws IllegalArgumentException if no successful component build is known for the application
 */
private HttpResponse deployApplication(String tenantName, String applicationName, HttpRequest request) {
        controller.auditLogger().log(request);
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        StringBuilder response = new StringBuilder();
        controller.applications().lockOrThrow(id, application -> {
            // Previously this used two unchecked Optional.get() calls, which threw an opaque
            // NoSuchElementException when no successful component build existed.
            ApplicationVersion version = application.get().deploymentJobs().statusOf(JobType.component)
                    .flatMap(JobStatus::lastSuccess)
                    .map(run -> run.application())
                    .orElseThrow(() -> new IllegalArgumentException(
                            "Cannot deploy " + id + ": No successful component job run is known"));
            Change change = Change.of(version);
            controller.applications().deploymentTrigger().forceChange(id, change);
            response.append("Triggered " + change + " for " + id);
        });
        return new MessageResponse(response.toString());
}
/**
 * Cancel ongoing change for given application, e.g., everything with {"cancel":"all"}.
 * {@code choice} is matched (case-insensitively) against the ChangesToCancel enum.
 */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        StringBuilder response = new StringBuilder();
        controller.applications().lockOrThrow(id, application -> {
            Change change = application.get().change();
            if (change.isEmpty()) {
                response.append("No deployment in progress for " + application + " at this time");
                return;
            }
            ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
            controller.applications().deploymentTrigger().cancelChange(id, cancel);
            // Re-read the application to report the change remaining after cancellation.
            response.append("Changed deployment from '" + change + "' to '" +
                            controller.applications().require(id).change() + "' for " + application);
        });
        return new MessageResponse(response.toString());
}
/** Schedule restart of deployment, or of a single host in it when a "hostname" property is given. */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = ZoneId.from(environment, region);
        Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
        controller.applications().restart(new DeploymentId(id, zone), hostname);
        String restarted = path(TenantResource.API_PATH, tenantName,
                                ApplicationResource.API_PATH, applicationName,
                                EnvironmentResource.API_PATH, environment,
                                "region", region,
                                "instance", instanceName);
        return new StringResponse("Requested restart of " + restarted);
}
/**
 * Deploys an application package to the given zone. The multipart request must contain a
 * "deployOptions" JSON part and may contain an "applicationZip" part. Handles three cases:
 * the zone system application (no package options allowed), direct redeploys (version info
 * resolved from the existing deployment), and normal deploys.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = ZoneId.from(environment, region);
        Map<String, byte[]> dataParts = new MultipartParser().parse(request);
        if ( ! dataParts.containsKey("deployOptions"))
            return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
        Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
        /*
         * Special handling of the zone application (the only system application with an application package)
         * Setting any other deployOptions here is not supported for now (e.g. specifying version), but
         * this might be handy later to handle emergency downgrades.
         */
        boolean isZoneApplication = SystemApplication.zone.id().equals(applicationId);
        if (isZoneApplication) {
            String versionStr = deployOptions.field("vespaVersion").asString();
            boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
            if (versionPresent) {
                // NOTE(review): bare RuntimeException — arguably should be IllegalArgumentException
                // for consistency with the other validation errors here; verify how it is mapped.
                throw new RuntimeException("Version not supported for system applications");
            }
            if (controller.versionStatus().isUpgrading()) {
                throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
            }
            Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
            if (systemVersion.isEmpty()) {
                throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
            }
            ActivateResult result = controller.applications()
                    .deploySystemApplicationPackage(SystemApplication.zone, zone, systemVersion.get().versionNumber());
            return new SlimeJsonResponse(toSlime(result));
        }
        /*
         * Normal applications from here
         */
        Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
                .map(ApplicationPackage::new);
        // Source revision and build number identify a stored package; they must come together,
        // and are mutually exclusive with an uploaded package.
        Inspector sourceRevision = deployOptions.field("sourceRevision");
        Inspector buildNumber = deployOptions.field("buildNumber");
        if (sourceRevision.valid() != buildNumber.valid())
            throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
        Optional<ApplicationVersion> applicationVersion = Optional.empty();
        if (sourceRevision.valid()) {
            if (applicationPackage.isPresent())
                throw new IllegalArgumentException("Application version and application package can't both be provided.");
            applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
                                                                     buildNumber.asLong()));
            applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
        }
        boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
        Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
        /*
         * Deploy direct is when we want to redeploy the current application - retrieve version
         * info from the application package before deploying
         */
        if(deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) {
            // Redeploy case: resolve the version info from the existing deployment in this zone.
            Optional<Deployment> deployment = controller.applications().get(applicationId)
                    .map(Application::deployments)
                    .flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
            if(!deployment.isPresent())
                throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
            ApplicationVersion version = deployment.get().applicationVersion();
            if(version.isUnknown())
                throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
            applicationVersion = Optional.of(version);
            vespaVersion = Optional.of(deployment.get().version());
            applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
        }
        DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
                                                                 vespaVersion,
                                                                 deployOptions.field("ignoreValidationErrors").asBool(),
                                                                 deployOptions.field("deployCurrentVersion").asBool());
        ActivateResult result = controller.applications().deploy(applicationId,
                                                                 zone,
                                                                 applicationPackage,
                                                                 applicationVersion,
                                                                 deployOptionsJsonClass,
                                                                 Optional.of(getUserPrincipal(request).getIdentity()));
        return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant; user tenants are deleted directly, others require a tenant permit. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
        Optional<Tenant> tenant = controller.tenants().get(tenantName);
        if (tenant.isEmpty())
            return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
        Tenant existing = tenant.get();
        if (existing.type() == Tenant.Type.user)
            controller.tenants().deleteUser((UserTenant) existing);
        else
            controller.tenants().delete(permits.getTenantPermit(existing.name(), request));
        return tenant(existing, request);
}
/** Deletes the default-instance application with the given names; non-user tenants require a permit. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        boolean requiresPermit = controller.tenants().require(id.tenant()).type() != Tenant.Type.user;
        Optional<ApplicationPermit> permit = requiresPermit ? Optional.of(permits.getApplicationPermit(id, request))
                                                            : Optional.empty();
        controller.applications().deleteApplication(id, permit);
        return new EmptyJsonResponse();
}
/** Deactivates the given deployment of the given application instance. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Application application = controller.applications().require(id);
        controller.applications().deactivate(application.id(), ZoneId.from(environment, region));
        String deactivated = path(TenantResource.API_PATH, tenantName,
                                  ApplicationResource.API_PATH, applicationName,
                                  EnvironmentResource.API_PATH, environment,
                                  "region", region,
                                  "instance", instanceName);
        return new StringResponse("Deactivated " + deactivated);
}
/**
 * Promote application Chef environments. To be used by component jobs only.
 * Copies the system Chef environment to this application's source environment.
 */
private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) {
        try {
            ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
            String source = chefEnvironment.systemChefEnvironment();
            String target = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
            controller.chefClient().copyChefEnvironment(source, target);
            return new MessageResponse(String.format("Successfully copied environment %s to %s", source, target));
        } catch (Exception e) {
            log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
            return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
        }
}
/**
 * Promote application Chef environments for jobs that deploy applications.
 * Copies this application's source Chef environment to the target environment of the given zone.
 */
private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) {
        try {
            ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
            String source = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
            String target = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
            controller.chefClient().copyChefEnvironment(source, target);
            return new MessageResponse(String.format("Successfully copied environment %s to %s", source, target));
        } catch (Exception e) {
            log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
            return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
        }
}
/**
 * Registers completion of a deployment job, as reported by an external (Screwdriver) build system.
 * Component reports are rejected for applications that have moved to internal deployments.
 */
private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) {
        try {
            DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get());
            // NOTE(review): this IllegalArgumentException is NOT caught by the IllegalStateException
            // handler below, so it propagates — presumably mapped to an error response by an outer
            // layer; verify that is the intent.
            if ( report.jobType() == JobType.component
                 && controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally())
                throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " +
                                                   "longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
                                                   "to the old pipeline, please file a ticket at yo/vespa-support and request this.");
            controller.applications().deploymentTrigger().notifyOfCompletion(report);
            return new MessageResponse("ok");
        } catch (IllegalStateException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
}
/** Parses a job report from its JSON (Slime) form, dispatching on job type. */
private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString());
    JobType type = JobType.fromJobName(report.field("jobName").asString());
    long buildNumber = report.field("buildNumber").asLong();
    // "jobError" is optional in the report; absence means the job succeeded.
    Optional<DeploymentJobs.JobError> jobError = report.field("jobError").valid()
            ? Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString()))
            : Optional.empty();
    if (type == JobType.component)
        return DeploymentJobs.JobReport.ofComponent(id,
                                                    report.field("projectId").asLong(),
                                                    buildNumber,
                                                    jobError,
                                                    toSourceRevision(report.field("sourceRevision")));
    return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError);
}
/**
 * Parses a source revision from its JSON (Slime) form.
 *
 * @throws IllegalArgumentException if any of "repository", "branch" or "commit" is missing
 */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isPresent()) return tenant.get();
    throw new NotExistsException(new TenantId(tenantName));
}
/**
 * Serializes a tenant, its applications, and (for Athenz tenants) its contact information
 * into the given Slime cursor. Application listing respects the request's "recursive" property.
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tentantType(tenant));
    // Athenz-specific metadata: domain, property, and optional property id.
    if (tenant instanceof AthenzTenant) {
        AthenzTenant athenzTenant = (AthenzTenant) tenant;
        object.setString("athensDomain", athenzTenant.domain().getName());
        object.setString("property", athenzTenant.property().id());
        athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
    }
    // Only the default instance of each application is listed here.
    Cursor applicationArray = object.setArray("applications");
    for (Application application : controller.applications().asList(tenant.name())) {
        if (application.id().instance().isDefault()) {
            if (recurseOverApplications(request))
                toSlime(applicationArray.addObject(), application, request);
            else
                toSlime(application, applicationArray.addObject(), request);
        }
    }
    // Contact info, if present, is serialized after the application list.
    if (tenant instanceof AthenzTenant) {
        AthenzTenant athenzTenant = (AthenzTenant) tenant;
        athenzTenant.contact().ifPresent(c -> {
            object.setString("propertyUrl", c.propertyUrl().toString());
            object.setString("contactsUrl", c.url().toString());
            object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
            // "contacts" is an array of arrays: each entry is one group of person names.
            Cursor contactsArray = object.setArray("contacts");
            c.persons().forEach(persons -> {
                Cursor personArray = contactsArray.addArray();
                persons.forEach(personArray::addString);
            });
        });
    }
}
/** Serializes the compact tenant form used in tenant-list responses: name, metadata, and a self-URL. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tentantType(tenant));
    if (tenant instanceof AthenzTenant) {
        AthenzTenant athenzTenant = (AthenzTenant) tenant;
        metaData.setString("athensDomain", athenzTenant.domain().getName());
        metaData.setString("property", athenzTenant.property().id());
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/**
 * Returns a copy of the given URI with the host and port from the given URI and the path set to the given path.
 * Note that the query and fragment of the original URI are dropped.
 */
private URI withPath(String newPath, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
    }
    catch (URISyntaxException e) {
        // Components come from an already-valid URI, so reconstruction cannot fail.
        throw new RuntimeException("Will not happen", e);
    }
}
/**
 * Parses the given string as a long, returning the given default when the string is null.
 *
 * @throws IllegalArgumentException if the string is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}
/** Serializes a single job run: id, platform version, (known) application revision, reason and timestamp. */
private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
    object.setLong("id", jobRun.id());
    object.setString("version", jobRun.platform().toFullString());
    // Unknown application versions are omitted rather than serialized as placeholders.
    if (!jobRun.application().isUnknown())
        toSlime(jobRun.application(), object.setObject("revision"));
    object.setString("reason", jobRun.reason());
    object.setLong("at", jobRun.at().toEpochMilli());
}
/**
 * Reads the given stream (at most 1 MB) and parses it as JSON into a Slime tree.
 *
 * @throws RuntimeException if the stream cannot be read
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Fix: the original threw a bare RuntimeException with no message and no cause,
        // making read failures impossible to diagnose from logs.
        throw new RuntimeException("Failed reading request body", e);
    }
}
/** Returns the user id of the authenticated principal, or empty if the identity is not an Athenz user. */
private static Optional<UserId> getUserId(HttpRequest request) {
    return Optional.of(getUserPrincipal(request))
                   .map(AthenzPrincipal::getIdentity)
                   .filter(identity -> identity instanceof AthenzUser)
                   .map(identity -> ((AthenzUser) identity).getName())
                   .map(UserId::new);
}
/**
 * Returns the request's principal as an AthenzPrincipal.
 *
 * @throws InternalServerErrorException if the request has no principal, or one of another type —
 *         both indicate a filter-chain misconfiguration rather than a client error
 */
private static AthenzPrincipal getUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal == null) throw new InternalServerErrorException("Expected a user principal");
    if (!(principal instanceof AthenzPrincipal))
        throw new InternalServerErrorException(
                String.format("Expected principal of type %s, got %s",
                              AthenzPrincipal.class.getSimpleName(), principal.getClass().getName()));
    return (AthenzPrincipal) principal;
}
/**
 * Returns the named field of the given object.
 *
 * @throws IllegalArgumentException if the field is missing or invalid
 */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the named field as a string, or empty if the field is missing or invalid. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}
/**
 * Joins the string form of the given elements with '/' separators.
 * Rewritten to use only the JDK instead of Guava's Joiner; note that unlike Joiner
 * (which throws NullPointerException), a null element is rendered as "null".
 */
private static String path(Object... elements) {
    StringBuilder joined = new StringBuilder();
    for (Object element : elements) {
        if (joined.length() > 0) joined.append('/');
        joined.append(element);
    }
    return joined.toString();
}
/** Serializes the compact application form used in list responses: ids plus a self-URL. */
private void toSlime(Application application, Cursor object, HttpRequest request) {
    object.setString("application", application.id().application().value());
    object.setString("instance", application.id().instance().value());
    object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() +
                                     "/application/" + application.id().application().value(), request.getUri()).toString());
}
/**
 * Serializes the result of a deployment activation: revision, package size, prepare log
 * messages, and required config change actions (service restarts and document refeeds).
 */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    // Log messages from the config server's prepare phase, if any.
    Cursor logArray = object.setArray("prepareMessages");
    if (result.prepareResponse().log != null) {
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }
    Cursor changeObject = object.setObject("configChangeActions");
    // Services that must be restarted for the new config to take effect.
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }
    // Document types that must be refed due to incompatible schema changes.
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setBool("allowed", refeedAction.allowed);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}
/** Serializes each service info in the given list as an object in the given Slime array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(serviceInfo -> {
        Cursor entry = array.addObject();
        entry.setString("serviceName", serviceInfo.serviceName);
        entry.setString("serviceType", serviceInfo.serviceType);
        entry.setString("configId", serviceInfo.configId);
        entry.setString("hostName", serviceInfo.hostName);
    });
}
/** Adds each of the given strings to the given Slime array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/**
 * Reads the entire stream into a string, or returns null if the stream is empty.
 * Fix: decode explicitly as UTF-8 — the original Scanner used the platform default
 * charset, which corrupts non-ASCII request bodies on non-UTF-8 hosts.
 */
private String readToString(InputStream stream) {
    Scanner scanner = new Scanner(stream, "UTF-8").useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}
/** Returns whether the given platform version is known to this system's version status. */
private boolean systemHasVersion(Version version) {
    return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
}
/** Serializes a deployment's cost figures and each of its clusters' cost breakdowns. */
public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
    object.setLong("tco", (long)deploymentCost.getTco());
    object.setLong("waste", (long)deploymentCost.getWaste());
    object.setDouble("utilization", deploymentCost.getUtilization());
    // One entry per cluster, keyed by cluster name.
    Cursor clustersObject = object.setObject("cluster");
    for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet())
        toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey()));
}
/**
 * Serializes a single cluster's cost breakdown: size, flavor, utilization, system usage and hosts.
 * Fix: tco and waste are now cast to long rather than int — the int casts truncated values
 * above 2^31 and were inconsistent with the long casts used in the DeploymentCost serializer.
 */
private static void toSlime(ClusterCost clusterCost, Cursor object) {
    object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
    object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
    object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
    object.setLong("tco", (long)clusterCost.getTco());
    object.setLong("waste", (long)clusterCost.getWaste());
    object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
    object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
    object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
    object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
    object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
    object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
    // Target (result) utilization per resource dimension.
    Cursor utilObject = object.setObject("util");
    utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
    utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
    utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
    utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
    // Measured system utilization per resource dimension.
    Cursor usageObject = object.setObject("usage");
    usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
    usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
    usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
    usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
    Cursor hostnamesArray = object.setArray("hostnames");
    for (String hostname : clusterCost.getClusterInfo().getHostnames())
        hostnamesArray.addString(hostname);
}
/**
 * Returns the name of the resource dimension with the maximum utilization,
 * checking memory, then disk, then disk-busy, and defaulting to cpu.
 */
private static String getResourceName(ClusterUtilization utilization) {
    double max = utilization.getMaxUtilization();
    if (utilization.getMemory() == max) return "mem";
    if (utilization.getDisk() == max) return "disk";
    if (utilization.getDiskBusy() == max) return "diskbusy";
    return "cpu";
}
/** Returns whether the request asks for recursion at tenant level or deeper. */
private static boolean recurseOverTenants(HttpRequest request) {
    return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion at application level or deeper. */
private static boolean recurseOverApplications(HttpRequest request) {
    return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion down to deployment level. */
private static boolean recurseOverDeployments(HttpRequest request) {
    return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
/**
 * Returns the API type string for the given tenant.
 * NOTE(review): the method name contains a typo ("tentant"); it is kept unchanged
 * because it is referenced from several serializers in this file.
 *
 * @throws IllegalArgumentException for unknown tenant subtypes
 */
private static String tentantType(Tenant tenant) {
    if (tenant instanceof AthenzTenant) return "ATHENS";
    if (tenant instanceof UserTenant) return "USER";
    throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
/** Builds an application id from the "tenant", "application" and "instance" path segments. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Resolves the job type named by the "jobtype" path segment. */
private static JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"));
}
/** Builds a run id from the application, job type and "number" path segments. */
private static RunId runIdFromPath(Path path) {
    long number = Long.parseLong(path.get("number"));
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}
/**
 * Handles an application submission to the internal build service: parses the multipart
 * request (submit options, application package, test package), verifies the package's
 * identity configuration against the tenant, and forwards everything to the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = new MultipartParser().parse(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    SourceRevision sourceRevision = toSourceRevision(submitOptions);
    String authorEmail = submitOptions.field("authorEmail").asString();
    // Project id must be positive; a missing/zero value is coerced to 1.
    long projectId = Math.max(1, submitOptions.field("projectId").asLong());
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     applicationPackage,
                                                                     Optional.of(getUserPrincipal(request).getIdentity()));
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                        tenant,
                                                        application,
                                                        sourceRevision,
                                                        authorEmail,
                                                        projectId,
                                                        applicationPackage,
                                                        dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
} | class ApplicationApiHandler extends LoggingRequestHandler {
// Central access point to controller-server state and operations.
private final Controller controller;
// Extracts access-control permits from incoming requests.
private final PermitExtractor permits;

/** Creates this handler with its dependencies; wired up by the container via @Inject. */
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
                             Controller controller,
                             PermitExtractor permits) {
    super(parentCtx);
    this.controller = controller;
    this.permits = permits;
}
/** Generous request timeout — presumably to accommodate long-running operations such as deployments. */
@Override
public Duration getTimeout() {
    return Duration.ofMinutes(20);
}
/**
 * Entry point for all requests: dispatches on HTTP method and maps known exception
 * types to their corresponding HTTP error responses.
 */
@Override
public HttpResponse handle(HttpRequest request) {
    try {
        switch (request.getMethod()) {
            case GET: return handleGET(request);
            case PUT: return handlePUT(request);
            case POST: return handlePOST(request);
            case PATCH: return handlePATCH(request);
            case DELETE: return handleDELETE(request);
            case OPTIONS: return handleOPTIONS();
            default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
        }
    }
    catch (ForbiddenException e) {
        return ErrorResponse.forbidden(Exceptions.toMessageString(e));
    }
    catch (NotAuthorizedException e) {
        return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
    }
    catch (NotExistsException e) {
        return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
    }
    catch (IllegalArgumentException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
    catch (ConfigServerException e) {
        // Config server errors carry their own status mapping.
        return ErrorResponse.from(e);
    }
    catch (RuntimeException e) {
        // Anything else is unexpected: log with stack trace and return 500.
        log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
        return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
    }
}
/** Routes PUT requests: user creation, tenant update, and global rotation override (set). */
private HttpResponse handlePUT(HttpRequest request) {
    Path path = new Path(request.getUri().getPath());
    if (path.matches("/application/v4/user")) return createUser(request);
    if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
        return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes POST requests: creation, deployment triggering, job reports, submissions and promotions. */
private HttpResponse handlePOST(HttpRequest request) {
    Path path = new Path(request.getUri().getPath());
    if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests: currently only setting an application's pinned major version. */
private HttpResponse handlePATCH(HttpRequest request) {
    Path path = new Path(request.getUri().getPath());
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}"))
        return setMajorVersion(path.get("tenant"), path.get("application"), request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes DELETE requests: tenant/application removal, deploy cancellation, job abort and deactivation. */
private HttpResponse handleDELETE(HttpRequest request) {
    Path path = new Path(request.getUri().getPath());
    if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
    if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
        return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
    return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Responds to OPTIONS with an empty body and the supported methods in the Allow header. */
private HttpResponse handleOPTIONS() {
    EmptyJsonResponse response = new EmptyJsonResponse();
    response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return response;
}
/** Serializes every tenant (recursively) into a single array response. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    for (Tenant tenant : controller.tenants().asList())
        toSlime(tenantArray.addObject(), tenant, request);
    return new SlimeJsonResponse(slime);
}
/** API root: recursive tenant listing if requested, otherwise a directory of top-level resources. */
private HttpResponse root(HttpRequest request) {
    return recurseOverTenants(request)
            ? recursiveRoot(request)
            : new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property");
}
/**
 * Returns the authenticated user's name, the tenants they belong to, and whether
 * a personal (user) tenant already exists for them.
 */
private HttpResponse authenticatedUser(HttpRequest request) {
    AthenzPrincipal user = getUserPrincipal(request);
    // NOTE(review): getUserPrincipal never returns null (it throws instead), so this check
    // appears to be dead code; kept as a defensive guard.
    if (user == null)
        throw new NotAuthorizedException("You must be authenticated.");
    List<Tenant> tenants = controller.tenants().asList(user);
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    response.setString("user", user.getIdentity().getName());
    Cursor tenantsArray = response.setArray("tenants");
    for (Tenant tenant : tenants)
        tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
    response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant.type() == Type.user &&
                                                                         ((UserTenant) tenant).is(user.getIdentity().getName())));
    return new SlimeJsonResponse(slime);
}
/** Lists all tenants in their compact (list) form. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor response = slime.setArray();
    for (Tenant tenant : controller.tenants().asList())
        tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
    return new SlimeJsonResponse(slime);
}
/** Lists the screwdriver project id for each application */
private HttpResponse tenantPipelines() {
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    Cursor pipelinesArray = response.setArray("tenantPipelines");
    for (Application application : controller.applications().asList()) {
        // Only applications with a registered Screwdriver project are listed.
        if ( ! application.deploymentJobs().projectId().isPresent()) continue;
        Cursor pipelineObject = pipelinesArray.addObject();
        pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().getAsLong()));
        pipelineObject.setString("tenant", application.id().tenant().value());
        pipelineObject.setString("application", application.id().application().value());
        pipelineObject.setString("instance", application.id().instance().value());
    }
    // Kept for API compatibility; always empty.
    response.setArray("brokenTenantPipelines");
    return new SlimeJsonResponse(slime);
}
/** Lists all known properties with their ids. */
private HttpResponse properties() {
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    Cursor array = response.setArray("properties");
    for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) {
        Cursor propertyObject = array.addObject();
        propertyObject.setString("propertyid", entry.getKey().id());
        propertyObject.setString("property", entry.getValue().id());
    }
    return new SlimeJsonResponse(slime);
}
/** Serializes the named tenant, or returns 404 if it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    return controller.tenants().get(TenantName.from(tenantName))
                     .map(tenant -> tenant(tenant, request))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
}
/** Serializes the given tenant into a full JSON response. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    toSlime(slime.setObject(), tenant, request);
    return new SlimeJsonResponse(slime);
}
/** Lists all applications of the named tenant in their compact form. */
private HttpResponse applications(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Slime slime = new Slime();
    Cursor array = slime.setArray();
    for (Application application : controller.applications().asList(tenant))
        toSlime(application, array.addObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Serializes the default instance of the named application into a full JSON response. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime slime = new Slime();
    toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(slime);
}
/**
 * Sets or clears the application's pinned major version from the request body's
 * "majorVersion" field; a value of 0 clears the pin.
 */
private HttpResponse setMajorVersion(String tenantName, String applicationName, HttpRequest request) {
    Application application = getApplication(tenantName, applicationName);
    Inspector majorVersionField = toSlime(request.getData()).get().field("majorVersion");
    if ( ! majorVersionField.valid())
        throw new IllegalArgumentException("Request body must contain a majorVersion field");
    // 0 is the sentinel for "no pinned major version".
    Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int)majorVersionField.asLong();
    controller.applications().lockIfPresent(application.id(),
                                            a -> controller.applications().store(a.withMajorVersion(majorVersion)));
    return new MessageResponse("Set major version to " + ( majorVersion == null ? "empty" : majorVersion));
}
/** Returns the default instance of the named application, or throws NotExistsException. */
private Application getApplication(String tenantName, String applicationName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
    return controller.applications().get(applicationId)
                     .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
/**
 * Returns logs for the given deployment. With the "streaming" query parameter set, the
 * config server's log stream is proxied directly; otherwise logs are returned as a JSON
 * object of name → content entries.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    if (queryParameters.containsKey("streaming")) {
        InputStream logStream = controller.configServer().getLogStream(deployment, queryParameters);
        return new HttpResponse(200) {
            @Override
            public void render(OutputStream outputStream) throws IOException {
                logStream.transferTo(outputStream);
            }
        };
    }
    Optional<Logs> response = controller.configServer().getLogs(deployment, queryParameters);
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    // Idiom fix: replaces isPresent()/get() plus a redundant entrySet().stream().forEach()
    // with ifPresent() and Map.forEach(); behavior is unchanged.
    response.ifPresent(logs -> logs.logs().forEach(object::setString));
    return new SlimeJsonResponse(slime);
}
/** Force-triggers the given job for the given application, reporting which jobs were actually triggered. */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    String triggered = controller.applications().deploymentTrigger()
                                 .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName())
                                 .stream().map(JobType::jobName).collect(joining(", "));
    return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                   : "Triggered " + triggered + " for " + id);
}
/** Pauses the given job for the given application for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, until);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/**
 * Serializes the full application view: identity, source, change status, job statuses,
 * change blockers, versions, rotations, deployments, metrics, activity and ownership.
 * Field order is part of the API response and must be preserved.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("application", application.id().application().value());
    object.setString("instance", application.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/instance/" + application.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    // Source revision of the last successful component build, if any.
    application.deploymentJobs().statusOf(JobType.component)
               .flatMap(JobStatus::lastSuccess)
               .map(run -> run.application().source())
               .ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source")));
    application.deploymentJobs().projectId()
               .ifPresent(id -> object.setLong("projectId", id));
    // Changes currently rolling out, and queued (outstanding) changes.
    if ( ! application.change().isEmpty()) {
        toSlime(object.setObject("deploying"), application.change());
    }
    if ( ! application.outstandingChange().isEmpty()) {
        toSlime(object.setObject("outstandingChange"), application.outstandingChange());
    }
    // Job statuses, ordered according to the deployment spec.
    List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                          .steps(application.deploymentSpec())
                                          .sortedJobs(application.deploymentJobs().jobStatus().values());
    object.setBool("deployedInternally", application.deploymentJobs().deployedInternally());
    Cursor deploymentsArray = object.setArray("deploymentJobs");
    for (JobStatus job : jobStatus) {
        Cursor jobObject = deploymentsArray.addObject();
        jobObject.setString("type", job.type().jobName());
        jobObject.setBool("success", job.isSuccess());
        job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
        job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
        job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
        job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
    }
    // Time windows in which version and/or revision changes are blocked.
    Cursor changeBlockers = object.setArray("changeBlockers");
    application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
        Cursor changeBlockerObject = changeBlockers.addObject();
        changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
        changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
        changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
        Cursor days = changeBlockerObject.setArray("days");
        changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
        Cursor hours = changeBlockerObject.setArray("hours");
        changeBlocker.window().hours().forEach(hours::addLong);
    });
    object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString());
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    // Legacy global rotation URLs, plus URLs from the newer routing-policy model.
    Cursor globalRotationsArray = object.setArray("globalRotations");
    application.globalDnsName(controller.system()).ifPresent(rotation -> {
        globalRotationsArray.addString(rotation.url().toString());
        globalRotationsArray.addString(rotation.secureUrl().toString());
        globalRotationsArray.addString(rotation.oathUrl().toString());
        // NOTE(review): unchecked Optional.get() — presumably a global DNS name implies a
        // rotation is present, but this is not locally guaranteed; confirm before relying on it.
        object.setString("rotationId", application.rotation().get().asString());
    });
    Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies(application.id());
    for (RoutingPolicy policy : routingPolicies) {
        for (RotationName rotation : policy.rotations()) {
            GlobalDnsName dnsName = new GlobalDnsName(application.id(), controller.system(), rotation);
            globalRotationsArray.addString(dnsName.oathUrl().toString());
        }
    }
    // Deployments ("instances"), ordered according to the deployment spec.
    List<Deployment> deployments = controller.applications().deploymentTrigger()
                                             .steps(application.deploymentSpec())
                                             .sortedDeployments(application.deployments().values());
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        deploymentObject.setString("environment", deployment.zone().environment().value());
        deploymentObject.setString("region", deployment.zone().region().value());
        deploymentObject.setString("instance", application.id().instance().value());
        if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) {
            toSlime(application.rotationStatus(deployment), deploymentObject);
        }
        // Either inline the full deployment (recursive request) or link to it.
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
        else
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value() +
                                                       "/instance/" + application.id().instance().value(),
                                                       request.getUri()).toString());
    }
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns the details of the given application's deployment in the given zone, or 404 if absent. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
Application application = controller.applications().get(id)
.orElseThrow(() -> new NotExistsException(id + " not found"));
DeploymentId deploymentId = new DeploymentId(application.id(),
ZoneId.from(environment, region));
// A known application may still have no deployment in the requested zone.
Deployment deployment = application.deployments().get(deploymentId.zoneId());
if (deployment == null)
throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId());
Slime slime = new Slime();
toSlime(slime.setObject(), deploymentId, deployment, request);
return new SlimeJsonResponse(slime);
}
/** Serializes the platform and (known) application parts of the given change. */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(version -> object.setString("version", version.toString()));
    change.application().ifPresent(applicationVersion -> {
        // Unknown application versions carry no usable information, so they are omitted.
        if ( ! applicationVersion.isUnknown())
            toSlime(applicationVersion, object.setObject("revision"));
    });
}
/** Serializes the full details of the given deployment: urls, versions, activity, cost and metrics. */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
Cursor serviceUrlArray = response.setArray("serviceUrls");
controller.applications().getDeploymentEndpoints(deploymentId)
.ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())));
// Link into the node repository, listing the nodes of this deployment.
response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString())
;
// Log server location, only if the zone registry knows one for this deployment.
controller.zoneRegistry().getLogServerUri(deploymentId)
.ifPresent(elkUrl -> response.setString("elkUrl", elkUrl.toString()));
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
response.setString("version", deployment.version().toFullString());
response.setString("revision", deployment.applicationVersion().id());
response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
// Expiry is only present for zones which have a deployment time-to-live configured.
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId()
.ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
sourceRevisionToSlime(deployment.applicationVersion().source(), response);
// Activity timestamps and rates are only serialized when known.
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
DeploymentCost appCost = deployment.calculateCost();
Cursor costObject = response.setObject("cost");
toSlime(appCost, costObject);
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes the given application version, unless it is unknown. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown())
        return; // nothing useful to serialize
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
}
/** Serializes the given source revision into the given object, if present. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Serializes the BCP status of the given rotation. */
private void toSlime(RotationStatus status, Cursor object) {
    Cursor bcpStatus = object.setObject("bcpStatus");
    // Upper-case with a fixed locale so the serialized value does not depend on the JVM's
    // default locale (e.g. Turkish locales map 'i' to dotted capital 'İ').
    bcpStatus.setString("rotationStatus", status.name().toUpperCase(java.util.Locale.ROOT));
}
/** Returns the monitoring system (Yamas) URI for the given deployment, as resolved by the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/** Sets the global rotation status (in or out of service) for the given deployment. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
ZoneId zone = ZoneId.from(environment, region);
Deployment deployment = application.deployments().get(zone);
if (deployment == null) {
throw new NotExistsException(application + " has no deployment in " + zone);
}
// A "reason" is required in the request body, recorded together with the acting agent for auditing.
Inspector requestData = toSlime(request.getData()).get();
String reason = mandatory("reason", requestData).asString();
String agent = getUserPrincipal(request).getIdentity().getFullName();
long timestamp = controller.clock().instant().getEpochSecond();
EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent, timestamp);
controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()),
endpointStatus);
return new MessageResponse(String.format("Successfully set %s in %s.%s %s service",
application.id().toShortString(),
deployment.zone().environment().value(),
deployment.zone().region().value(),
inService ? "in" : "out of"));
}
/** Returns the global rotation override status for all routing endpoints of the given deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    // Iterate over entries directly rather than keySet() + get(), avoiding a redundant map lookup per key.
    for (Map.Entry<RoutingEndpoint, EndpointStatus> entry : controller.applications().globalRotationStatus(deploymentId).entrySet()) {
        EndpointStatus currentStatus = entry.getValue();
        // The array alternates between an endpoint's upstream name and an object with its status details.
        array.addString(entry.getKey().upstreamName());
        Cursor statusObject = array.addObject();
        statusObject.setString("status", currentStatus.getStatus().name());
        statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
        statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
        statusObject.setLong("timestamp", currentStatus.getEpoch());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the global rotation status for the given deployment; 404 if no rotation or no deployment. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().require(applicationId);
    ZoneId zone = ZoneId.from(environment, region);
    if ( ! application.rotation().isPresent())
        throw new NotExistsException("global rotation does not exist for " + application);
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(application + " has no deployment in " + zone);
    Slime slime = new Slime();
    toSlime(application.rotationStatus(deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the change currently in progress for the given (default-instance) application, if any. */
private HttpResponse deploying(String tenant, String application, HttpRequest request) {
    Application app = controller.applications().require(ApplicationId.from(tenant, application, "default"));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = app.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended (out of service). */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(applicationId, ZoneId.from(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Lists the services of the given deployment. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = ZoneId.from(environment, region); // resolved once, used for both zone and config server uris
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/** Proxies a service API request for a specific service of the given deployment. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    Map<?, ?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath);
    ZoneId zone = ZoneId.from(environment, region); // resolved once, used for both zone and config server uris
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Creates a user tenant for the authenticated user, or reports that it already exists. */
private HttpResponse createUser(HttpRequest request) {
    UserId user = getUserId(request).orElseThrow(() -> new ForbiddenException("Not authenticated or not a user."));
    String username = UserTenant.normalizeUser(user.id());
    try {
        controller.tenants().createUser(UserTenant.create(username));
        return new MessageResponse("Created user '" + username + "'");
    }
    catch (AlreadyExistsException e) {
        // An existing user tenant is not an error for this endpoint; report it as a message.
        return new MessageResponse("User '" + username + "' already exists");
    }
}
/** Updates an existing tenant from the permit carried by the request, and returns its new state. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 if the tenant does not exist
    TenantName name = TenantName.from(tenantName);
    controller.tenants().update(permits.getTenantPermit(name, request));
    return tenant(controller.tenants().require(name), request);
}
/** Creates a new tenant from the permit carried by the request, and returns its state. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName name = TenantName.from(tenantName);
    controller.tenants().create(permits.getTenantPermit(name, request));
    return tenant(controller.tenants().require(name), request);
}
/** Creates a new (default-instance) application under the given tenant. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
try {
// User tenants need no permit; all other tenant types require an application permit from the request.
Optional<ApplicationPermit> permit = controller.tenants().require(id.tenant()).type() != Tenant.Type.user
? Optional.of(permits.getApplicationPermit(id, request)) : Optional.empty();
Application application = controller.applications().createApplication(id, permit);
Slime slime = new Slime();
toSlime(application, slime.setObject(), request);
return new SlimeJsonResponse(slime);
}
catch (ZmsClientException e) {
// Translate Athenz authorization failures to 403; any other ZMS error is rethrown unchanged.
if (e.getErrorCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN)
throw new ForbiddenException("Not authorized to create application", e);
else
throw e;
}
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, boolean pin, HttpRequest request) {
request = controller.auditLogger().log(request);
// The request body is the raw version string.
String versionString = readToString(request.getData());
ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
StringBuilder response = new StringBuilder();
controller.applications().lockOrThrow(id, application -> {
Version version = Version.fromString(versionString);
// An empty version means "deploy the current system version".
if (version.equals(Version.emptyVersion))
version = controller.systemVersion();
if ( ! systemHasVersion(version))
throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
"Version is not active in this system. " +
"Active versions: " + controller.versionStatus().versions()
.stream()
.map(VespaVersion::versionNumber)
.map(Version::toString)
.collect(joining(", ")));
Change change = Change.of(version);
// Pinning prevents later automatic platform changes away from this version.
if (pin)
change = change.withPin();
controller.applications().deploymentTrigger().forceChange(id, change);
response.append("Triggered " + change + " for " + id);
});
return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        // Fail with a helpful 400-style message instead of a bare NoSuchElementException when
        // there is no component job status, or no successful component run, to redeploy from.
        ApplicationVersion version = application.get().deploymentJobs().statusOf(JobType.component)
                                                .flatMap(status -> status.lastSuccess())
                                                .map(run -> run.application())
                                                .orElseThrow(() -> new IllegalArgumentException(
                                                        "No successful component build to redeploy for " + id));
        Change change = Change.of(version);
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Change change = application.get().change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for " + application + " at this time");
            return;
        }
        // Upper-case with a fixed locale so the enum lookup is independent of the JVM's default
        // locale (e.g. "all" would not map to "ALL" under a Turkish default locale).
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase(java.util.Locale.ROOT));
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '" + change + "' to '" +
                        controller.applications().require(id).change() + "' for " + application);
    });
    return new MessageResponse(response.toString());
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    DeploymentId deploymentId = new DeploymentId(applicationId, ZoneId.from(environment, region));
    // An optional "hostname" query parameter restricts the restart to a single node.
    Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
    controller.applications().restart(deploymentId, hostname);
    String restarted = path(TenantResource.API_PATH, tenantName,
                            ApplicationResource.API_PATH, applicationName,
                            EnvironmentResource.API_PATH, environment,
                            "region", region,
                            "instance", instanceName);
    return new StringResponse("Requested restart of " + restarted);
}
/**
 * Deploys an application (or system) package to the given zone.
 * The multipart request must contain a "deployOptions" json part, and may contain an "applicationZip" part.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
Map<String, byte[]> dataParts = new MultipartParser().parse(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
/*
* Special handling of the zone application (the only system application with an application package)
* Setting any other deployOptions here is not supported for now (e.g. specifying version), but
* this might be handy later to handle emergency downgrades.
*/
boolean isZoneApplication = SystemApplication.zone.id().equals(applicationId);
if (isZoneApplication) {
// Explicit versions are rejected for system applications; the system version is always used.
String versionStr = deployOptions.field("vespaVersion").asString();
boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
if (versionPresent) {
throw new RuntimeException("Version not supported for system applications");
}
// Refuse deployment while the system itself is upgrading, or before its version is known.
if (controller.versionStatus().isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
ActivateResult result = controller.applications()
.deploySystemApplicationPackage(SystemApplication.zone, zone, systemVersion.get().versionNumber());
return new SlimeJsonResponse(toSlime(result));
}
/*
* Normal applications from here
*/
Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
.map(ApplicationPackage::new);
// A (sourceRevision, buildNumber) pair identifies a previously stored application version;
// it is mutually exclusive with uploading a package directly.
Inspector sourceRevision = deployOptions.field("sourceRevision");
Inspector buildNumber = deployOptions.field("buildNumber");
if (sourceRevision.valid() != buildNumber.valid())
throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
Optional<ApplicationVersion> applicationVersion = Optional.empty();
if (sourceRevision.valid()) {
if (applicationPackage.isPresent())
throw new IllegalArgumentException("Application version and application package can't both be provided.");
applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
buildNumber.asLong()));
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
/*
* Deploy direct is when we want to redeploy the current application - retrieve version
* info from the application package before deploying
*/
if(deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) {
// Redeploy the exact versions currently active in this zone.
Optional<Deployment> deployment = controller.applications().get(applicationId)
.map(Application::deployments)
.flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
if(!deployment.isPresent())
throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
ApplicationVersion version = deployment.get().applicationVersion();
if(version.isUnknown())
throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
applicationVersion = Optional.of(version);
vespaVersion = Optional.of(deployment.get().version());
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
vespaVersion,
deployOptions.field("ignoreValidationErrors").asBool(),
deployOptions.field("deployCurrentVersion").asBool());
ActivateResult result = controller.applications().deploy(applicationId,
zone,
applicationPackage,
applicationVersion,
deployOptionsJsonClass,
Optional.of(getUserPrincipal(request).getIdentity()));
return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant: user tenants directly, all other tenant types through their permit. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> maybeTenant = controller.tenants().get(tenantName);
    if ( ! maybeTenant.isPresent())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
    Tenant tenant = maybeTenant.get();
    if (tenant.type() == Tenant.Type.user)
        controller.tenants().deleteUser((UserTenant) tenant);
    else
        controller.tenants().delete(permits.getTenantPermit(tenant.name(), request));
    // Respond with the representation of the tenant which was just deleted.
    return tenant(tenant, request);
}
/** Deletes the given (default-instance) application. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    // User tenants need no permit; all other tenant types require one from the request.
    boolean requiresPermit = controller.tenants().require(id.tenant()).type() != Tenant.Type.user;
    Optional<ApplicationPermit> permit = requiresPermit ? Optional.of(permits.getApplicationPermit(id, request))
                                                        : Optional.empty();
    controller.applications().deleteApplication(id, permit);
    return new EmptyJsonResponse();
}
/** Deactivates the given deployment. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    controller.applications().deactivate(application.id(), ZoneId.from(environment, region));
    String deactivated = path(TenantResource.API_PATH, tenantName,
                              ApplicationResource.API_PATH, applicationName,
                              EnvironmentResource.API_PATH, environment,
                              "region", region,
                              "instance", instanceName);
    return new StringResponse("Deactivated " + deactivated);
}
/**
* Promote application Chef environments. To be used by component jobs only
*/
private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) {
try{
ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
// Copies the system-level Chef environment into this application's source environment.
String sourceEnvironment = chefEnvironment.systemChefEnvironment();
String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
} catch (Exception e) {
// Broad catch is deliberate: any Chef failure is logged and mapped to a 500 rather than propagated.
log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
}
}
/**
* Promote application Chef environments for jobs that deploy applications
*/
private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) {
try {
ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
// Copies the application's source environment into its zone-specific target environment.
String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
} catch (Exception e) {
// Broad catch is deliberate: any Chef failure is logged and mapped to a 500 rather than propagated.
log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
}
}
/** Registers completion of a deployment job run, as reported by the external build system. */
private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) {
try {
DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get());
// Applications which are built and deployed internally no longer accept external component reports.
if ( report.jobType() == JobType.component
&& controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally())
throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " +
"longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
"to the old pipeline, please file a ticket at yo/vespa-support and request this.");
controller.applications().deploymentTrigger().notifyOfCompletion(report);
return new MessageResponse("ok");
} catch (IllegalStateException e) {
// NOTE(review): only IllegalStateException is mapped to 400 here; the IllegalArgumentException
// thrown above escapes this catch — confirm the outer handler maps it as intended.
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
}
/** Parses a job report from the given json payload. */
private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) {
    Inspector jobErrorField = report.field("jobError");
    Optional<DeploymentJobs.JobError> jobError = jobErrorField.valid()
            ? Optional.of(DeploymentJobs.JobError.valueOf(jobErrorField.asString()))
            : Optional.empty();
    ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString());
    JobType type = JobType.fromJobName(report.field("jobName").asString());
    long buildNumber = report.field("buildNumber").asLong();
    // Component jobs additionally carry a project id and a source revision.
    if (type == JobType.component)
        return DeploymentJobs.JobReport.ofComponent(id,
                                                    report.field("projectId").asLong(),
                                                    buildNumber,
                                                    jobError,
                                                    toSourceRevision(report.field("sourceRevision")));
    return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError);
}
/** Parses a source revision; repository, branch and commit must all be present. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws a NotExistsException (404) if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
return controller.tenants().get(tenantName)
.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/** Serializes the given tenant, its applications, and Athenz metadata/contacts when available. */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tentantType(tenant));
if (tenant instanceof AthenzTenant) {
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
}
// Only default instances are listed: fully when recursing, otherwise as summaries with urls.
Cursor applicationArray = object.setArray("applications");
for (Application application : controller.applications().asList(tenant.name())) {
if (application.id().instance().isDefault()) {
if (recurseOverApplications(request))
toSlime(applicationArray.addObject(), application, request);
else
toSlime(application, applicationArray.addObject(), request);
}
}
if (tenant instanceof AthenzTenant) {
AthenzTenant athenzTenant = (AthenzTenant) tenant;
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
// Contacts are serialized as an array of arrays — presumably one inner array per
// contact level; verify against the Contact type if this needs to change.
Cursor contactsArray = object.setArray("contacts");
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
}
}
/** Serializes a short summary of the given tenant, for the tenant list. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor meta = object.setObject("metaData");
    meta.setString("type", tentantType(tenant));
    if (tenant instanceof AthenzTenant) {
        AthenzTenant asAthenz = (AthenzTenant) tenant;
        meta.setString("athensDomain", asAthenz.domain().getName());
        meta.setString("property", asAthenz.property().id());
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
try {
// Query and fragment are deliberately dropped; only scheme, authority and the new path are kept.
return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
}
catch (URISyntaxException e) {
// All components come from an already-valid URI, so this cannot occur in practice.
throw new RuntimeException("Will not happen", e);
}
}
/**
 * Parses the given value as a long, returning the given default when the value is null.
 *
 * @throws IllegalArgumentException if the value is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Keep the NumberFormatException as cause instead of discarding it.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/** Serializes a single job run. */
private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
    object.setLong("id", jobRun.id());
    object.setString("version", jobRun.platform().toFullString());
    ApplicationVersion applicationVersion = jobRun.application();
    // An unknown application version carries no information worth serializing.
    if ( ! applicationVersion.isUnknown())
        toSlime(applicationVersion, object.setObject("revision"));
    object.setString("reason", jobRun.reason());
    object.setLong("at", jobRun.at().toEpochMilli());
}
/**
 * Reads the given stream (at most 1 MB) and parses it as json.
 *
 * @throws RuntimeException wrapping the IOException if reading the stream fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Preserve the cause and give a message — the original bare RuntimeException hid both.
        throw new RuntimeException("Failed reading request body", e);
    }
}
/** Returns the user id of the request's principal, or empty if the principal is not an Athenz user. */
private static Optional<UserId> getUserId(HttpRequest request) {
    return Optional.of(getUserPrincipal(request).getIdentity())
                   .filter(identity -> identity instanceof AthenzUser)
                   .map(identity -> ((AthenzUser) identity).getName())
                   .map(UserId::new);
}
/**
 * Returns the authenticated Athenz principal of the given request.
 *
 * @throws InternalServerErrorException if the request has no principal, or one of an unexpected type
 */
private static AthenzPrincipal getUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal == null)
        throw new InternalServerErrorException("Expected a user principal");
    if (principal instanceof AthenzPrincipal)
        return (AthenzPrincipal) principal;
    throw new InternalServerErrorException(String.format("Expected principal of type %s, got %s",
                                                         AthenzPrincipal.class.getSimpleName(),
                                                         principal.getClass().getName()));
}
/** Returns the given field of the given object, which must be present. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the string value of the given field, or empty if the field is missing. */
private Optional<String> optional(String key, Inspector object) {
return SlimeUtils.optionalString(object.field(key));
}
/** Joins the string representations of the given elements with '/' into a path. */
private static String path(Object... elements) {
    // Use the JDK stream API instead of Guava's Joiner; behavior is identical
    // (each element's toString is used, and a null element throws NullPointerException).
    return java.util.stream.Stream.of(elements)
                                  .map(Object::toString)
                                  .collect(java.util.stream.Collectors.joining("/"));
}
/** Serializes a short summary of the given application, with a url to its full representation. */
private void toSlime(Application application, Cursor object, HttpRequest request) {
    ApplicationId id = application.id();
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String applicationPath = "/application/v4/tenant/" + id.tenant().value() +
                             "/application/" + id.application().value();
    object.setString("url", withPath(applicationPath, request.getUri()).toString());
}
/** Serializes the result of a deployment activation: prepare log and required config change actions. */
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
// The prepare log may be absent; an empty array is serialized in that case.
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
// Restart actions: services which must be restarted for the config change to take effect.
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
// Refeed actions: document types which must be re-fed for the config change to take effect.
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setBool("allowed", refeedAction.allowed);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
/** Adds one object per service (name, type, config id, host name) to the given Slime array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(service -> {
        Cursor entry = array.addObject();
        entry.setString("serviceName", service.serviceName);
        entry.setString("serviceType", service.serviceType);
        entry.setString("configId", service.configId);
        entry.setString("hostName", service.hostName);
    });
}
/** Adds each of the given strings to the given Slime array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/**
 * Reads the entire stream as a single string, or returns null if the stream is empty.
 *
 * Fix: the no-arg Scanner constructor decodes with the JVM's platform-default charset,
 * making request parsing depend on the deployment environment. Request bodies here are
 * UTF-8, so decode explicitly as UTF-8.
 */
private String readToString(InputStream stream) {
    // "\\A" (start of input) as delimiter makes next() return the whole remaining input.
    Scanner scanner = new Scanner(stream, "UTF-8").useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}
/** Returns whether the given platform version is among the versions known to this system's version status. */
private boolean systemHasVersion(Version version) {
return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
}
/** Serializes deployment-level TCO, waste and utilization, plus a per-cluster breakdown. */
public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
    object.setLong("tco", (long) deploymentCost.getTco());
    object.setLong("waste", (long) deploymentCost.getWaste());
    object.setDouble("utilization", deploymentCost.getUtilization());
    Cursor clusters = object.setObject("cluster");
    deploymentCost.getCluster().forEach((name, clusterCost) -> toSlime(clusterCost, clusters.setObject(name)));
}
/**
 * Serializes cost, flavor and utilization details for a single cluster.
 *
 * Fix: tco and waste were cast to int before being written as longs, which truncates and
 * can overflow for large values — and was inconsistent with the (long) casts used by the
 * deployment-level serializer. Cast to long instead.
 */
private static void toSlime(ClusterCost clusterCost, Cursor object) {
    object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
    object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
    object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
    object.setLong("tco", (long)clusterCost.getTco());
    object.setLong("waste", (long)clusterCost.getWaste());
    object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
    object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
    object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
    object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
    object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
    object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
    // Target (result) utilization per resource dimension.
    Cursor utilObject = object.setObject("util");
    utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
    utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
    utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
    utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
    // Measured system utilization per resource dimension.
    Cursor usageObject = object.setObject("usage");
    usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
    usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
    usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
    usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
    Cursor hostnamesArray = object.setArray("hostnames");
    for (String hostname : clusterCost.getClusterInfo().getHostnames())
        hostnamesArray.addString(hostname);
}
/**
 * Returns the name of the most utilized resource dimension.
 * Checked in order memory, disk, diskbusy; defaults to "cpu" when none of those equals the max.
 */
private static String getResourceName(ClusterUtilization utilization) {
    double max = utilization.getMaxUtilization();
    if (utilization.getMemory() == max) return "mem";
    if (utilization.getDisk() == max) return "disk";
    if (utilization.getDiskBusy() == max) return "diskbusy";
    return "cpu";
}
/** Returns whether the request asks for recursion at tenant level or deeper. */
private static boolean recurseOverTenants(HttpRequest request) {
    if (recurseOverApplications(request)) return true;
    return "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion at application level or deeper. */
private static boolean recurseOverApplications(HttpRequest request) {
    if (recurseOverDeployments(request)) return true;
    return "application".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion down to deployment level. */
private static boolean recurseOverDeployments(HttpRequest request) {
    String recursive = request.getProperty("recursive");
    return "all".equals(recursive) || "true".equals(recursive) || "deployment".equals(recursive);
}
/**
 * Returns the legacy type string for the given tenant.
 * NOTE: the method name contains a historical typo ("tentant"); it is kept as-is to avoid
 * breaking callers elsewhere in this file.
 */
private static String tentantType(Tenant tenant) {
    if (tenant instanceof AthenzTenant) return "ATHENS";
    if (tenant instanceof UserTenant) return "USER";
    throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
/** Extracts the application id from the {tenant}/{application}/{instance} path bindings. */
private static ApplicationId appIdFromPath(Path path) {
    String tenant = path.get("tenant");
    String application = path.get("application");
    String instance = path.get("instance");
    return ApplicationId.from(tenant, application, instance);
}
/** Extracts the job type from the {jobtype} path binding. */
private static JobType jobTypeFromPath(Path path) {
    String jobName = path.get("jobtype");
    return JobType.fromJobName(jobName);
}
/** Extracts the run id (application, job type, run number) from the path bindings. */
private static RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), Long.parseLong(path.get("number")));
}
/**
 * Handles a multipart application submission: parses submit options and packages from the
 * request body, verifies the application package's identity configuration for the tenant,
 * and registers the submission with the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = new MultipartParser().parse(request);
// Submit options are a JSON part alongside the binary application and test packages.
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
SourceRevision sourceRevision = toSourceRevision(submitOptions);
String authorEmail = submitOptions.field("authorEmail").asString();
// An absent projectId reads as 0; normalize to at least 1.
long projectId = Math.max(1, submitOptions.field("projectId").asLong());
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
applicationPackage,
Optional.of(getUserPrincipal(request).getIdentity()));
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
tenant,
application,
sourceRevision,
authorEmail,
projectId,
applicationPackage,
dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
} |
Nevermind, saw the other PR now. | private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/user")) return authenticatedUser(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
if (path.matches("/application/v4/property")) return properties();
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
} | if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines(); | private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/user")) return authenticatedUser(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
if (path.matches("/application/v4/property")) return properties();
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
} | class ApplicationApiHandler extends LoggingRequestHandler {
private final Controller controller;
private final PermitExtractor permits;
/** Creates this handler with its controller and permit-extractor dependencies. */
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
PermitExtractor permits) {
super(parentCtx);
this.controller = controller;
this.permits = permits;
}
// Requests to this handler may be long-running; allow a generous timeout.
private static final Duration REQUEST_TIMEOUT = Duration.ofMinutes(20);

@Override
public Duration getTimeout() {
    return REQUEST_TIMEOUT;
}
/**
 * Dispatches the request by HTTP method and maps known exception types to the
 * corresponding HTTP error responses. Catch order goes from most to least specific;
 * RuntimeException is the catch-all that also logs the failure.
 */
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case PUT: return handlePUT(request);
case POST: return handlePOST(request);
case PATCH: return handlePATCH(request);
case DELETE: return handleDELETE(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
return ErrorResponse.from(e);
}
catch (RuntimeException e) {
// Unexpected: log with stack trace before returning 500.
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
/** Routes PUT requests: user creation, tenant update, and global rotation override. */
private HttpResponse handlePUT(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/user")) return createUser(request);
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes POST requests: creation, deployment, job triggering and promotion endpoints. */
private HttpResponse handlePOST(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests; only the application resource (major version pinning) supports PATCH. */
private HttpResponse handlePATCH(HttpRequest request) {
    Path path = new Path(request.getUri().getPath());
    if ( ! path.matches("/application/v4/tenant/{tenant}/application/{application}"))
        return ErrorResponse.notFoundError("Nothing at " + path);
    return setMajorVersion(path.get("tenant"), path.get("application"), request);
}
/** Routes DELETE requests: removal of tenants, applications, deployments, jobs and overrides. */
private HttpResponse handleDELETE(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS with an empty body and an Allow header listing the supported methods. */
private HttpResponse handleOPTIONS() {
    EmptyJsonResponse options = new EmptyJsonResponse();
    options.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return options;
}
/** Renders every tenant as an object in a JSON array. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenants = slime.setArray();
    for (Tenant tenant : controller.tenants().asList())
        toSlime(tenants.addObject(), tenant, request);
    return new SlimeJsonResponse(slime);
}
/** API root: recurses over all tenants when requested, otherwise lists the available sub-resources. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property");
}
/**
 * Returns the authenticated user's name, the tenants they belong to, and whether a
 * personal user tenant exists for them. Responds 401 when unauthenticated.
 */
private HttpResponse authenticatedUser(HttpRequest request) {
AthenzPrincipal user = getUserPrincipal(request);
if (user == null)
throw new NotAuthorizedException("You must be authenticated.");
List<Tenant> tenants = controller.tenants().asList(user);
Slime slime = new Slime();
Cursor response = slime.setObject();
response.setString("user", user.getIdentity().getName());
Cursor tenantsArray = response.setArray("tenants");
for (Tenant tenant : tenants)
tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
// True when the user has a personal (user-type) tenant matching their identity.
response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant.type() == Type.user &&
((UserTenant) tenant).is(user.getIdentity().getName())));
return new SlimeJsonResponse(slime);
}
/** Lists all tenants in a JSON array. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    for (Tenant tenant : controller.tenants().asList())
        tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject());
    return new SlimeJsonResponse(slime);
}
/** Lists the screwdriver project id for each application */
private HttpResponse tenantPipelines() {
Slime slime = new Slime();
Cursor response = slime.setObject();
Cursor pipelinesArray = response.setArray("tenantPipelines");
for (Application application : controller.applications().asList()) {
// Applications without a registered project id have no pipeline to list.
if ( ! application.deploymentJobs().projectId().isPresent()) continue;
Cursor pipelineObject = pipelinesArray.addObject();
pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().getAsLong()));
pipelineObject.setString("tenant", application.id().tenant().value());
pipelineObject.setString("application", application.id().application().value());
pipelineObject.setString("instance", application.id().instance().value());
}
// Always present, always empty — kept for response-format compatibility.
response.setArray("brokenTenantPipelines");
return new SlimeJsonResponse(slime);
}
/** Lists the known property ids with their property names. */
private HttpResponse properties() {
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("properties");
    controller.fetchPropertyList().forEach((propertyId, property) -> {
        Cursor entry = array.addObject();
        entry.setString("propertyid", propertyId.id());
        entry.setString("property", property.id());
    });
    return new SlimeJsonResponse(slime);
}
/** Returns the named tenant, or a 404 response if it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
return controller.tenants().get(TenantName.from(tenantName))
.map(tenant -> tenant(tenant, request))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
}
/** Renders the given tenant as a JSON response. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, tenant, request);
    return new SlimeJsonResponse(slime);
}
/** Lists the applications of the named tenant in a JSON array. */
private HttpResponse applications(String tenantName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor applications = slime.setArray();
    for (Application application : controller.applications().asList(TenantName.from(tenantName)))
        toSlime(application, applications.addObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Renders a single application (default instance) as a JSON response. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Application application = getApplication(tenantName, applicationName);
    Slime slime = new Slime();
    toSlime(slime.setObject(), application, request);
    return new SlimeJsonResponse(slime);
}
/**
 * Pins (or clears) the major version of an application from the request body's
 * "majorVersion" field. A value of 0 clears the pin.
 */
private HttpResponse setMajorVersion(String tenantName, String applicationName, HttpRequest request) {
Application application = getApplication(tenantName, applicationName);
Inspector majorVersionField = toSlime(request.getData()).get().field("majorVersion");
if ( ! majorVersionField.valid())
throw new IllegalArgumentException("Request body must contain a majorVersion field");
// 0 means "unpin": stored as null.
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int)majorVersionField.asLong();
controller.applications().lockIfPresent(application.id(),
a -> controller.applications().store(a.withMajorVersion(majorVersion)));
return new MessageResponse("Set major version to " + ( majorVersion == null ? "empty" : majorVersion));
}
/** Returns the default-instance application of the given tenant, or throws NotExistsException. */
private Application getApplication(String tenantName, String applicationName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    return controller.applications().get(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
/**
 * Returns logs for the given deployment. With the "streaming" query parameter the config
 * server's log stream is proxied directly to the client; otherwise the logs are fetched
 * once and rendered as a JSON object.
 *
 * Cleanup: replaces the Optional.isPresent()/get() pattern and a needless
 * entrySet().stream().forEach with idiomatic Optional.ifPresent and Map.forEach —
 * behavior is unchanged.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    if (queryParameters.containsKey("streaming")) {
        InputStream logStream = controller.configServer().getLogStream(deployment, queryParameters);
        return new HttpResponse(200) {
            @Override
            public void render(OutputStream outputStream) throws IOException {
                // NOTE(review): transferTo does not close logStream — confirm the stream is
                // closed elsewhere, or wrap in try-with-resources if this must close it.
                logStream.transferTo(outputStream);
            }
        };
    }
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    controller.configServer().getLogs(deployment, queryParameters)
              .ifPresent(logs -> logs.logs().forEach(object::setString));
    return new SlimeJsonResponse(slime);
}
/**
 * Force-triggers the given job for the given application on behalf of the requesting user,
 * and reports which jobs (if any) were actually triggered.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
String triggered = controller.applications().deploymentTrigger()
.forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName())
.stream().map(JobType::jobName).collect(joining(", "));
return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
: "Triggered " + triggered + " for " + id);
}
/** Pauses the given job for the maximum allowed pause duration, counted from now. */
private HttpResponse pause(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger()
              .pauseJob(id, type, controller.clock().instant().plus(DeploymentTrigger.maxPause));
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/**
 * Serializes the given application — identity, jobs, change status, rotations,
 * deployments, metrics and ownership — onto the given Slime cursor.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
object.setString("application", application.id().application().value());
object.setString("instance", application.id().instance().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + application.id().instance().value() + "/job/",
request.getUri()).toString());
// Source revision of the last successful component build, if any.
application.deploymentJobs().statusOf(JobType.component)
.flatMap(JobStatus::lastSuccess)
.map(run -> run.application().source())
.ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source")));
application.deploymentJobs().projectId()
.ifPresent(id -> object.setLong("projectId", id));
// Change currently rolling out, and change waiting to roll out, when present.
if ( ! application.change().isEmpty()) {
toSlime(object.setObject("deploying"), application.change());
}
if ( ! application.outstandingChange().isEmpty()) {
toSlime(object.setObject("outstandingChange"), application.outstandingChange());
}
// Job status, sorted in deployment-spec order.
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedJobs(application.deploymentJobs().jobStatus().values());
object.setBool("deployedInternally", application.deploymentJobs().deployedInternally());
Cursor deploymentsArray = object.setArray("deploymentJobs");
for (JobStatus job : jobStatus) {
Cursor jobObject = deploymentsArray.addObject();
jobObject.setString("type", job.type().jobName());
jobObject.setBool("success", job.isSuccess());
job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
}
// Time windows where change rollout is blocked, from the deployment spec.
Cursor changeBlockers = object.setArray("changeBlockers");
application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
});
object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString());
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
// Global rotation endpoints for this application.
Cursor globalRotationsArray = object.setArray("globalRotations");
application.globalDnsName(controller.system()).ifPresent(rotation -> {
globalRotationsArray.addString(rotation.url().toString());
globalRotationsArray.addString(rotation.secureUrl().toString());
globalRotationsArray.addString(rotation.oathUrl().toString());
// NOTE(review): unchecked Optional.get() — presumably a global DNS name implies an
// assigned rotation; confirm, or guard with ifPresent.
object.setString("rotationId", application.rotation().get().asString());
});
Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies(application.id());
for (RoutingPolicy policy : routingPolicies) {
for (RotationName rotation : policy.rotations()) {
GlobalDnsName dnsName = new GlobalDnsName(application.id(), controller.system(), rotation);
globalRotationsArray.addString(dnsName.oathUrl().toString());
}
}
// Deployments ("instances"), sorted in deployment-spec order.
List<Deployment> deployments = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedDeployments(application.deployments().values());
Cursor instancesArray = object.setArray("instances");
for (Deployment deployment : deployments) {
Cursor deploymentObject = instancesArray.addObject();
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("instance", application.id().instance().value());
// Rotation status is only relevant for prod deployments of rotation-assigned applications.
if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) {
toSlime(application.rotationStatus(deployment), deploymentObject);
}
// Either inline full deployment data (recursive request) or a URL pointing to it.
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
else
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value() +
"/instance/" + application.id().instance().value(),
request.getUri()).toString());
}
// Service quality metrics and recent activity timestamps/rates.
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
// Ownership and issue-tracking references, when present.
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns JSON describing a single deployment of the given application instance in the given zone. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().get(applicationId)
                                        .orElseThrow(() -> new NotExistsException(applicationId + " not found"));

    DeploymentId deploymentId = new DeploymentId(application.id(), ZoneId.from(environment, region));
    Deployment deployment = application.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId());

    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Writes the target platform version and/or application revision of the given change, when present. */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    Optional<ApplicationVersion> revision = change.application();
    if (revision.isPresent() && ! revision.get().isUnknown())
        toSlime(revision.get(), object.setObject("revision"));
}
/**
 * Serializes the detail view of a single deployment: service and node URLs, log and monitoring
 * links, versions, deploy time, source revision, activity, cost and metrics.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    // Endpoint URLs of this deployment, when they can be resolved
    Cursor serviceUrlArray = response.setArray("serviceUrls");
    controller.applications().getDeploymentEndpoints(deploymentId)
              .ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())));

    // Link to the node repository listing of this deployment's nodes
    response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());

    controller.zoneRegistry().getLogServerUri(deploymentId)
              .ifPresent(elkUrl -> response.setString("elkUrl", elkUrl.toString()));
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", deployment.applicationVersion().id());
    response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
    // Expiry is only reported for zones configured with a deployment time-to-live
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));

    controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId()
              .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    sourceRevisionToSlime(deployment.applicationVersion().source(), response);

    // Recent query/write activity of this deployment, when known
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    // Cost of this deployment
    DeploymentCost appCost = deployment.calculateCost();
    Cursor costObject = response.setObject("cost");
    toSlime(appCost, costObject);

    // Serving metrics of this deployment
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Writes the hash and source revision of the given application version, unless the version is unknown. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown())
        return; // nothing to report for unknown versions

    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
}
/** Writes the git repository, branch and commit of the given source revision, if one is present. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Writes the BCP status object containing the upper-cased name of the given rotation status. */
private void toSlime(RotationStatus status, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", status.name().toUpperCase());
}
/** Returns the monitoring system URI for the given deployment, as configured in the zone registry (used for the "yamasUrl" field). */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Takes a deployment in or out of service in its global rotation.
 *
 * @param inService true to put the deployment back in service, false to take it out
 * @throws NotExistsException if the application has no deployment in the given zone
 * @throws IllegalArgumentException if the request body lacks the mandatory "reason" field
 */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = ZoneId.from(environment, region);
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(application + " has no deployment in " + zone);
    }

    // A "reason" is required in the request body for the audit trail of the override
    Inspector requestData = toSlime(request.getData()).get();
    String reason = mandatory("reason", requestData).asString();
    String agent = getUserPrincipal(request).getIdentity().getFullName();
    long timestamp = controller.clock().instant().getEpochSecond();
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent, timestamp);
    controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()),
                                                      endpointStatus);

    return new MessageResponse(String.format("Successfully set %s in %s.%s %s service",
                                             application.id().toShortString(),
                                             deployment.zone().environment().value(),
                                             deployment.zone().region().value(),
                                             inService ? "in" : "out of"));
}
/**
 * Returns the global rotation override status for all routing endpoints of the given deployment.
 *
 * The response array alternates between an endpoint's upstream name and its status object,
 * preserving the existing wire format.
 */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    // Iterate over entries directly instead of keySet() + get(key): avoids a second map lookup per endpoint
    for (Map.Entry<RoutingEndpoint, EndpointStatus> entry : controller.applications().globalRotationStatus(deploymentId).entrySet()) {
        EndpointStatus currentStatus = entry.getValue();
        array.addString(entry.getKey().upstreamName());
        Cursor statusObject = array.addObject();
        statusObject.setString("status", currentStatus.getStatus().name());
        // Null reason/agent are serialized as empty strings to keep the JSON shape stable
        statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
        statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
        statusObject.setLong("timestamp", currentStatus.getEpoch());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation (BCP) status of the given deployment, when the application has a global rotation. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = ZoneId.from(environment, region);
    if ( ! application.rotation().isPresent())
        throw new NotExistsException("global rotation does not exist for " + application);

    Deployment deployment = application.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(application + " has no deployment in " + zone);

    Slime slime = new Slime();
    toSlime(application.rotationStatus(deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the change (platform version and/or application revision) currently deploying for the default instance, if any. */
private HttpResponse deploying(String tenant, String application, HttpRequest request) {
    Application app = controller.applications().require(ApplicationId.from(tenant, application, "default"));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = app.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Returns the service view of the given deployment. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = ZoneId.from(environment, region);
    ApplicationId applicationId = new ApplicationId.Builder().tenant(tenantName)
                                                             .applicationName(applicationName)
                                                             .instanceName(instanceName)
                                                             .build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         applicationId,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/** Proxies a request for a single service resource of the given deployment. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath);
    ZoneId zone = ZoneId.from(environment, region);
    ApplicationId applicationId = new ApplicationId.Builder().tenant(tenantName)
                                                             .applicationName(applicationName)
                                                             .instanceName(instanceName)
                                                             .build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         applicationId,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Creates a user tenant for the authenticated user, or reports that it already exists. */
private HttpResponse createUser(HttpRequest request) {
    UserId user = getUserId(request).orElseThrow(() -> new ForbiddenException("Not authenticated or not a user."));

    String username = UserTenant.normalizeUser(user.id());
    try {
        controller.tenants().createUser(UserTenant.create(username));
        return new MessageResponse("Created user '" + username + "'");
    }
    catch (AlreadyExistsException e) {
        // Idempotent: a pre-existing user tenant is not an error
        return new MessageResponse("User '" + username + "' already exists");
    }
}
/** Updates an existing tenant with the permit contained in the request, and returns the updated tenant. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    TenantName name = TenantName.from(tenantName);
    getTenantOrThrow(tenantName); // 404 if the tenant does not exist
    controller.tenants().update(permits.getTenantPermit(name, request));
    return tenant(controller.tenants().require(name), request);
}
/** Creates a tenant from the permit contained in the request, and returns the new tenant. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName name = TenantName.from(tenantName);
    controller.tenants().create(permits.getTenantPermit(name, request));
    return tenant(controller.tenants().require(name), request);
}
/**
 * Creates a new default-instance application under the given tenant.
 *
 * @throws ForbiddenException if the caller is not authorized to create the application
 */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    try {
        // User tenants are created without an explicit application permit; all other tenant types require one
        Optional<ApplicationPermit> permit = controller.tenants().require(id.tenant()).type() != Tenant.Type.user
                ? Optional.of(permits.getApplicationPermit(id, request)) : Optional.empty();
        Application application = controller.applications().createApplication(id, permit);

        Slime slime = new Slime();
        toSlime(application, slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }
    catch (ZmsClientException e) {
        // FORBIDDEN from the ZMS client is translated to a 403; anything else propagates
        if (e.getErrorCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN)
            throw new ForbiddenException("Not authorized to create application", e);
        else
            throw e;
    }
}
/**
 * Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9".
 * An empty version in the request body means the current system version.
 *
 * @param pin whether the requested platform change should be pinned
 * @throws IllegalArgumentException if the version is not active in this system
 */
private HttpResponse deployPlatform(String tenantName, String applicationName, boolean pin, HttpRequest request) {
    request = controller.auditLogger().log(request);
    // The request body is the raw version string
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Version version = Version.fromString(versionString);
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion();

        // Only versions currently active in this system may be targeted
        if ( ! systemHasVersion(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + controller.versionStatus().versions()
                                                                               .stream()
                                                                               .map(VespaVersion::versionNumber)
                                                                               .map(Version::toString)
                                                                               .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}
/**
 * Trigger deployment to the last known application package for the given application.
 *
 * @throws IllegalArgumentException if no successful component run — and hence no known
 *         application package — exists for the application
 */
private HttpResponse deployApplication(String tenantName, String applicationName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        // Fail with a descriptive message rather than a bare NoSuchElementException from
        // the previous unchecked Optional.get() chain when no component run has succeeded.
        ApplicationVersion version = application.get().deploymentJobs().statusOf(JobType.component)
                                                .flatMap(JobStatus::lastSuccess)
                                                .map(JobStatus.JobRun::application)
                                                .orElseThrow(() -> new IllegalArgumentException(
                                                        "Cannot deploy " + id + ": No known application package"));
        Change change = Change.of(version);
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}
/**
 * Cancel ongoing change for given application, e.g., everything with {"cancel":"all"}.
 *
 * @param choice matched case-insensitively against the ChangesToCancel enum
 */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Change change = application.get().change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for " + application + " at this time");
            return;
        }

        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        // Report the change before and after cancellation
        response.append("Changed deployment from '" + change + "' to '" +
                        controller.applications().require(id).change() + "' for " + application);
    });

    return new MessageResponse(response.toString());
}
/** Schedules restart of a deployment, optionally restricted to the single host given by the "hostname" property. */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
    controller.applications().restart(deploymentId, hostname);

    String restarted = path(TenantResource.API_PATH, tenantName,
                            ApplicationResource.API_PATH, applicationName,
                            EnvironmentResource.API_PATH, environment,
                            "region", region,
                            "instance", instanceName);
    return new StringResponse("Requested restart of " + restarted);
}
/**
 * Deploys an application package — or the zone (system) application — to the given zone.
 *
 * The multipart request must contain a "deployOptions" JSON part, and may contain an
 * "applicationZip" part with the application package.
 *
 * @throws IllegalArgumentException on inconsistent or insufficient deploy options
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);

    Map<String, byte[]> dataParts = new MultipartParser().parse(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");

    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

    /*
     * Special handling of the zone application (the only system application with an application package)
     * Setting any other deployOptions here is not supported for now (e.g. specifying version), but
     * this might be handy later to handle emergency downgrades.
     */
    boolean isZoneApplication = SystemApplication.zone.id().equals(applicationId);
    if (isZoneApplication) {
        // A specific version may not be requested for system applications
        String versionStr = deployOptions.field("vespaVersion").asString();
        boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
        if (versionPresent) {
            throw new RuntimeException("Version not supported for system applications");
        }
        // System applications are only deployed on a settled system version
        if (controller.versionStatus().isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        ActivateResult result = controller.applications()
                .deploySystemApplicationPackage(SystemApplication.zone, zone, systemVersion.get().versionNumber());
        return new SlimeJsonResponse(toSlime(result));
    }

    /*
     * Normal applications from here
     */
    Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
                                                              .map(ApplicationPackage::new);

    // Source revision and build number must be given together, and identify a stored package
    Inspector sourceRevision = deployOptions.field("sourceRevision");
    Inspector buildNumber = deployOptions.field("buildNumber");
    if (sourceRevision.valid() != buildNumber.valid())
        throw new IllegalArgumentException("Source revision and build number must both be provided, or not");

    Optional<ApplicationVersion> applicationVersion = Optional.empty();
    if (sourceRevision.valid()) {
        if (applicationPackage.isPresent())
            throw new IllegalArgumentException("Application version and application package can't both be provided.");

        applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
                                                                 buildNumber.asLong()));
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
    }

    boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
    Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);

    /*
     * Deploy direct is when we want to redeploy the current application - retrieve version
     * info from the application package before deploying
     */
    if(deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) {
        // Redeploy the application version currently deployed in this zone
        Optional<Deployment> deployment = controller.applications().get(applicationId)
                .map(Application::deployments)
                .flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));

        if(!deployment.isPresent())
            throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");

        ApplicationVersion version = deployment.get().applicationVersion();
        if(version.isUnknown())
            throw new IllegalArgumentException("Can't redeploy application, application version is unknown");

        applicationVersion = Optional.of(version);
        vespaVersion = Optional.of(deployment.get().version());
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
    }

    DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
                                                             vespaVersion,
                                                             deployOptions.field("ignoreValidationErrors").asBool(),
                                                             deployOptions.field("deployCurrentVersion").asBool());
    ActivateResult result = controller.applications().deploy(applicationId,
                                                             zone,
                                                             applicationPackage,
                                                             applicationVersion,
                                                             deployOptionsJsonClass,
                                                             Optional.of(getUserPrincipal(request).getIdentity()));

    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant; user tenants are deleted directly, other tenants via their permit. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

    if (tenant.get().type() == Tenant.Type.user)
        controller.tenants().deleteUser((UserTenant) tenant.get());
    else
        controller.tenants().delete(permits.getTenantPermit(tenant.get().name(), request));

    // Respond with the representation of the tenant that was just deleted
    return tenant(tenant.get(), request);
}
/** Deletes the default-instance application under the given tenant. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    // User tenants require no application permit; all other tenant types do
    boolean requiresPermit = controller.tenants().require(id.tenant()).type() != Tenant.Type.user;
    Optional<ApplicationPermit> permit = requiresPermit ? Optional.of(permits.getApplicationPermit(id, request))
                                                        : Optional.empty();
    controller.applications().deleteApplication(id, permit);
    return new EmptyJsonResponse();
}
/** Deactivates the given deployment of the application instance. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    controller.applications().deactivate(application.id(), ZoneId.from(environment, region));

    String deactivated = path(TenantResource.API_PATH, tenantName,
                              ApplicationResource.API_PATH, applicationName,
                              EnvironmentResource.API_PATH, environment,
                              "region", region,
                              "instance", instanceName);
    return new StringResponse("Deactivated " + deactivated);
}
/**
 * Promote application Chef environments. To be used by component jobs only.
 *
 * Copies the system-level Chef environment to this application's source environment.
 */
private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String sourceEnvironment = chefEnvironment.systemChefEnvironment();
        String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
    } catch (Exception e) {
        // Failures are logged and reported as 500 rather than propagated
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}
/**
 * Promote application Chef environments for jobs that deploy applications.
 *
 * Copies this application's source Chef environment to the target environment of the
 * given zone (environment and region).
 */
private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
        controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
    } catch (Exception e) {
        // Failures are logged and reported as 500 rather than propagated
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}
/**
 * Registers completion of a deployment job reported in the request body.
 *
 * @throws IllegalArgumentException if the application is deployed internally but a component
 *         job completion is reported from the external (Screwdriver) pipeline
 */
private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) {
    try {
        DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get());
        // Internally deployed applications no longer accept external component job reports
        if ( report.jobType() == JobType.component
            && controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally())
            throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " +
                                               "longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
                                               "to the old pipeline, please file a ticket at yo/vespa-support and request this.");
        controller.applications().deploymentTrigger().notifyOfCompletion(report);
        return new MessageResponse("ok");
    } catch (IllegalStateException e) {
        // NOTE(review): IllegalStateException is mapped to 400 here — confirm 400 (not 409/500) is intended
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
}
/** Parses a job report from the given JSON payload for the given tenant and application. */
private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) {
    Optional<DeploymentJobs.JobError> jobError = report.field("jobError").valid()
            ? Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString()))
            : Optional.empty();

    ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString());
    JobType type = JobType.fromJobName(report.field("jobName").asString());
    long buildNumber = report.field("buildNumber").asLong();

    // Component reports additionally carry a project id and a source revision
    if (type == JobType.component)
        return DeploymentJobs.JobReport.ofComponent(id,
                                                    report.field("projectId").asLong(),
                                                    buildNumber,
                                                    jobError,
                                                    toSourceRevision(report.field("sourceRevision")));
    return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError);
}
/** Reads a source revision from JSON; the "repository", "branch" and "commit" fields are all required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");

    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws a NotExistsException if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    return controller.tenants().get(tenantName)
                     .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/** Writes the full view of a tenant: identity fields, its default-instance applications, and contact info when present. */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tentantType(tenant));
    // Athenz tenants carry additional identity metadata
    if (tenant instanceof AthenzTenant) {
        AthenzTenant athenzTenant = (AthenzTenant) tenant;
        object.setString("athensDomain", athenzTenant.domain().getName());
        object.setString("property", athenzTenant.property().id());
        athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
    }
    // Only default instances are listed; with recursion each application is expanded in place
    Cursor applicationArray = object.setArray("applications");
    for (Application application : controller.applications().asList(tenant.name())) {
        if (application.id().instance().isDefault()) {
            if (recurseOverApplications(request))
                toSlime(applicationArray.addObject(), application, request);
            else
                toSlime(application, applicationArray.addObject(), request);
        }
    }
    // Contact information, when registered for the Athenz tenant
    if (tenant instanceof AthenzTenant) {
        AthenzTenant athenzTenant = (AthenzTenant) tenant;
        athenzTenant.contact().ifPresent(c -> {
            object.setString("propertyUrl", c.propertyUrl().toString());
            object.setString("contactsUrl", c.url().toString());
            object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
            Cursor contactsArray = object.setArray("contacts");
            // Each contact group becomes an array of person names — TODO confirm grouping semantics
            c.persons().forEach(persons -> {
                Cursor personArray = contactsArray.addArray();
                persons.forEach(personArray::addString);
            });
        });
    }
}
/** Writes a compact tenant entry (name, metadata and resource URL) for the tenant list response. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    String name = tenant.name().value();
    object.setString("tenant", name);
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tentantType(tenant));
    if (tenant instanceof AthenzTenant) {
        AthenzTenant athenzTenant = (AthenzTenant) tenant;
        metaData.setString("athensDomain", athenzTenant.domain().getName());
        metaData.setString("property", athenzTenant.property().id());
    }
    object.setString("url", withPath("/application/v4/tenant/" + name, requestURI).toString());
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    try {
        // Query and fragment are deliberately dropped (last two arguments are null)
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
    }
    catch (URISyntaxException e) {
        throw new RuntimeException("Will not happen", e); // components come from an already-valid URI
    }
}
/**
 * Parses the given string as a long.
 *
 * @param valueOrNull the string to parse, or null to use the default
 * @param defaultWhenNull the value returned when valueOrNull is null
 * @throws IllegalArgumentException if the string is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;

    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Preserve the cause: the original dropped the NumberFormatException
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/** Writes a job run: id, platform version, application revision (when known), reason and timestamp. */
private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
    object.setLong("id", jobRun.id());
    object.setString("version", jobRun.platform().toFullString());
    ApplicationVersion revision = jobRun.application();
    if ( ! revision.isUnknown())
        toSlime(revision, object.setObject("revision"));
    object.setString("reason", jobRun.reason());
    object.setLong("at", jobRun.at().toEpochMilli());
}
/**
 * Reads the given stream (at most 1 MB) and parses its contents as JSON into a Slime structure.
 *
 * @throws RuntimeException wrapping the underlying IOException if reading fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Preserve the cause: the original threw a bare RuntimeException, discarding the IOException
        throw new RuntimeException(e);
    }
}
/** Returns the user id of the authenticated principal, or empty if the identity is not an Athenz user. */
private static Optional<UserId> getUserId(HttpRequest request) {
    return Optional.of(getUserPrincipal(request).getIdentity())
                   .filter(identity -> identity instanceof AthenzUser)
                   .map(identity -> ((AthenzUser) identity).getName())
                   .map(UserId::new);
}
/** Returns the authenticated Athenz principal of the request, or throws if there is none. */
private static AthenzPrincipal getUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal == null)
        throw new InternalServerErrorException("Expected a user principal");

    if (principal instanceof AthenzPrincipal)
        return (AthenzPrincipal) principal;

    throw new InternalServerErrorException(String.format("Expected principal of type %s, got %s",
                                                         AthenzPrincipal.class.getSimpleName(),
                                                         principal.getClass().getName()));
}
/** Returns the given field of the object, throwing IllegalArgumentException when the field is missing. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the string value of the given field, or empty if the field is missing. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}
/** Joins the string representations of the given elements with '/', e.g. for building resource paths. */
private static String path(Object... elements) {
    // Plain StringBuilder join instead of Guava's Joiner: keeps this helper JDK-only.
    // Like Joiner, each element's toString() is used (and null elements fail fast).
    StringBuilder joined = new StringBuilder();
    for (Object element : elements) {
        if (joined.length() > 0)
            joined.append('/');
        joined.append(element.toString());
    }
    return joined.toString();
}
/** Writes a compact application reference: application name, instance name, and URL of the full resource. */
private void toSlime(Application application, Cursor object, HttpRequest request) {
    ApplicationId id = application.id();
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String applicationPath = "/application/v4/tenant/" + id.tenant().value() +
                             "/application/" + id.application().value();
    object.setString("url", withPath(applicationPath, request.getUri()).toString());
}
/** Serializes the result of an activation: revision id, package size, prepare log, and required config change actions. */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());

    // Log messages from the prepare call, when any were returned
    Cursor logArray = object.setArray("prepareMessages");
    if (result.prepareResponse().log != null) {
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }

    Cursor changeObject = object.setObject("configChangeActions");

    // Restart actions: services to restart for the config change to take effect
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }

    // Refeed actions: document types affected by the config change
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setBool("allowed", refeedAction.allowed);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}
/** Adds one object per service info, with its identifying fields, to the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    for (ServiceInfo service : serviceInfoList) {
        Cursor entry = array.addObject();
        entry.setString("serviceName", service.serviceName);
        entry.setString("serviceType", service.serviceType);
        entry.setString("configId", service.configId);
        entry.setString("hostName", service.hostName);
    }
}
/** Adds each of the given strings to the given Slime array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/**
 * Reads the entire given stream into a string, decoded as UTF-8.
 *
 * @param stream the stream to consume
 * @return the full stream contents, or null if the stream is empty
 */
private String readToString(InputStream stream) {
    // "\\A" matches only the start of input, so the single token is the whole stream.
    // The charset is given explicitly: the no-charset Scanner constructor decodes with
    // the platform default, which made parsing of request bodies environment-dependent.
    Scanner scanner = new Scanner(stream, "UTF-8").useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}
/** Returns whether the given Vespa version is present in the system's version status. */
private boolean systemHasVersion(Version version) {
    return controller.versionStatus().versions().stream()
                     .anyMatch(candidate -> version.equals(candidate.versionNumber()));
}
/** Renders the aggregate cost figures of a deployment, with a per-cluster breakdown. */
public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
    object.setLong("tco", (long)deploymentCost.getTco());
    object.setLong("waste", (long)deploymentCost.getWaste());
    object.setDouble("utilization", deploymentCost.getUtilization());
    Cursor clusters = object.setObject("cluster");
    deploymentCost.getCluster().forEach((name, cost) -> toSlime(cost, clusters.setObject(name)));
}
/**
 * Renders the cost, flavor and utilization details of a single cluster.
 * Cost figures are rounded down to whole units.
 */
private static void toSlime(ClusterCost clusterCost, Cursor object) {
    object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
    object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
    object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
    // Cast to long, not int: an int cast silently overflows for costs above
    // Integer.MAX_VALUE, and the DeploymentCost overload already casts to long.
    object.setLong("tco", (long)clusterCost.getTco());
    object.setLong("waste", (long)clusterCost.getWaste());
    object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
    object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
    object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
    object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
    object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
    object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
    // Normalized utilization relative to the cluster's target.
    Cursor utilObject = object.setObject("util");
    utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
    utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
    utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
    utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
    // Raw measured system utilization.
    Cursor usageObject = object.setObject("usage");
    usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
    usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
    usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
    usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
    Cursor hostnamesArray = object.setArray("hostnames");
    for (String hostname : clusterCost.getClusterInfo().getHostnames())
        hostnamesArray.addString(hostname);
}
/**
 * Returns the name of the resource whose utilization equals the maximum.
 * Checked in the order mem, disk, diskbusy; falls back to "cpu" when none match.
 */
private static String getResourceName(ClusterUtilization utilization) {
    double max = utilization.getMaxUtilization();
    if (utilization.getMemory() == max) return "mem";
    if (utilization.getDisk() == max) return "disk";
    if (utilization.getDiskBusy() == max) return "diskbusy";
    return "cpu";
}
/** Returns whether the request asks for recursion at tenant level or below. */
private static boolean recurseOverTenants(HttpRequest request) {
    return "tenant".equals(request.getProperty("recursive")) || recurseOverApplications(request);
}
/** Returns whether the request asks for recursion at application level or below. */
private static boolean recurseOverApplications(HttpRequest request) {
    return "application".equals(request.getProperty("recursive")) || recurseOverDeployments(request);
}
/** Returns whether the request asks for recursion over deployments. */
private static boolean recurseOverDeployments(HttpRequest request) {
    // getProperty may return null when the parameter is absent;
    // ImmutableSet.contains tolerates null (returns false), unlike Set.of.
    String recursive = request.getProperty("recursive");
    return ImmutableSet.of("all", "true", "deployment").contains(recursive);
}
/**
 * Returns the API type name for the given tenant.
 * (Method name typo is kept — renaming would break existing callers.)
 *
 * @throws IllegalArgumentException for unknown tenant subclasses
 */
private static String tentantType(Tenant tenant) {
    if (tenant instanceof AthenzTenant) return "ATHENS";
    if (tenant instanceof UserTenant) return "USER";
    throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
/** Builds an application id from the {tenant}, {application} and {instance} path segments. */
private static ApplicationId appIdFromPath(Path path) {
    String tenant = path.get("tenant");
    String application = path.get("application");
    String instance = path.get("instance");
    return ApplicationId.from(tenant, application, instance);
}
/** Resolves the job type named by the {jobtype} path segment. */
private static JobType jobTypeFromPath(Path path) {
    String jobName = path.get("jobtype");
    return JobType.fromJobName(jobName);
}
/** Builds a run id from the application, job type and {number} path segments. */
private static RunId runIdFromPath(Path path) {
    long runNumber = Long.parseLong(path.get("number"));
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), runNumber);
}
/**
 * Handles submission of a new application revision: parses the multipart payload,
 * verifies the identity configuration of the application package for the tenant,
 * and hands the parts over to the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> parts = new MultipartParser().parse(request);
    Inspector options = SlimeUtils.jsonToSlime(parts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    SourceRevision sourceRevision = toSourceRevision(options);
    String authorEmail = options.field("authorEmail").asString();
    long projectId = Math.max(1, options.field("projectId").asLong()); // non-positive (absent) values are clamped to 1
    ApplicationPackage applicationPackage = new ApplicationPackage(parts.get(EnvironmentResource.APPLICATION_ZIP));
    // Reject packages whose identity configuration does not match the submitting tenant.
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                    applicationPackage,
                                                                    Optional.of(getUserPrincipal(request).getIdentity()));
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                        tenant,
                                                        application,
                                                        sourceRevision,
                                                        authorEmail,
                                                        projectId,
                                                        applicationPackage,
                                                        parts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
} | class ApplicationApiHandler extends LoggingRequestHandler {
private final Controller controller;
private final PermitExtractor permits;
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
PermitExtractor permits) {
super(parentCtx);
this.controller = controller;
this.permits = permits;
}
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case PUT: return handlePUT(request);
case POST: return handlePOST(request);
case PATCH: return handlePATCH(request);
case DELETE: return handleDELETE(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
return ErrorResponse.from(e);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handlePUT(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/user")) return createUser(request);
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePOST(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePATCH(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}"))
return setMajorVersion(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleDELETE(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleOPTIONS() {
EmptyJsonResponse response = new EmptyJsonResponse();
response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
return response;
}
private HttpResponse recursiveRoot(HttpRequest request) {
Slime slime = new Slime();
Cursor tenantArray = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
toSlime(tenantArray.addObject(), tenant, request);
return new SlimeJsonResponse(slime);
}
private HttpResponse root(HttpRequest request) {
return recurseOverTenants(request)
? recursiveRoot(request)
: new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property");
}
private HttpResponse authenticatedUser(HttpRequest request) {
AthenzPrincipal user = getUserPrincipal(request);
if (user == null)
throw new NotAuthorizedException("You must be authenticated.");
List<Tenant> tenants = controller.tenants().asList(user);
Slime slime = new Slime();
Cursor response = slime.setObject();
response.setString("user", user.getIdentity().getName());
Cursor tenantsArray = response.setArray("tenants");
for (Tenant tenant : tenants)
tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant.type() == Type.user &&
((UserTenant) tenant).is(user.getIdentity().getName())));
return new SlimeJsonResponse(slime);
}
private HttpResponse tenants(HttpRequest request) {
Slime slime = new Slime();
Cursor response = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
return new SlimeJsonResponse(slime);
}
/** Lists the screwdriver project id for each application */
private HttpResponse tenantPipelines() {
Slime slime = new Slime();
Cursor response = slime.setObject();
Cursor pipelinesArray = response.setArray("tenantPipelines");
for (Application application : controller.applications().asList()) {
if ( ! application.deploymentJobs().projectId().isPresent()) continue;
Cursor pipelineObject = pipelinesArray.addObject();
pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().getAsLong()));
pipelineObject.setString("tenant", application.id().tenant().value());
pipelineObject.setString("application", application.id().application().value());
pipelineObject.setString("instance", application.id().instance().value());
}
response.setArray("brokenTenantPipelines");
return new SlimeJsonResponse(slime);
}
private HttpResponse properties() {
Slime slime = new Slime();
Cursor response = slime.setObject();
Cursor array = response.setArray("properties");
for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) {
Cursor propertyObject = array.addObject();
propertyObject.setString("propertyid", entry.getKey().id());
propertyObject.setString("property", entry.getValue().id());
}
return new SlimeJsonResponse(slime);
}
private HttpResponse tenant(String tenantName, HttpRequest request) {
return controller.tenants().get(TenantName.from(tenantName))
.map(tenant -> tenant(tenant, request))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
}
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), tenant, request);
return new SlimeJsonResponse(slime);
}
private HttpResponse applications(String tenantName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
Slime slime = new Slime();
Cursor array = slime.setArray();
for (Application application : controller.applications().asList(tenant))
toSlime(application, array.addObject(), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse setMajorVersion(String tenantName, String applicationName, HttpRequest request) {
Application application = getApplication(tenantName, applicationName);
Inspector majorVersionField = toSlime(request.getData()).get().field("majorVersion");
if ( ! majorVersionField.valid())
throw new IllegalArgumentException("Request body must contain a majorVersion field");
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int)majorVersionField.asLong();
controller.applications().lockIfPresent(application.id(),
a -> controller.applications().store(a.withMajorVersion(majorVersion)));
return new MessageResponse("Set major version to " + ( majorVersion == null ? "empty" : majorVersion));
}
private Application getApplication(String tenantName, String applicationName) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
return controller.applications().get(applicationId)
.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
if (queryParameters.containsKey("streaming")) {
InputStream logStream = controller.configServer().getLogStream(deployment, queryParameters);
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
logStream.transferTo(outputStream);
}
};
}
Optional<Logs> response = controller.configServer().getLogs(deployment, queryParameters);
Slime slime = new Slime();
Cursor object = slime.setObject();
if (response.isPresent()) {
response.get().logs().entrySet().stream().forEach(entry -> object.setString(entry.getKey(), entry.getValue()));
}
return new SlimeJsonResponse(slime);
}
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
String triggered = controller.applications().deploymentTrigger()
.forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName())
.stream().map(JobType::jobName).collect(joining(", "));
return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
: "Triggered " + triggered + " for " + id);
}
private HttpResponse pause(ApplicationId id, JobType type) {
Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
controller.applications().deploymentTrigger().pauseJob(id, type, until);
return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
private void toSlime(Cursor object, Application application, HttpRequest request) {
object.setString("application", application.id().application().value());
object.setString("instance", application.id().instance().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + application.id().instance().value() + "/job/",
request.getUri()).toString());
application.deploymentJobs().statusOf(JobType.component)
.flatMap(JobStatus::lastSuccess)
.map(run -> run.application().source())
.ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source")));
application.deploymentJobs().projectId()
.ifPresent(id -> object.setLong("projectId", id));
if ( ! application.change().isEmpty()) {
toSlime(object.setObject("deploying"), application.change());
}
if ( ! application.outstandingChange().isEmpty()) {
toSlime(object.setObject("outstandingChange"), application.outstandingChange());
}
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedJobs(application.deploymentJobs().jobStatus().values());
object.setBool("deployedInternally", application.deploymentJobs().deployedInternally());
Cursor deploymentsArray = object.setArray("deploymentJobs");
for (JobStatus job : jobStatus) {
Cursor jobObject = deploymentsArray.addObject();
jobObject.setString("type", job.type().jobName());
jobObject.setBool("success", job.isSuccess());
job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
}
Cursor changeBlockers = object.setArray("changeBlockers");
application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
});
object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString());
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
Cursor globalRotationsArray = object.setArray("globalRotations");
application.globalDnsName(controller.system()).ifPresent(rotation -> {
globalRotationsArray.addString(rotation.url().toString());
globalRotationsArray.addString(rotation.secureUrl().toString());
globalRotationsArray.addString(rotation.oathUrl().toString());
object.setString("rotationId", application.rotation().get().asString());
});
Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies(application.id());
for (RoutingPolicy policy : routingPolicies) {
for (RotationName rotation : policy.rotations()) {
GlobalDnsName dnsName = new GlobalDnsName(application.id(), controller.system(), rotation);
globalRotationsArray.addString(dnsName.oathUrl().toString());
}
}
List<Deployment> deployments = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedDeployments(application.deployments().values());
Cursor instancesArray = object.setArray("instances");
for (Deployment deployment : deployments) {
Cursor deploymentObject = instancesArray.addObject();
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("instance", application.id().instance().value());
if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) {
toSlime(application.rotationStatus(deployment), deploymentObject);
}
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
else
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value() +
"/instance/" + application.id().instance().value(),
request.getUri()).toString());
}
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
Application application = controller.applications().get(id)
.orElseThrow(() -> new NotExistsException(id + " not found"));
DeploymentId deploymentId = new DeploymentId(application.id(),
ZoneId.from(environment, region));
Deployment deployment = application.deployments().get(deploymentId.zoneId());
if (deployment == null)
throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId());
Slime slime = new Slime();
toSlime(slime.setObject(), deploymentId, deployment, request);
return new SlimeJsonResponse(slime);
}
private void toSlime(Cursor object, Change change) {
change.platform().ifPresent(version -> object.setString("version", version.toString()));
change.application()
.filter(version -> !version.isUnknown())
.ifPresent(version -> toSlime(version, object.setObject("revision")));
}
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
Cursor serviceUrlArray = response.setArray("serviceUrls");
controller.applications().getDeploymentEndpoints(deploymentId)
.ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())));
response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
controller.zoneRegistry().getLogServerUri(deploymentId)
.ifPresent(elkUrl -> response.setString("elkUrl", elkUrl.toString()));
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
response.setString("version", deployment.version().toFullString());
response.setString("revision", deployment.applicationVersion().id());
response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId()
.ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
sourceRevisionToSlime(deployment.applicationVersion().source(), response);
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
DeploymentCost appCost = deployment.calculateCost();
Cursor costObject = response.setObject("cost");
toSlime(appCost, costObject);
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
if (!applicationVersion.isUnknown()) {
object.setString("hash", applicationVersion.id());
sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
}
}
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
if ( ! revision.isPresent()) return;
object.setString("gitRepository", revision.get().repository());
object.setString("gitBranch", revision.get().branch());
object.setString("gitCommit", revision.get().commit());
}
private void toSlime(RotationStatus status, Cursor object) {
Cursor bcpStatus = object.setObject("bcpStatus");
bcpStatus.setString("rotationStatus", status.name().toUpperCase());
}
private URI monitoringSystemUri(DeploymentId deploymentId) {
return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
ZoneId zone = ZoneId.from(environment, region);
Deployment deployment = application.deployments().get(zone);
if (deployment == null) {
throw new NotExistsException(application + " has no deployment in " + zone);
}
Inspector requestData = toSlime(request.getData()).get();
String reason = mandatory("reason", requestData).asString();
String agent = getUserPrincipal(request).getIdentity().getFullName();
long timestamp = controller.clock().instant().getEpochSecond();
EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent, timestamp);
controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()),
endpointStatus);
return new MessageResponse(String.format("Successfully set %s in %s.%s %s service",
application.id().toShortString(),
deployment.zone().environment().value(),
deployment.zone().region().value(),
inService ? "in" : "out of"));
}
/** Returns the rotation override status for each routing endpoint of the given deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    Map<RoutingEndpoint, EndpointStatus> status = controller.applications().globalRotationStatus(deploymentId);
    // Iterate entries directly instead of keySet() + get(), avoiding a second lookup per endpoint.
    for (Map.Entry<RoutingEndpoint, EndpointStatus> entry : status.entrySet()) {
        EndpointStatus currentStatus = entry.getValue();
        // NOTE: the response interleaves a string element (upstream name) and an object element
        // per endpoint in the same array — preserved as-is for API compatibility.
        array.addString(entry.getKey().upstreamName());
        Cursor statusObject = array.addObject();
        statusObject.setString("status", currentStatus.getStatus().name());
        statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
        statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
        statusObject.setLong("timestamp", currentStatus.getEpoch());
    }
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the global rotation status of an application's deployment in the given zone.
 *
 * @throws NotExistsException if the application has no global rotation, or no deployment in the zone
 */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().require(applicationId);
    ZoneId zone = ZoneId.from(environment, region);
    if (!application.rotation().isPresent()) {
        throw new NotExistsException("global rotation does not exist for " + application);
    }
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(application + " has no deployment in " + zone);
    }
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    toSlime(application.rotationStatus(deployment), response);
    return new SlimeJsonResponse(slime);
}
/** Returns the change (platform and/or application version, and pin status) currently rolling out, if any. */
private HttpResponse deploying(String tenant, String application, HttpRequest request) {
    Application app = controller.applications().require(ApplicationId.from(tenant, application, "default"));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    // An empty change produces an empty JSON object.
    if (!app.change().isEmpty()) {
        app.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
        app.change().application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", app.change().isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended (orchestrated out of service). */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    boolean suspended = controller.applications().isSuspended(deploymentId);
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    response.setBool("suspended", suspended);
    return new SlimeJsonResponse(slime);
}
/** Returns the service view for a deployment, proxied from the zone's config servers. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region),
                                                         new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                         controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/** Returns details for a single service of a deployment, proxied from the zone's config servers. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region),
                                                         new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                         controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/**
 * Creates a user tenant for the authenticated user; idempotent — an existing user is reported, not an error.
 *
 * @throws ForbiddenException if the request is not authenticated as an Athenz user
 */
private HttpResponse createUser(HttpRequest request) {
    Optional<UserId> user = getUserId(request);
    if ( ! user.isPresent()) throw new ForbiddenException("Not authenticated or not a user.");
    String username = UserTenant.normalizeUser(user.get().id());
    UserTenant tenant = UserTenant.create(username);
    try {
        controller.tenants().createUser(tenant);
        return new MessageResponse("Created user '" + username + "'");
    } catch (AlreadyExistsException e) {
        return new MessageResponse("User '" + username + "' already exists");
    }
}
/** Updates an existing tenant from the permit carried in the request, then returns the updated tenant. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName);  // 404 if the tenant does not exist
    controller.tenants().update(permits.getTenantPermit(TenantName.from(tenantName), request));
    return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}
/** Creates a tenant from the permit carried in the request, then returns the new tenant. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    controller.tenants().create(permits.getTenantPermit(TenantName.from(tenantName), request));
    return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}
/**
 * Creates the default instance of an application under the given tenant.
 * User tenants need no application permit; all other tenant types do.
 *
 * @throws ForbiddenException if the Athenz ZMS service denies the creation
 */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    try {
        Optional<ApplicationPermit> permit = controller.tenants().require(id.tenant()).type() != Tenant.Type.user
                ? Optional.of(permits.getApplicationPermit(id, request)) : Optional.empty();
        Application application = controller.applications().createApplication(id, permit);
        Slime slime = new Slime();
        toSlime(application, slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }
    catch (ZmsClientException e) {
        // Translate an Athenz authorization failure to a 403; rethrow anything else.
        if (e.getErrorCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN)
            throw new ForbiddenException("Not authorized to create application", e);
        else
            throw e;
    }
}
/**
 * Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9".
 * An empty version means the current system version; {@code pin} locks the change to that version.
 *
 * @throws IllegalArgumentException if the version is not active in this system
 */
private HttpResponse deployPlatform(String tenantName, String applicationName, boolean pin, HttpRequest request) {
    request = controller.auditLogger().log(request);
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Version version = Version.fromString(versionString);
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion();
        if ( ! systemHasVersion(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + controller.versionStatus().versions()
                                                       .stream()
                                                       .map(VespaVersion::versionNumber)
                                                       .map(Version::toString)
                                                       .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        // Fail with a descriptive IllegalArgumentException instead of the previous unchecked
        // Optional.get() chain, which threw a bare NoSuchElementException when the application
        // had no successful component run (and thus no known application package).
        ApplicationVersion version = application.get().deploymentJobs().statusOf(JobType.component)
                .flatMap(job -> job.lastSuccess())
                .map(run -> run.application())
                .orElseThrow(() -> new IllegalArgumentException(
                        "Cannot deploy " + id + ": No known application package, as no successful component run exists"));
        Change change = Change.of(version);
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Change change = application.get().change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for " + application + " at this time");
            return;
        }
        // 'choice' selects which part of the change to cancel (e.g. ALL, PLATFORM, …).
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '" + change + "' to '" +
                        controller.applications().require(id).change() + "' for " + application);
    });
    return new MessageResponse(response.toString());
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    // An optional "hostname" query parameter restricts the restart to a single host.
    Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
    controller.applications().restart(deploymentId, hostname);
    return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName,
                                                             ApplicationResource.API_PATH, applicationName,
                                                             EnvironmentResource.API_PATH, environment,
                                                             "region", region,
                                                             "instance", instanceName));
}
/**
 * Deploys an application package, or redeploys an existing deployment, to the given zone.
 *
 * Expects a multipart request with a mandatory "deployOptions" JSON part and an optional
 * "applicationZip" part. Handles three cases: the system (zone) application, deployment of
 * a previously built application version (source revision + build number), and direct
 * redeployment of the currently deployed version.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    Map<String, byte[]> dataParts = new MultipartParser().parse(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    /*
     * Special handling of the zone application (the only system application with an application package)
     * Setting any other deployOptions here is not supported for now (e.g. specifying version), but
     * this might be handy later to handle emergency downgrades.
     */
    boolean isZoneApplication = SystemApplication.zone.id().equals(applicationId);
    if (isZoneApplication) {
        // System applications always deploy on the determined system version: explicit
        // versions are rejected, and deploying during a system upgrade is not allowed.
        String versionStr = deployOptions.field("vespaVersion").asString();
        boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
        if (versionPresent) {
            throw new RuntimeException("Version not supported for system applications");
        }
        if (controller.versionStatus().isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        ActivateResult result = controller.applications()
                .deploySystemApplicationPackage(SystemApplication.zone, zone, systemVersion.get().versionNumber());
        return new SlimeJsonResponse(toSlime(result));
    }
    /*
     * Normal applications from here
     */
    Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
                                                              .map(ApplicationPackage::new);
    // A source revision plus build number identifies an already-built application version;
    // it is mutually exclusive with uploading an application package directly.
    Inspector sourceRevision = deployOptions.field("sourceRevision");
    Inspector buildNumber = deployOptions.field("buildNumber");
    if (sourceRevision.valid() != buildNumber.valid())
        throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
    Optional<ApplicationVersion> applicationVersion = Optional.empty();
    if (sourceRevision.valid()) {
        if (applicationPackage.isPresent())
            throw new IllegalArgumentException("Application version and application package can't both be provided.");
        applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
                                                                 buildNumber.asLong()));
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
    }
    boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
    Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
    /*
     * Deploy direct is when we want to redeploy the current application - retrieve version
     * info from the application package before deploying
     */
    if(deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) {
        // Redeploy: look up the existing deployment in this zone and reuse its versions.
        Optional<Deployment> deployment = controller.applications().get(applicationId)
                .map(Application::deployments)
                .flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
        if(!deployment.isPresent())
            throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
        ApplicationVersion version = deployment.get().applicationVersion();
        if(version.isUnknown())
            throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
        applicationVersion = Optional.of(version);
        vespaVersion = Optional.of(deployment.get().version());
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
    }
    DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
                                                             vespaVersion,
                                                             deployOptions.field("ignoreValidationErrors").asBool(),
                                                             deployOptions.field("deployCurrentVersion").asBool());
    ActivateResult result = controller.applications().deploy(applicationId,
                                                             zone,
                                                             applicationPackage,
                                                             applicationVersion,
                                                             deployOptionsJsonClass,
                                                             Optional.of(getUserPrincipal(request).getIdentity()));
    return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant; user tenants are deleted directly, others require a permit from the request. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if ( ! tenant.isPresent())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
    if (tenant.get().type() == Tenant.Type.user)
        controller.tenants().deleteUser((UserTenant) tenant.get());
    else
        controller.tenants().delete(permits.getTenantPermit(tenant.get().name(), request));
    // Respond with the deleted tenant's data, as it was before deletion.
    return tenant(tenant.get(), request);
}
/** Deletes the default instance of the given application; non-user tenants require an application permit. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    Optional<ApplicationPermit> permit = controller.tenants().require(id.tenant()).type() != Tenant.Type.user
            ? Optional.of(permits.getApplicationPermit(id, request)) : Optional.empty();
    controller.applications().deleteApplication(id, permit);
    return new EmptyJsonResponse();
}
/** Deactivates (removes) the application's deployment in the given zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    controller.applications().deactivate(application.id(), ZoneId.from(environment, region));
    return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName,
                                                    ApplicationResource.API_PATH, applicationName,
                                                    EnvironmentResource.API_PATH, environment,
                                                    "region", region,
                                                    "instance", instanceName));
}
/**
 * Promote application Chef environments. To be used by component jobs only
 *
 * Copies the system Chef environment to the application's source environment;
 * failures are logged and reported as a 500 rather than propagated.
 */
private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) {
    try{
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String sourceEnvironment = chefEnvironment.systemChefEnvironment();
        String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
    } catch (Exception e) {
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}
/**
 * Promote application Chef environments for jobs that deploy applications
 *
 * Copies the application's source Chef environment to its per-zone target environment;
 * failures are logged and reported as a 500 rather than propagated.
 */
private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
        controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
    } catch (Exception e) {
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}
/**
 * Receives a job-completion report from an external build job and forwards it to the deployment trigger.
 * Component reports from Screwdriver v3 are rejected for applications that build internally.
 */
private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) {
    try {
        DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get());
        if (   report.jobType() == JobType.component
            && controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally())
            throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " +
                                               "longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
                                               "to the old pipeline, please file a ticket at yo/vespa-support and request this.");
        controller.applications().deploymentTrigger().notifyOfCompletion(report);
        return new MessageResponse("ok");
    } catch (IllegalStateException e) {
        // Illegal state (e.g. unknown application) maps to a 400 rather than a 500.
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
}
/**
 * Parses a job report from the given slime: component jobs carry a project id and source
 * revision, all other job types only the build number and optional error.
 */
private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) {
    Optional<DeploymentJobs.JobError> jobError = Optional.empty();
    if (report.field("jobError").valid()) {
        jobError = Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString()));
    }
    ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString());
    JobType type = JobType.fromJobName(report.field("jobName").asString());
    long buildNumber = report.field("buildNumber").asLong();
    if (type == JobType.component)
        return DeploymentJobs.JobReport.ofComponent(id,
                                                    report.field("projectId").asLong(),
                                                    buildNumber,
                                                    jobError,
                                                    toSourceRevision(report.field("sourceRevision")));
    else
        return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError);
}
/**
 * Parses a source revision from the given slime object.
 *
 * @throws IllegalArgumentException unless all of "repository", "branch" and "commit" are present
 */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! repository.valid() || ! branch.valid() || ! commit.valid()) {
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    }
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    return controller.tenants().get(tenantName)
            .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/**
 * Writes a full tenant view: type, Athenz metadata, default-instance applications
 * (recursively if requested), and contact information when available.
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tentantType(tenant));
    if (tenant instanceof AthenzTenant) {
        AthenzTenant athenzTenant = (AthenzTenant) tenant;
        object.setString("athensDomain", athenzTenant.domain().getName());
        object.setString("property", athenzTenant.property().id());
        athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
    }
    // Only default instances are listed; "recursive" controls detail level per application.
    Cursor applicationArray = object.setArray("applications");
    for (Application application : controller.applications().asList(tenant.name())) {
        if (application.id().instance().isDefault()) {
            if (recurseOverApplications(request))
                toSlime(applicationArray.addObject(), application, request);
            else
                toSlime(application, applicationArray.addObject(), request);
        }
    }
    if (tenant instanceof AthenzTenant) {
        AthenzTenant athenzTenant = (AthenzTenant) tenant;
        athenzTenant.contact().ifPresent(c -> {
            object.setString("propertyUrl", c.propertyUrl().toString());
            object.setString("contactsUrl", c.url().toString());
            object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
            // Contacts are nested: an array of escalation levels, each an array of persons.
            Cursor contactsArray = object.setArray("contacts");
            c.persons().forEach(persons -> {
                Cursor personArray = contactsArray.addArray();
                persons.forEach(personArray::addString);
            });
        });
    }
}
/** Writes the compact tenant representation used in tenant list responses: name, metadata and self URL. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tentantType(tenant));
    if (tenant instanceof AthenzTenant) {
        AthenzTenant athenzTenant = (AthenzTenant) tenant;
        metaData.setString("athensDomain", athenzTenant.domain().getName());
        metaData.setString("property", athenzTenant.property().id());
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
    }
    catch (URISyntaxException e) {
        // Components come from an already-valid URI, so this cannot occur in practice.
        throw new RuntimeException("Will not happen", e);
    }
}
/**
 * Parses the given string as a long, or returns the default when the string is null.
 *
 * @throws IllegalArgumentException if the value is non-null but not a valid integer
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Preserve the original parse failure as the cause instead of discarding it.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/** Writes a job run — id, platform version, application revision (if known), reason and time — to slime. */
private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
    object.setLong("id", jobRun.id());
    object.setString("version", jobRun.platform().toFullString());
    if (!jobRun.application().isUnknown())
        toSlime(jobRun.application(), object.setObject("revision"));
    object.setString("reason", jobRun.reason());
    object.setLong("at", jobRun.at().toEpochMilli());
}
/**
 * Reads the given stream (at most ~1 MB) and parses it as JSON into a slime structure.
 *
 * @throws RuntimeException wrapping the IOException if reading fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Keep the IOException as the cause — the original bare RuntimeException
        // discarded all failure context.
        throw new RuntimeException(e);
    }
}
/** Returns the user id of the authenticated principal, or empty if the principal is not an Athenz user. */
private static Optional<UserId> getUserId(HttpRequest request) {
    AthenzPrincipal principal = getUserPrincipal(request);
    if ( ! (principal.getIdentity() instanceof AthenzUser))
        return Optional.empty();
    AthenzUser user = (AthenzUser) principal.getIdentity();
    return Optional.of(new UserId(user.getName()));
}
/**
 * Returns the request's principal as an AthenzPrincipal.
 *
 * @throws InternalServerErrorException if no principal is set, or it is of an unexpected type
 */
private static AthenzPrincipal getUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal == null) throw new InternalServerErrorException("Expected a user principal");
    if (!(principal instanceof AthenzPrincipal))
        throw new InternalServerErrorException(
                String.format("Expected principal of type %s, got %s",
                              AthenzPrincipal.class.getSimpleName(), principal.getClass().getName()));
    return (AthenzPrincipal) principal;
}
/**
 * Returns the named field of the given slime object.
 *
 * @throws IllegalArgumentException if the field is missing or invalid
 */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the named field of the given slime object as a string, or empty if it is missing. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}
/** Joins the string representations of the given elements with '/'. */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}
/** Writes the compact application representation (name, instance, self URL) used in list responses. */
private void toSlime(Application application, Cursor object, HttpRequest request) {
    object.setString("application", application.id().application().value());
    object.setString("instance", application.id().instance().value());
    object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() +
                                     "/application/" + application.id().application().value(), request.getUri()).toString());
}
/**
 * Renders the result of a deployment activation: revision id, package size, prepare log
 * messages, and the config change actions (restarts and refeeds) reported by the config server.
 */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    Cursor logArray = object.setArray("prepareMessages");
    if (result.prepareResponse().log != null) {
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }
    // Config change actions: services requiring restart, and document types requiring refeed.
    Cursor changeObject = object.setObject("configChangeActions");
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setBool("allowed", refeedAction.allowed);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}
/** Writes one object per service (name, type, config id, host) to the given slime array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    for (ServiceInfo serviceInfo : serviceInfoList) {
        Cursor serviceInfoObject = array.addObject();
        serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
        serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
        serviceInfoObject.setString("configId", serviceInfo.configId);
        serviceInfoObject.setString("hostName", serviceInfo.hostName);
    }
}
/** Appends each of the given strings to the given slime array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/** Reads the entire stream into a string, or returns null when the stream is empty. */
private String readToString(InputStream stream) {
    // try-with-resources: the Scanner (and the underlying request stream, which has no
    // further readers) was previously never closed.
    // NOTE(review): Scanner uses the platform default charset here — presumably request
    // bodies are UTF-8; confirm before making the charset explicit.
    try (Scanner scanner = new Scanner(stream).useDelimiter("\\A")) {
        return scanner.hasNext() ? scanner.next() : null;
    }
}
/** Returns whether the given version is among the versions currently active in this system. */
private boolean systemHasVersion(Version version) {
    for (VespaVersion activeVersion : controller.versionStatus().versions())
        if (activeVersion.versionNumber().equals(version))
            return true;
    return false;
}
/** Writes total cost of ownership, waste and utilization for a deployment, plus one object per cluster. */
public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
    object.setLong("tco", (long)deploymentCost.getTco());
    object.setLong("waste", (long)deploymentCost.getWaste());
    object.setDouble("utilization", deploymentCost.getUtilization());
    Cursor clustersObject = object.setObject("cluster");
    for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet())
        toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey()));
}
/** Writes cost, flavor, utilization and usage details for a single cluster to the given slime object. */
private static void toSlime(ClusterCost clusterCost, Cursor object) {
    object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
    object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
    object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
    // Cast to long, not int: the previous (int) casts could truncate large cost values,
    // and were inconsistent with the (long) casts in toSlime(DeploymentCost, Cursor).
    object.setLong("tco", (long)clusterCost.getTco());
    object.setLong("waste", (long)clusterCost.getWaste());
    object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
    object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
    object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
    object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
    object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
    object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
    // "util" is the computed result utilization; "usage" the raw system measurements.
    Cursor utilObject = object.setObject("util");
    utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
    utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
    utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
    utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
    Cursor usageObject = object.setObject("usage");
    usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
    usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
    usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
    usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
    Cursor hostnamesArray = object.setArray("hostnames");
    for (String hostname : clusterCost.getClusterInfo().getHostnames())
        hostnamesArray.addString(hostname);
}
/** Returns the name of the resource dimension with the highest utilization; "cpu" is the default. */
private static String getResourceName(ClusterUtilization utilization) {
    double max = utilization.getMaxUtilization();
    if (utilization.getMemory() == max) return "mem";
    if (utilization.getDisk() == max) return "disk";
    if (utilization.getDiskBusy() == max) return "diskbusy";
    return "cpu";
}
/** Returns whether the "recursive" property asks for tenant-level (or deeper) expansion. */
private static boolean recurseOverTenants(HttpRequest request) {
    return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the "recursive" property asks for application-level (or deeper) expansion. */
private static boolean recurseOverApplications(HttpRequest request) {
    return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
/** Returns whether the "recursive" property asks for deployment-level expansion ("all", "true" or "deployment"). */
private static boolean recurseOverDeployments(HttpRequest request) {
    return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
/**
 * Returns the API type string for the given tenant.
 * NOTE(review): the method name has a typo ("tentantType") — kept as-is since it is
 * referenced elsewhere in this file.
 *
 * @throws IllegalArgumentException for unknown tenant subtypes
 */
private static String tentantType(Tenant tenant) {
    if (tenant instanceof AthenzTenant) {
        return "ATHENS";
    } else if (tenant instanceof UserTenant) {
        return "USER";
    }
    throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
/** Builds an application id from the "tenant", "application" and "instance" path segments. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
/** Resolves the job type from the "jobtype" path segment. */
private static JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"));
}
/** Builds a run id from the application, job type and "number" path segments. */
private static RunId runIdFromPath(Path path) {
    long number = Long.parseLong(path.get("number"));
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}
/**
 * Handles a multipart application submission for the given tenant and application:
 * parses the submit options and application package out of the request, verifies the
 * package's identity configuration against the submitting user, and hands the
 * submission to the job controller.
 *
 * @param tenant      tenant name from the request path
 * @param application application name from the request path
 * @param request     multipart request carrying submit options and zip payloads
 * @return the job controller's submission response
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
// Multipart body keyed by form field name; expected keys come from EnvironmentResource.
Map<String, byte[]> dataParts = new MultipartParser().parse(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
SourceRevision sourceRevision = toSourceRevision(submitOptions);
String authorEmail = submitOptions.field("authorEmail").asString();
// Project id is clamped to at least 1 — presumably 0/absent means "unspecified"; TODO confirm.
long projectId = Math.max(1, submitOptions.field("projectId").asLong());
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
// Identity verification must happen before the package is accepted for submission.
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
applicationPackage,
Optional.of(getUserPrincipal(request).getIdentity()));
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
tenant,
application,
sourceRevision,
authorEmail,
projectId,
applicationPackage,
dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
} |
I think this is quite important info so I suggest level2 instead of 5. | private void insertNetworkErrors() {
boolean asErrors = answeredNodes == 0;
if (!invokers.isEmpty()) {
String keys = invokers.stream().map(SearchInvoker::distributionKey).map(dk -> dk.map(i -> i.toString()).orElse("(unspecified)"))
.collect(Collectors.joining(", "));
if (asErrors) {
result.hits().addError(ErrorMessage
.createTimeout("Backend communication timeout on all nodes in group (distribution-keys: " + keys + ")"));
} else {
query.trace("Backend communication timeout on nodes with distribution-keys: " + keys, 5);
}
timedOut = true;
}
if (alreadyFailedNodes != null) {
var message = "Connection failure on nodes with distribution-keys: "
+ alreadyFailedNodes.stream().map(v -> Integer.toString(v)).collect(Collectors.joining(", "));
if (asErrors) {
result.hits().addError(ErrorMessage.createBackendCommunicationError(message));
} else {
query.trace(message, 5);
}
int failed = alreadyFailedNodes.size();
askedNodes += failed;
answeredNodes += failed;
}
} | query.trace("Backend communication timeout on nodes with distribution-keys: " + keys, 5); | private void insertNetworkErrors() {
boolean asErrors = answeredNodes == 0;
if (!invokers.isEmpty()) {
String keys = invokers.stream().map(SearchInvoker::distributionKey).map(dk -> dk.map(i -> i.toString()).orElse("(unspecified)"))
.collect(Collectors.joining(", "));
if (asErrors) {
result.hits().addError(ErrorMessage
.createTimeout("Backend communication timeout on all nodes in group (distribution-keys: " + keys + ")"));
} else {
query.trace("Backend communication timeout on nodes with distribution-keys: " + keys, 2);
}
timedOut = true;
}
if (alreadyFailedNodes != null) {
var message = "Connection failure on nodes with distribution-keys: "
+ alreadyFailedNodes.stream().map(v -> Integer.toString(v)).collect(Collectors.joining(", "));
if (asErrors) {
result.hits().addError(ErrorMessage.createBackendCommunicationError(message));
} else {
query.trace(message, 2);
}
int failed = alreadyFailedNodes.size();
askedNodes += failed;
answeredNodes += failed;
}
} | class InterleavedSearchInvoker extends SearchInvoker implements ResponseMonitor<SearchInvoker> {
private static final Logger log = Logger.getLogger(InterleavedSearchInvoker.class.getName());
private final Set<SearchInvoker> invokers;
private final VespaBackEndSearcher searcher;
private final SearchCluster searchCluster;
private final LinkedBlockingQueue<SearchInvoker> availableForProcessing;
private final Set<Integer> alreadyFailedNodes;
private Query query;
private boolean adaptiveTimeoutCalculated = false;
private long adaptiveTimeoutMin = 0;
private long adaptiveTimeoutMax = 0;
private long deadline = 0;
private Result result = null;
private long answeredDocs = 0;
private long answeredActiveDocs = 0;
private long answeredSoonActiveDocs = 0;
private int askedNodes = 0;
private int answeredNodes = 0;
private int answeredNodesParticipated = 0;
private boolean timedOut = false;
private boolean degradedByMatchPhase = false;
private boolean trimResult = false;
public InterleavedSearchInvoker(Collection<SearchInvoker> invokers, VespaBackEndSearcher searcher, SearchCluster searchCluster, Set<Integer> alreadyFailedNodes) {
super(Optional.empty());
this.invokers = Collections.newSetFromMap(new IdentityHashMap<>());
this.invokers.addAll(invokers);
this.searcher = searcher;
this.searchCluster = searchCluster;
this.availableForProcessing = newQueue();
this.alreadyFailedNodes = alreadyFailedNodes;
}
/**
* Sends search queries to the contained {@link SearchInvoker} sub-invokers. If the search
* query has an offset other than zero, it will be reset to zero and the expected hit amount
* will be adjusted accordingly.
*/
@Override
protected void sendSearchRequest(Query query, QueryPacket queryPacket) throws IOException {
this.query = query;
invokers.forEach(invoker -> invoker.setMonitor(this));
deadline = currentTime() + query.getTimeLeft();
int originalHits = query.getHits();
int originalOffset = query.getOffset();
query.setHits(query.getHits() + query.getOffset());
query.setOffset(0);
trimResult = originalHits != query.getHits() || originalOffset != query.getOffset();
for (SearchInvoker invoker : invokers) {
invoker.sendSearchRequest(query, null);
askedNodes++;
}
query.setHits(originalHits);
query.setOffset(originalOffset);
}
@Override
protected Result getSearchResult(Execution execution) throws IOException {
long nextTimeout = query.getTimeLeft();
try {
while (!invokers.isEmpty() && nextTimeout >= 0) {
SearchInvoker invoker = availableForProcessing.poll(nextTimeout, TimeUnit.MILLISECONDS);
if (invoker == null) {
log.fine(() -> "Search timed out with " + askedNodes + " requests made, " + answeredNodes + " responses received");
break;
} else {
mergeResult(invoker.getSearchResult(execution));
ejectInvoker(invoker);
}
nextTimeout = nextTimeout();
}
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted while waiting for search results", e);
}
if (result == null) {
result = new Result(query);
}
insertNetworkErrors();
result.setCoverage(createCoverage());
trimResult(execution);
Result ret = result;
result = null;
return ret;
}
private void trimResult(Execution execution) {
if (trimResult) {
if (result.getHitOrderer() != null) {
searcher.fill(result, Execution.ATTRIBUTEPREFETCH, execution);
}
result.hits().trim(query.getOffset(), query.getHits());
}
}
private long nextTimeout() {
DispatchConfig config = searchCluster.dispatchConfig();
double minimumCoverage = config.minSearchCoverage();
if (askedNodes == answeredNodes || minimumCoverage >= 100.0) {
return query.getTimeLeft();
}
int minimumResponses = (int) Math.ceil(askedNodes * minimumCoverage / 100.0);
if (answeredNodes < minimumResponses) {
return query.getTimeLeft();
}
long timeLeft = query.getTimeLeft();
if (!adaptiveTimeoutCalculated) {
adaptiveTimeoutMin = (long) (timeLeft * config.minWaitAfterCoverageFactor());
adaptiveTimeoutMax = (long) (timeLeft * config.maxWaitAfterCoverageFactor());
adaptiveTimeoutCalculated = true;
}
long now = currentTime();
int pendingQueries = askedNodes - answeredNodes;
double missWidth = ((100.0 - config.minSearchCoverage()) * askedNodes) / 100.0 - 1.0;
double slopedWait = adaptiveTimeoutMin;
if (pendingQueries > 1 && missWidth > 0.0) {
slopedWait += ((adaptiveTimeoutMax - adaptiveTimeoutMin) * (pendingQueries - 1)) / missWidth;
}
long nextAdaptive = (long) slopedWait;
if (now + nextAdaptive >= deadline) {
return deadline - now;
}
deadline = now + nextAdaptive;
return nextAdaptive;
}
private void mergeResult(Result partialResult) {
collectCoverage(partialResult.getCoverage(true));
if (result == null) {
result = partialResult;
return;
}
result.mergeWith(partialResult);
result.hits().addAll(partialResult.hits().asUnorderedHits());
}
private void collectCoverage(Coverage source) {
answeredDocs += source.getDocs();
answeredActiveDocs += source.getActive();
answeredSoonActiveDocs += source.getSoonActive();
answeredNodesParticipated += source.getNodes();
answeredNodes++;
degradedByMatchPhase |= source.isDegradedByMatchPhase();
timedOut |= source.isDegradedByTimeout();
}
private Coverage createCoverage() {
adjustDegradedCoverage();
Coverage coverage = new Coverage(answeredDocs, answeredActiveDocs, answeredNodesParticipated, 1);
coverage.setNodesTried(askedNodes);
coverage.setSoonActive(answeredSoonActiveDocs);
int degradedReason = 0;
if (timedOut) {
degradedReason |= (adaptiveTimeoutCalculated ? DEGRADED_BY_ADAPTIVE_TIMEOUT : DEGRADED_BY_TIMEOUT);
}
if (degradedByMatchPhase) {
degradedReason |= DEGRADED_BY_MATCH_PHASE;
}
coverage.setDegradedReason(degradedReason);
return coverage;
}
private void adjustDegradedCoverage() {
if (askedNodes == answeredNodesParticipated) {
return;
}
int notAnswered = askedNodes - answeredNodesParticipated;
if (adaptiveTimeoutCalculated && answeredNodesParticipated > 0) {
answeredActiveDocs += (notAnswered * answeredActiveDocs / answeredNodesParticipated);
answeredSoonActiveDocs += (notAnswered * answeredSoonActiveDocs / answeredNodesParticipated);
} else {
if (askedNodes > answeredNodesParticipated) {
int searchableCopies = (int) searchCluster.dispatchConfig().searchableCopies();
int missingNodes = notAnswered - (searchableCopies - 1);
if (answeredNodesParticipated > 0) {
answeredActiveDocs += (missingNodes * answeredActiveDocs / answeredNodesParticipated);
answeredSoonActiveDocs += (missingNodes * answeredSoonActiveDocs / answeredNodesParticipated);
timedOut = true;
}
}
}
}
private void ejectInvoker(SearchInvoker invoker) {
invokers.remove(invoker);
invoker.release();
}
@Override
protected void release() {
if (!invokers.isEmpty()) {
invokers.forEach(SearchInvoker::close);
invokers.clear();
}
}
@Override
public void responseAvailable(SearchInvoker from) {
if (availableForProcessing != null) {
availableForProcessing.add(from);
}
}
@Override
protected void setMonitor(ResponseMonitor<SearchInvoker> monitor) {
}
protected long currentTime() {
return System.currentTimeMillis();
}
protected LinkedBlockingQueue<SearchInvoker> newQueue() {
return new LinkedBlockingQueue<>();
}
} | class InterleavedSearchInvoker extends SearchInvoker implements ResponseMonitor<SearchInvoker> {
private static final Logger log = Logger.getLogger(InterleavedSearchInvoker.class.getName());
private final Set<SearchInvoker> invokers;
private final VespaBackEndSearcher searcher;
private final SearchCluster searchCluster;
private final LinkedBlockingQueue<SearchInvoker> availableForProcessing;
private final Set<Integer> alreadyFailedNodes;
private Query query;
private boolean adaptiveTimeoutCalculated = false;
private long adaptiveTimeoutMin = 0;
private long adaptiveTimeoutMax = 0;
private long deadline = 0;
private Result result = null;
private long answeredDocs = 0;
private long answeredActiveDocs = 0;
private long answeredSoonActiveDocs = 0;
private int askedNodes = 0;
private int answeredNodes = 0;
private int answeredNodesParticipated = 0;
private boolean timedOut = false;
private boolean degradedByMatchPhase = false;
private boolean trimResult = false;
public InterleavedSearchInvoker(Collection<SearchInvoker> invokers, VespaBackEndSearcher searcher, SearchCluster searchCluster, Set<Integer> alreadyFailedNodes) {
super(Optional.empty());
this.invokers = Collections.newSetFromMap(new IdentityHashMap<>());
this.invokers.addAll(invokers);
this.searcher = searcher;
this.searchCluster = searchCluster;
this.availableForProcessing = newQueue();
this.alreadyFailedNodes = alreadyFailedNodes;
}
/**
* Sends search queries to the contained {@link SearchInvoker} sub-invokers. If the search
* query has an offset other than zero, it will be reset to zero and the expected hit amount
* will be adjusted accordingly.
*/
@Override
protected void sendSearchRequest(Query query, QueryPacket queryPacket) throws IOException {
this.query = query;
invokers.forEach(invoker -> invoker.setMonitor(this));
deadline = currentTime() + query.getTimeLeft();
int originalHits = query.getHits();
int originalOffset = query.getOffset();
query.setHits(query.getHits() + query.getOffset());
query.setOffset(0);
trimResult = originalHits != query.getHits() || originalOffset != query.getOffset();
for (SearchInvoker invoker : invokers) {
invoker.sendSearchRequest(query, null);
askedNodes++;
}
query.setHits(originalHits);
query.setOffset(originalOffset);
}
@Override
protected Result getSearchResult(Execution execution) throws IOException {
long nextTimeout = query.getTimeLeft();
try {
while (!invokers.isEmpty() && nextTimeout >= 0) {
SearchInvoker invoker = availableForProcessing.poll(nextTimeout, TimeUnit.MILLISECONDS);
if (invoker == null) {
log.fine(() -> "Search timed out with " + askedNodes + " requests made, " + answeredNodes + " responses received");
break;
} else {
mergeResult(invoker.getSearchResult(execution));
ejectInvoker(invoker);
}
nextTimeout = nextTimeout();
}
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted while waiting for search results", e);
}
if (result == null) {
result = new Result(query);
}
insertNetworkErrors();
result.setCoverage(createCoverage());
trimResult(execution);
Result ret = result;
result = null;
return ret;
}
private void trimResult(Execution execution) {
if (trimResult) {
if (result.getHitOrderer() != null) {
searcher.fill(result, Execution.ATTRIBUTEPREFETCH, execution);
}
result.hits().trim(query.getOffset(), query.getHits());
}
}
private long nextTimeout() {
DispatchConfig config = searchCluster.dispatchConfig();
double minimumCoverage = config.minSearchCoverage();
if (askedNodes == answeredNodes || minimumCoverage >= 100.0) {
return query.getTimeLeft();
}
int minimumResponses = (int) Math.ceil(askedNodes * minimumCoverage / 100.0);
if (answeredNodes < minimumResponses) {
return query.getTimeLeft();
}
long timeLeft = query.getTimeLeft();
if (!adaptiveTimeoutCalculated) {
adaptiveTimeoutMin = (long) (timeLeft * config.minWaitAfterCoverageFactor());
adaptiveTimeoutMax = (long) (timeLeft * config.maxWaitAfterCoverageFactor());
adaptiveTimeoutCalculated = true;
}
long now = currentTime();
int pendingQueries = askedNodes - answeredNodes;
double missWidth = ((100.0 - config.minSearchCoverage()) * askedNodes) / 100.0 - 1.0;
double slopedWait = adaptiveTimeoutMin;
if (pendingQueries > 1 && missWidth > 0.0) {
slopedWait += ((adaptiveTimeoutMax - adaptiveTimeoutMin) * (pendingQueries - 1)) / missWidth;
}
long nextAdaptive = (long) slopedWait;
if (now + nextAdaptive >= deadline) {
return deadline - now;
}
deadline = now + nextAdaptive;
return nextAdaptive;
}
private void mergeResult(Result partialResult) {
collectCoverage(partialResult.getCoverage(true));
if (result == null) {
result = partialResult;
return;
}
result.mergeWith(partialResult);
result.hits().addAll(partialResult.hits().asUnorderedHits());
}
private void collectCoverage(Coverage source) {
answeredDocs += source.getDocs();
answeredActiveDocs += source.getActive();
answeredSoonActiveDocs += source.getSoonActive();
answeredNodesParticipated += source.getNodes();
answeredNodes++;
degradedByMatchPhase |= source.isDegradedByMatchPhase();
timedOut |= source.isDegradedByTimeout();
}
private Coverage createCoverage() {
adjustDegradedCoverage();
Coverage coverage = new Coverage(answeredDocs, answeredActiveDocs, answeredNodesParticipated, 1);
coverage.setNodesTried(askedNodes);
coverage.setSoonActive(answeredSoonActiveDocs);
int degradedReason = 0;
if (timedOut) {
degradedReason |= (adaptiveTimeoutCalculated ? DEGRADED_BY_ADAPTIVE_TIMEOUT : DEGRADED_BY_TIMEOUT);
}
if (degradedByMatchPhase) {
degradedReason |= DEGRADED_BY_MATCH_PHASE;
}
coverage.setDegradedReason(degradedReason);
return coverage;
}
private void adjustDegradedCoverage() {
if (askedNodes == answeredNodesParticipated) {
return;
}
int notAnswered = askedNodes - answeredNodesParticipated;
if (adaptiveTimeoutCalculated && answeredNodesParticipated > 0) {
answeredActiveDocs += (notAnswered * answeredActiveDocs / answeredNodesParticipated);
answeredSoonActiveDocs += (notAnswered * answeredSoonActiveDocs / answeredNodesParticipated);
} else {
if (askedNodes > answeredNodesParticipated) {
int searchableCopies = (int) searchCluster.dispatchConfig().searchableCopies();
int missingNodes = notAnswered - (searchableCopies - 1);
if (answeredNodesParticipated > 0) {
answeredActiveDocs += (missingNodes * answeredActiveDocs / answeredNodesParticipated);
answeredSoonActiveDocs += (missingNodes * answeredSoonActiveDocs / answeredNodesParticipated);
timedOut = true;
}
}
}
}
private void ejectInvoker(SearchInvoker invoker) {
invokers.remove(invoker);
invoker.release();
}
@Override
protected void release() {
if (!invokers.isEmpty()) {
invokers.forEach(SearchInvoker::close);
invokers.clear();
}
}
@Override
public void responseAvailable(SearchInvoker from) {
if (availableForProcessing != null) {
availableForProcessing.add(from);
}
}
@Override
protected void setMonitor(ResponseMonitor<SearchInvoker> monitor) {
}
protected long currentTime() {
return System.currentTimeMillis();
}
protected LinkedBlockingQueue<SearchInvoker> newQueue() {
return new LinkedBlockingQueue<>();
}
} |
(Same comment about level) | private void insertNetworkErrors() {
boolean asErrors = answeredNodes == 0;
if (!invokers.isEmpty()) {
String keys = invokers.stream().map(SearchInvoker::distributionKey).map(dk -> dk.map(i -> i.toString()).orElse("(unspecified)"))
.collect(Collectors.joining(", "));
if (asErrors) {
result.hits().addError(ErrorMessage
.createTimeout("Backend communication timeout on all nodes in group (distribution-keys: " + keys + ")"));
} else {
query.trace("Backend communication timeout on nodes with distribution-keys: " + keys, 5);
}
timedOut = true;
}
if (alreadyFailedNodes != null) {
var message = "Connection failure on nodes with distribution-keys: "
+ alreadyFailedNodes.stream().map(v -> Integer.toString(v)).collect(Collectors.joining(", "));
if (asErrors) {
result.hits().addError(ErrorMessage.createBackendCommunicationError(message));
} else {
query.trace(message, 5);
}
int failed = alreadyFailedNodes.size();
askedNodes += failed;
answeredNodes += failed;
}
} | query.trace(message, 5); | private void insertNetworkErrors() {
boolean asErrors = answeredNodes == 0;
if (!invokers.isEmpty()) {
String keys = invokers.stream().map(SearchInvoker::distributionKey).map(dk -> dk.map(i -> i.toString()).orElse("(unspecified)"))
.collect(Collectors.joining(", "));
if (asErrors) {
result.hits().addError(ErrorMessage
.createTimeout("Backend communication timeout on all nodes in group (distribution-keys: " + keys + ")"));
} else {
query.trace("Backend communication timeout on nodes with distribution-keys: " + keys, 2);
}
timedOut = true;
}
if (alreadyFailedNodes != null) {
var message = "Connection failure on nodes with distribution-keys: "
+ alreadyFailedNodes.stream().map(v -> Integer.toString(v)).collect(Collectors.joining(", "));
if (asErrors) {
result.hits().addError(ErrorMessage.createBackendCommunicationError(message));
} else {
query.trace(message, 2);
}
int failed = alreadyFailedNodes.size();
askedNodes += failed;
answeredNodes += failed;
}
} | class InterleavedSearchInvoker extends SearchInvoker implements ResponseMonitor<SearchInvoker> {
private static final Logger log = Logger.getLogger(InterleavedSearchInvoker.class.getName());
private final Set<SearchInvoker> invokers;
private final VespaBackEndSearcher searcher;
private final SearchCluster searchCluster;
private final LinkedBlockingQueue<SearchInvoker> availableForProcessing;
private final Set<Integer> alreadyFailedNodes;
private Query query;
private boolean adaptiveTimeoutCalculated = false;
private long adaptiveTimeoutMin = 0;
private long adaptiveTimeoutMax = 0;
private long deadline = 0;
private Result result = null;
private long answeredDocs = 0;
private long answeredActiveDocs = 0;
private long answeredSoonActiveDocs = 0;
private int askedNodes = 0;
private int answeredNodes = 0;
private int answeredNodesParticipated = 0;
private boolean timedOut = false;
private boolean degradedByMatchPhase = false;
private boolean trimResult = false;
public InterleavedSearchInvoker(Collection<SearchInvoker> invokers, VespaBackEndSearcher searcher, SearchCluster searchCluster, Set<Integer> alreadyFailedNodes) {
super(Optional.empty());
this.invokers = Collections.newSetFromMap(new IdentityHashMap<>());
this.invokers.addAll(invokers);
this.searcher = searcher;
this.searchCluster = searchCluster;
this.availableForProcessing = newQueue();
this.alreadyFailedNodes = alreadyFailedNodes;
}
/**
* Sends search queries to the contained {@link SearchInvoker} sub-invokers. If the search
* query has an offset other than zero, it will be reset to zero and the expected hit amount
* will be adjusted accordingly.
*/
@Override
protected void sendSearchRequest(Query query, QueryPacket queryPacket) throws IOException {
this.query = query;
invokers.forEach(invoker -> invoker.setMonitor(this));
deadline = currentTime() + query.getTimeLeft();
int originalHits = query.getHits();
int originalOffset = query.getOffset();
query.setHits(query.getHits() + query.getOffset());
query.setOffset(0);
trimResult = originalHits != query.getHits() || originalOffset != query.getOffset();
for (SearchInvoker invoker : invokers) {
invoker.sendSearchRequest(query, null);
askedNodes++;
}
query.setHits(originalHits);
query.setOffset(originalOffset);
}
@Override
protected Result getSearchResult(Execution execution) throws IOException {
long nextTimeout = query.getTimeLeft();
try {
while (!invokers.isEmpty() && nextTimeout >= 0) {
SearchInvoker invoker = availableForProcessing.poll(nextTimeout, TimeUnit.MILLISECONDS);
if (invoker == null) {
log.fine(() -> "Search timed out with " + askedNodes + " requests made, " + answeredNodes + " responses received");
break;
} else {
mergeResult(invoker.getSearchResult(execution));
ejectInvoker(invoker);
}
nextTimeout = nextTimeout();
}
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted while waiting for search results", e);
}
if (result == null) {
result = new Result(query);
}
insertNetworkErrors();
result.setCoverage(createCoverage());
trimResult(execution);
Result ret = result;
result = null;
return ret;
}
private void trimResult(Execution execution) {
if (trimResult) {
if (result.getHitOrderer() != null) {
searcher.fill(result, Execution.ATTRIBUTEPREFETCH, execution);
}
result.hits().trim(query.getOffset(), query.getHits());
}
}
private long nextTimeout() {
DispatchConfig config = searchCluster.dispatchConfig();
double minimumCoverage = config.minSearchCoverage();
if (askedNodes == answeredNodes || minimumCoverage >= 100.0) {
return query.getTimeLeft();
}
int minimumResponses = (int) Math.ceil(askedNodes * minimumCoverage / 100.0);
if (answeredNodes < minimumResponses) {
return query.getTimeLeft();
}
long timeLeft = query.getTimeLeft();
if (!adaptiveTimeoutCalculated) {
adaptiveTimeoutMin = (long) (timeLeft * config.minWaitAfterCoverageFactor());
adaptiveTimeoutMax = (long) (timeLeft * config.maxWaitAfterCoverageFactor());
adaptiveTimeoutCalculated = true;
}
long now = currentTime();
int pendingQueries = askedNodes - answeredNodes;
double missWidth = ((100.0 - config.minSearchCoverage()) * askedNodes) / 100.0 - 1.0;
double slopedWait = adaptiveTimeoutMin;
if (pendingQueries > 1 && missWidth > 0.0) {
slopedWait += ((adaptiveTimeoutMax - adaptiveTimeoutMin) * (pendingQueries - 1)) / missWidth;
}
long nextAdaptive = (long) slopedWait;
if (now + nextAdaptive >= deadline) {
return deadline - now;
}
deadline = now + nextAdaptive;
return nextAdaptive;
}
private void mergeResult(Result partialResult) {
collectCoverage(partialResult.getCoverage(true));
if (result == null) {
result = partialResult;
return;
}
result.mergeWith(partialResult);
result.hits().addAll(partialResult.hits().asUnorderedHits());
}
private void collectCoverage(Coverage source) {
answeredDocs += source.getDocs();
answeredActiveDocs += source.getActive();
answeredSoonActiveDocs += source.getSoonActive();
answeredNodesParticipated += source.getNodes();
answeredNodes++;
degradedByMatchPhase |= source.isDegradedByMatchPhase();
timedOut |= source.isDegradedByTimeout();
}
private Coverage createCoverage() {
adjustDegradedCoverage();
Coverage coverage = new Coverage(answeredDocs, answeredActiveDocs, answeredNodesParticipated, 1);
coverage.setNodesTried(askedNodes);
coverage.setSoonActive(answeredSoonActiveDocs);
int degradedReason = 0;
if (timedOut) {
degradedReason |= (adaptiveTimeoutCalculated ? DEGRADED_BY_ADAPTIVE_TIMEOUT : DEGRADED_BY_TIMEOUT);
}
if (degradedByMatchPhase) {
degradedReason |= DEGRADED_BY_MATCH_PHASE;
}
coverage.setDegradedReason(degradedReason);
return coverage;
}
private void adjustDegradedCoverage() {
if (askedNodes == answeredNodesParticipated) {
return;
}
int notAnswered = askedNodes - answeredNodesParticipated;
if (adaptiveTimeoutCalculated && answeredNodesParticipated > 0) {
answeredActiveDocs += (notAnswered * answeredActiveDocs / answeredNodesParticipated);
answeredSoonActiveDocs += (notAnswered * answeredSoonActiveDocs / answeredNodesParticipated);
} else {
if (askedNodes > answeredNodesParticipated) {
int searchableCopies = (int) searchCluster.dispatchConfig().searchableCopies();
int missingNodes = notAnswered - (searchableCopies - 1);
if (answeredNodesParticipated > 0) {
answeredActiveDocs += (missingNodes * answeredActiveDocs / answeredNodesParticipated);
answeredSoonActiveDocs += (missingNodes * answeredSoonActiveDocs / answeredNodesParticipated);
timedOut = true;
}
}
}
}
private void ejectInvoker(SearchInvoker invoker) {
invokers.remove(invoker);
invoker.release();
}
@Override
protected void release() {
if (!invokers.isEmpty()) {
invokers.forEach(SearchInvoker::close);
invokers.clear();
}
}
@Override
public void responseAvailable(SearchInvoker from) {
if (availableForProcessing != null) {
availableForProcessing.add(from);
}
}
@Override
protected void setMonitor(ResponseMonitor<SearchInvoker> monitor) {
}
protected long currentTime() {
return System.currentTimeMillis();
}
protected LinkedBlockingQueue<SearchInvoker> newQueue() {
return new LinkedBlockingQueue<>();
}
} | class InterleavedSearchInvoker extends SearchInvoker implements ResponseMonitor<SearchInvoker> {
private static final Logger log = Logger.getLogger(InterleavedSearchInvoker.class.getName());
private final Set<SearchInvoker> invokers;
private final VespaBackEndSearcher searcher;
private final SearchCluster searchCluster;
private final LinkedBlockingQueue<SearchInvoker> availableForProcessing;
private final Set<Integer> alreadyFailedNodes;
private Query query;
private boolean adaptiveTimeoutCalculated = false;
private long adaptiveTimeoutMin = 0;
private long adaptiveTimeoutMax = 0;
private long deadline = 0;
private Result result = null;
private long answeredDocs = 0;
private long answeredActiveDocs = 0;
private long answeredSoonActiveDocs = 0;
private int askedNodes = 0;
private int answeredNodes = 0;
private int answeredNodesParticipated = 0;
private boolean timedOut = false;
private boolean degradedByMatchPhase = false;
private boolean trimResult = false;
public InterleavedSearchInvoker(Collection<SearchInvoker> invokers, VespaBackEndSearcher searcher, SearchCluster searchCluster, Set<Integer> alreadyFailedNodes) {
super(Optional.empty());
this.invokers = Collections.newSetFromMap(new IdentityHashMap<>());
this.invokers.addAll(invokers);
this.searcher = searcher;
this.searchCluster = searchCluster;
this.availableForProcessing = newQueue();
this.alreadyFailedNodes = alreadyFailedNodes;
}
/**
* Sends search queries to the contained {@link SearchInvoker} sub-invokers. If the search
* query has an offset other than zero, it will be reset to zero and the expected hit amount
* will be adjusted accordingly.
*/
@Override
protected void sendSearchRequest(Query query, QueryPacket queryPacket) throws IOException {
this.query = query;
invokers.forEach(invoker -> invoker.setMonitor(this));
deadline = currentTime() + query.getTimeLeft();
int originalHits = query.getHits();
int originalOffset = query.getOffset();
query.setHits(query.getHits() + query.getOffset());
query.setOffset(0);
trimResult = originalHits != query.getHits() || originalOffset != query.getOffset();
for (SearchInvoker invoker : invokers) {
invoker.sendSearchRequest(query, null);
askedNodes++;
}
query.setHits(originalHits);
query.setOffset(originalOffset);
}
@Override
protected Result getSearchResult(Execution execution) throws IOException {
long nextTimeout = query.getTimeLeft();
try {
while (!invokers.isEmpty() && nextTimeout >= 0) {
SearchInvoker invoker = availableForProcessing.poll(nextTimeout, TimeUnit.MILLISECONDS);
if (invoker == null) {
log.fine(() -> "Search timed out with " + askedNodes + " requests made, " + answeredNodes + " responses received");
break;
} else {
mergeResult(invoker.getSearchResult(execution));
ejectInvoker(invoker);
}
nextTimeout = nextTimeout();
}
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted while waiting for search results", e);
}
if (result == null) {
result = new Result(query);
}
insertNetworkErrors();
result.setCoverage(createCoverage());
trimResult(execution);
Result ret = result;
result = null;
return ret;
}
private void trimResult(Execution execution) {
if (trimResult) {
if (result.getHitOrderer() != null) {
searcher.fill(result, Execution.ATTRIBUTEPREFETCH, execution);
}
result.hits().trim(query.getOffset(), query.getHits());
}
}
private long nextTimeout() {
DispatchConfig config = searchCluster.dispatchConfig();
double minimumCoverage = config.minSearchCoverage();
if (askedNodes == answeredNodes || minimumCoverage >= 100.0) {
return query.getTimeLeft();
}
int minimumResponses = (int) Math.ceil(askedNodes * minimumCoverage / 100.0);
if (answeredNodes < minimumResponses) {
return query.getTimeLeft();
}
long timeLeft = query.getTimeLeft();
if (!adaptiveTimeoutCalculated) {
adaptiveTimeoutMin = (long) (timeLeft * config.minWaitAfterCoverageFactor());
adaptiveTimeoutMax = (long) (timeLeft * config.maxWaitAfterCoverageFactor());
adaptiveTimeoutCalculated = true;
}
long now = currentTime();
int pendingQueries = askedNodes - answeredNodes;
double missWidth = ((100.0 - config.minSearchCoverage()) * askedNodes) / 100.0 - 1.0;
double slopedWait = adaptiveTimeoutMin;
if (pendingQueries > 1 && missWidth > 0.0) {
slopedWait += ((adaptiveTimeoutMax - adaptiveTimeoutMin) * (pendingQueries - 1)) / missWidth;
}
long nextAdaptive = (long) slopedWait;
if (now + nextAdaptive >= deadline) {
return deadline - now;
}
deadline = now + nextAdaptive;
return nextAdaptive;
}
private void mergeResult(Result partialResult) {
collectCoverage(partialResult.getCoverage(true));
if (result == null) {
result = partialResult;
return;
}
result.mergeWith(partialResult);
result.hits().addAll(partialResult.hits().asUnorderedHits());
}
private void collectCoverage(Coverage source) {
answeredDocs += source.getDocs();
answeredActiveDocs += source.getActive();
answeredSoonActiveDocs += source.getSoonActive();
answeredNodesParticipated += source.getNodes();
answeredNodes++;
degradedByMatchPhase |= source.isDegradedByMatchPhase();
timedOut |= source.isDegradedByTimeout();
}
private Coverage createCoverage() {
adjustDegradedCoverage();
Coverage coverage = new Coverage(answeredDocs, answeredActiveDocs, answeredNodesParticipated, 1);
coverage.setNodesTried(askedNodes);
coverage.setSoonActive(answeredSoonActiveDocs);
int degradedReason = 0;
if (timedOut) {
degradedReason |= (adaptiveTimeoutCalculated ? DEGRADED_BY_ADAPTIVE_TIMEOUT : DEGRADED_BY_TIMEOUT);
}
if (degradedByMatchPhase) {
degradedReason |= DEGRADED_BY_MATCH_PHASE;
}
coverage.setDegradedReason(degradedReason);
return coverage;
}
private void adjustDegradedCoverage() {
if (askedNodes == answeredNodesParticipated) {
return;
}
int notAnswered = askedNodes - answeredNodesParticipated;
if (adaptiveTimeoutCalculated && answeredNodesParticipated > 0) {
answeredActiveDocs += (notAnswered * answeredActiveDocs / answeredNodesParticipated);
answeredSoonActiveDocs += (notAnswered * answeredSoonActiveDocs / answeredNodesParticipated);
} else {
if (askedNodes > answeredNodesParticipated) {
int searchableCopies = (int) searchCluster.dispatchConfig().searchableCopies();
int missingNodes = notAnswered - (searchableCopies - 1);
if (answeredNodesParticipated > 0) {
answeredActiveDocs += (missingNodes * answeredActiveDocs / answeredNodesParticipated);
answeredSoonActiveDocs += (missingNodes * answeredSoonActiveDocs / answeredNodesParticipated);
timedOut = true;
}
}
}
}
private void ejectInvoker(SearchInvoker invoker) {
invokers.remove(invoker);
invoker.release();
}
@Override
protected void release() {
if (!invokers.isEmpty()) {
invokers.forEach(SearchInvoker::close);
invokers.clear();
}
}
@Override
public void responseAvailable(SearchInvoker from) {
if (availableForProcessing != null) {
availableForProcessing.add(from);
}
}
@Override
protected void setMonitor(ResponseMonitor<SearchInvoker> monitor) {
}
protected long currentTime() {
return System.currentTimeMillis();
}
protected LinkedBlockingQueue<SearchInvoker> newQueue() {
return new LinkedBlockingQueue<>();
}
} |
I think we should have a metric for this as well. | private void insertNetworkErrors() {
boolean asErrors = answeredNodes == 0;
if (!invokers.isEmpty()) {
String keys = invokers.stream().map(SearchInvoker::distributionKey).map(dk -> dk.map(i -> i.toString()).orElse("(unspecified)"))
.collect(Collectors.joining(", "));
if (asErrors) {
result.hits().addError(ErrorMessage
.createTimeout("Backend communication timeout on all nodes in group (distribution-keys: " + keys + ")"));
} else {
query.trace("Backend communication timeout on nodes with distribution-keys: " + keys, 5);
}
timedOut = true;
}
if (alreadyFailedNodes != null) {
var message = "Connection failure on nodes with distribution-keys: "
+ alreadyFailedNodes.stream().map(v -> Integer.toString(v)).collect(Collectors.joining(", "));
if (asErrors) {
result.hits().addError(ErrorMessage.createBackendCommunicationError(message));
} else {
query.trace(message, 5);
}
int failed = alreadyFailedNodes.size();
askedNodes += failed;
answeredNodes += failed;
}
} | query.trace("Backend communication timeout on nodes with distribution-keys: " + keys, 5); | private void insertNetworkErrors() {
boolean asErrors = answeredNodes == 0;
if (!invokers.isEmpty()) {
String keys = invokers.stream().map(SearchInvoker::distributionKey).map(dk -> dk.map(i -> i.toString()).orElse("(unspecified)"))
.collect(Collectors.joining(", "));
if (asErrors) {
result.hits().addError(ErrorMessage
.createTimeout("Backend communication timeout on all nodes in group (distribution-keys: " + keys + ")"));
} else {
query.trace("Backend communication timeout on nodes with distribution-keys: " + keys, 2);
}
timedOut = true;
}
if (alreadyFailedNodes != null) {
var message = "Connection failure on nodes with distribution-keys: "
+ alreadyFailedNodes.stream().map(v -> Integer.toString(v)).collect(Collectors.joining(", "));
if (asErrors) {
result.hits().addError(ErrorMessage.createBackendCommunicationError(message));
} else {
query.trace(message, 2);
}
int failed = alreadyFailedNodes.size();
askedNodes += failed;
answeredNodes += failed;
}
} | class InterleavedSearchInvoker extends SearchInvoker implements ResponseMonitor<SearchInvoker> {
private static final Logger log = Logger.getLogger(InterleavedSearchInvoker.class.getName());
private final Set<SearchInvoker> invokers;
private final VespaBackEndSearcher searcher;
private final SearchCluster searchCluster;
private final LinkedBlockingQueue<SearchInvoker> availableForProcessing;
private final Set<Integer> alreadyFailedNodes;
private Query query;
private boolean adaptiveTimeoutCalculated = false;
private long adaptiveTimeoutMin = 0;
private long adaptiveTimeoutMax = 0;
private long deadline = 0;
private Result result = null;
private long answeredDocs = 0;
private long answeredActiveDocs = 0;
private long answeredSoonActiveDocs = 0;
private int askedNodes = 0;
private int answeredNodes = 0;
private int answeredNodesParticipated = 0;
private boolean timedOut = false;
private boolean degradedByMatchPhase = false;
private boolean trimResult = false;
public InterleavedSearchInvoker(Collection<SearchInvoker> invokers, VespaBackEndSearcher searcher, SearchCluster searchCluster, Set<Integer> alreadyFailedNodes) {
super(Optional.empty());
this.invokers = Collections.newSetFromMap(new IdentityHashMap<>());
this.invokers.addAll(invokers);
this.searcher = searcher;
this.searchCluster = searchCluster;
this.availableForProcessing = newQueue();
this.alreadyFailedNodes = alreadyFailedNodes;
}
/**
* Sends search queries to the contained {@link SearchInvoker} sub-invokers. If the search
* query has an offset other than zero, it will be reset to zero and the expected hit amount
* will be adjusted accordingly.
*/
@Override
protected void sendSearchRequest(Query query, QueryPacket queryPacket) throws IOException {
this.query = query;
invokers.forEach(invoker -> invoker.setMonitor(this));
deadline = currentTime() + query.getTimeLeft();
int originalHits = query.getHits();
int originalOffset = query.getOffset();
query.setHits(query.getHits() + query.getOffset());
query.setOffset(0);
trimResult = originalHits != query.getHits() || originalOffset != query.getOffset();
for (SearchInvoker invoker : invokers) {
invoker.sendSearchRequest(query, null);
askedNodes++;
}
query.setHits(originalHits);
query.setOffset(originalOffset);
}
@Override
protected Result getSearchResult(Execution execution) throws IOException {
long nextTimeout = query.getTimeLeft();
try {
while (!invokers.isEmpty() && nextTimeout >= 0) {
SearchInvoker invoker = availableForProcessing.poll(nextTimeout, TimeUnit.MILLISECONDS);
if (invoker == null) {
log.fine(() -> "Search timed out with " + askedNodes + " requests made, " + answeredNodes + " responses received");
break;
} else {
mergeResult(invoker.getSearchResult(execution));
ejectInvoker(invoker);
}
nextTimeout = nextTimeout();
}
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted while waiting for search results", e);
}
if (result == null) {
result = new Result(query);
}
insertNetworkErrors();
result.setCoverage(createCoverage());
trimResult(execution);
Result ret = result;
result = null;
return ret;
}
private void trimResult(Execution execution) {
if (trimResult) {
if (result.getHitOrderer() != null) {
searcher.fill(result, Execution.ATTRIBUTEPREFETCH, execution);
}
result.hits().trim(query.getOffset(), query.getHits());
}
}
private long nextTimeout() {
DispatchConfig config = searchCluster.dispatchConfig();
double minimumCoverage = config.minSearchCoverage();
if (askedNodes == answeredNodes || minimumCoverage >= 100.0) {
return query.getTimeLeft();
}
int minimumResponses = (int) Math.ceil(askedNodes * minimumCoverage / 100.0);
if (answeredNodes < minimumResponses) {
return query.getTimeLeft();
}
long timeLeft = query.getTimeLeft();
if (!adaptiveTimeoutCalculated) {
adaptiveTimeoutMin = (long) (timeLeft * config.minWaitAfterCoverageFactor());
adaptiveTimeoutMax = (long) (timeLeft * config.maxWaitAfterCoverageFactor());
adaptiveTimeoutCalculated = true;
}
long now = currentTime();
int pendingQueries = askedNodes - answeredNodes;
double missWidth = ((100.0 - config.minSearchCoverage()) * askedNodes) / 100.0 - 1.0;
double slopedWait = adaptiveTimeoutMin;
if (pendingQueries > 1 && missWidth > 0.0) {
slopedWait += ((adaptiveTimeoutMax - adaptiveTimeoutMin) * (pendingQueries - 1)) / missWidth;
}
long nextAdaptive = (long) slopedWait;
if (now + nextAdaptive >= deadline) {
return deadline - now;
}
deadline = now + nextAdaptive;
return nextAdaptive;
}
private void mergeResult(Result partialResult) {
collectCoverage(partialResult.getCoverage(true));
if (result == null) {
result = partialResult;
return;
}
result.mergeWith(partialResult);
result.hits().addAll(partialResult.hits().asUnorderedHits());
}
private void collectCoverage(Coverage source) {
answeredDocs += source.getDocs();
answeredActiveDocs += source.getActive();
answeredSoonActiveDocs += source.getSoonActive();
answeredNodesParticipated += source.getNodes();
answeredNodes++;
degradedByMatchPhase |= source.isDegradedByMatchPhase();
timedOut |= source.isDegradedByTimeout();
}
private Coverage createCoverage() {
adjustDegradedCoverage();
Coverage coverage = new Coverage(answeredDocs, answeredActiveDocs, answeredNodesParticipated, 1);
coverage.setNodesTried(askedNodes);
coverage.setSoonActive(answeredSoonActiveDocs);
int degradedReason = 0;
if (timedOut) {
degradedReason |= (adaptiveTimeoutCalculated ? DEGRADED_BY_ADAPTIVE_TIMEOUT : DEGRADED_BY_TIMEOUT);
}
if (degradedByMatchPhase) {
degradedReason |= DEGRADED_BY_MATCH_PHASE;
}
coverage.setDegradedReason(degradedReason);
return coverage;
}
private void adjustDegradedCoverage() {
if (askedNodes == answeredNodesParticipated) {
return;
}
int notAnswered = askedNodes - answeredNodesParticipated;
if (adaptiveTimeoutCalculated && answeredNodesParticipated > 0) {
answeredActiveDocs += (notAnswered * answeredActiveDocs / answeredNodesParticipated);
answeredSoonActiveDocs += (notAnswered * answeredSoonActiveDocs / answeredNodesParticipated);
} else {
if (askedNodes > answeredNodesParticipated) {
int searchableCopies = (int) searchCluster.dispatchConfig().searchableCopies();
int missingNodes = notAnswered - (searchableCopies - 1);
if (answeredNodesParticipated > 0) {
answeredActiveDocs += (missingNodes * answeredActiveDocs / answeredNodesParticipated);
answeredSoonActiveDocs += (missingNodes * answeredSoonActiveDocs / answeredNodesParticipated);
timedOut = true;
}
}
}
}
private void ejectInvoker(SearchInvoker invoker) {
invokers.remove(invoker);
invoker.release();
}
@Override
protected void release() {
if (!invokers.isEmpty()) {
invokers.forEach(SearchInvoker::close);
invokers.clear();
}
}
@Override
public void responseAvailable(SearchInvoker from) {
if (availableForProcessing != null) {
availableForProcessing.add(from);
}
}
@Override
protected void setMonitor(ResponseMonitor<SearchInvoker> monitor) {
}
protected long currentTime() {
return System.currentTimeMillis();
}
protected LinkedBlockingQueue<SearchInvoker> newQueue() {
return new LinkedBlockingQueue<>();
}
} | class InterleavedSearchInvoker extends SearchInvoker implements ResponseMonitor<SearchInvoker> {
private static final Logger log = Logger.getLogger(InterleavedSearchInvoker.class.getName());
private final Set<SearchInvoker> invokers;
private final VespaBackEndSearcher searcher;
private final SearchCluster searchCluster;
private final LinkedBlockingQueue<SearchInvoker> availableForProcessing;
private final Set<Integer> alreadyFailedNodes;
private Query query;
private boolean adaptiveTimeoutCalculated = false;
private long adaptiveTimeoutMin = 0;
private long adaptiveTimeoutMax = 0;
private long deadline = 0;
private Result result = null;
private long answeredDocs = 0;
private long answeredActiveDocs = 0;
private long answeredSoonActiveDocs = 0;
private int askedNodes = 0;
private int answeredNodes = 0;
private int answeredNodesParticipated = 0;
private boolean timedOut = false;
private boolean degradedByMatchPhase = false;
private boolean trimResult = false;
public InterleavedSearchInvoker(Collection<SearchInvoker> invokers, VespaBackEndSearcher searcher, SearchCluster searchCluster, Set<Integer> alreadyFailedNodes) {
super(Optional.empty());
this.invokers = Collections.newSetFromMap(new IdentityHashMap<>());
this.invokers.addAll(invokers);
this.searcher = searcher;
this.searchCluster = searchCluster;
this.availableForProcessing = newQueue();
this.alreadyFailedNodes = alreadyFailedNodes;
}
/**
* Sends search queries to the contained {@link SearchInvoker} sub-invokers. If the search
* query has an offset other than zero, it will be reset to zero and the expected hit amount
* will be adjusted accordingly.
*/
@Override
protected void sendSearchRequest(Query query, QueryPacket queryPacket) throws IOException {
this.query = query;
invokers.forEach(invoker -> invoker.setMonitor(this));
deadline = currentTime() + query.getTimeLeft();
int originalHits = query.getHits();
int originalOffset = query.getOffset();
query.setHits(query.getHits() + query.getOffset());
query.setOffset(0);
trimResult = originalHits != query.getHits() || originalOffset != query.getOffset();
for (SearchInvoker invoker : invokers) {
invoker.sendSearchRequest(query, null);
askedNodes++;
}
query.setHits(originalHits);
query.setOffset(originalOffset);
}
@Override
protected Result getSearchResult(Execution execution) throws IOException {
long nextTimeout = query.getTimeLeft();
try {
while (!invokers.isEmpty() && nextTimeout >= 0) {
SearchInvoker invoker = availableForProcessing.poll(nextTimeout, TimeUnit.MILLISECONDS);
if (invoker == null) {
log.fine(() -> "Search timed out with " + askedNodes + " requests made, " + answeredNodes + " responses received");
break;
} else {
mergeResult(invoker.getSearchResult(execution));
ejectInvoker(invoker);
}
nextTimeout = nextTimeout();
}
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted while waiting for search results", e);
}
if (result == null) {
result = new Result(query);
}
insertNetworkErrors();
result.setCoverage(createCoverage());
trimResult(execution);
Result ret = result;
result = null;
return ret;
}
private void trimResult(Execution execution) {
if (trimResult) {
if (result.getHitOrderer() != null) {
searcher.fill(result, Execution.ATTRIBUTEPREFETCH, execution);
}
result.hits().trim(query.getOffset(), query.getHits());
}
}
private long nextTimeout() {
DispatchConfig config = searchCluster.dispatchConfig();
double minimumCoverage = config.minSearchCoverage();
if (askedNodes == answeredNodes || minimumCoverage >= 100.0) {
return query.getTimeLeft();
}
int minimumResponses = (int) Math.ceil(askedNodes * minimumCoverage / 100.0);
if (answeredNodes < minimumResponses) {
return query.getTimeLeft();
}
long timeLeft = query.getTimeLeft();
if (!adaptiveTimeoutCalculated) {
adaptiveTimeoutMin = (long) (timeLeft * config.minWaitAfterCoverageFactor());
adaptiveTimeoutMax = (long) (timeLeft * config.maxWaitAfterCoverageFactor());
adaptiveTimeoutCalculated = true;
}
long now = currentTime();
int pendingQueries = askedNodes - answeredNodes;
double missWidth = ((100.0 - config.minSearchCoverage()) * askedNodes) / 100.0 - 1.0;
double slopedWait = adaptiveTimeoutMin;
if (pendingQueries > 1 && missWidth > 0.0) {
slopedWait += ((adaptiveTimeoutMax - adaptiveTimeoutMin) * (pendingQueries - 1)) / missWidth;
}
long nextAdaptive = (long) slopedWait;
if (now + nextAdaptive >= deadline) {
return deadline - now;
}
deadline = now + nextAdaptive;
return nextAdaptive;
}
private void mergeResult(Result partialResult) {
collectCoverage(partialResult.getCoverage(true));
if (result == null) {
result = partialResult;
return;
}
result.mergeWith(partialResult);
result.hits().addAll(partialResult.hits().asUnorderedHits());
}
private void collectCoverage(Coverage source) {
answeredDocs += source.getDocs();
answeredActiveDocs += source.getActive();
answeredSoonActiveDocs += source.getSoonActive();
answeredNodesParticipated += source.getNodes();
answeredNodes++;
degradedByMatchPhase |= source.isDegradedByMatchPhase();
timedOut |= source.isDegradedByTimeout();
}
private Coverage createCoverage() {
adjustDegradedCoverage();
Coverage coverage = new Coverage(answeredDocs, answeredActiveDocs, answeredNodesParticipated, 1);
coverage.setNodesTried(askedNodes);
coverage.setSoonActive(answeredSoonActiveDocs);
int degradedReason = 0;
if (timedOut) {
degradedReason |= (adaptiveTimeoutCalculated ? DEGRADED_BY_ADAPTIVE_TIMEOUT : DEGRADED_BY_TIMEOUT);
}
if (degradedByMatchPhase) {
degradedReason |= DEGRADED_BY_MATCH_PHASE;
}
coverage.setDegradedReason(degradedReason);
return coverage;
}
private void adjustDegradedCoverage() {
if (askedNodes == answeredNodesParticipated) {
return;
}
int notAnswered = askedNodes - answeredNodesParticipated;
if (adaptiveTimeoutCalculated && answeredNodesParticipated > 0) {
answeredActiveDocs += (notAnswered * answeredActiveDocs / answeredNodesParticipated);
answeredSoonActiveDocs += (notAnswered * answeredSoonActiveDocs / answeredNodesParticipated);
} else {
if (askedNodes > answeredNodesParticipated) {
int searchableCopies = (int) searchCluster.dispatchConfig().searchableCopies();
int missingNodes = notAnswered - (searchableCopies - 1);
if (answeredNodesParticipated > 0) {
answeredActiveDocs += (missingNodes * answeredActiveDocs / answeredNodesParticipated);
answeredSoonActiveDocs += (missingNodes * answeredSoonActiveDocs / answeredNodesParticipated);
timedOut = true;
}
}
}
}
private void ejectInvoker(SearchInvoker invoker) {
invokers.remove(invoker);
invoker.release();
}
@Override
protected void release() {
if (!invokers.isEmpty()) {
invokers.forEach(SearchInvoker::close);
invokers.clear();
}
}
@Override
public void responseAvailable(SearchInvoker from) {
if (availableForProcessing != null) {
availableForProcessing.add(from);
}
}
@Override
protected void setMonitor(ResponseMonitor<SearchInvoker> monitor) {
}
protected long currentTime() {
return System.currentTimeMillis();
}
protected LinkedBlockingQueue<SearchInvoker> newQueue() {
return new LinkedBlockingQueue<>();
}
} |
Looks like deserialization would work either way. If we always write the name, the old deserializer code would just return `Optional.empty()` since `NodeFlavors` would not know about non-configured flavor. If we don't write it, then I guess `object.field(hostSpecFlavor).valid()` is `false` so that works too. :+1: | private void toSlime(Flavor flavor, Cursor object) {
if (flavor.isConfigured()) {
object.setString(flavorKey, flavor.name());
}
else {
NodeResources resources = flavor.resources();
Cursor resourcesObject = object.setObject(resourcesKey);
resourcesObject.setDouble(vcpuKey, resources.vcpu());
resourcesObject.setDouble(memoryKey, resources.memoryGb());
resourcesObject.setDouble(diskKey, resources.diskGb());
}
} | object.setString(flavorKey, flavor.name()); | private void toSlime(Flavor flavor, Cursor object) {
if (flavor.isConfigured()) {
object.setString(flavorKey, flavor.name());
}
else {
NodeResources resources = flavor.resources();
Cursor resourcesObject = object.setObject(resourcesKey);
resourcesObject.setDouble(vcpuKey, resources.vcpu());
resourcesObject.setDouble(memoryKey, resources.memoryGb());
resourcesObject.setDouble(diskKey, resources.diskGb());
}
} | class AllocatedHosts {
private static final String mappingKey = "mapping";
private static final String hostSpecKey = "hostSpec";
private static final String hostSpecHostNameKey = "hostName";
private static final String aliasesKey = "aliases";
private static final String hostSpecMembershipKey = "membership";
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String vcpuKey = "vcpu";
private static final String memoryKey = "memory";
private static final String diskKey = "disk";
/** Wanted version */
private static final String hostSpecVespaVersionKey = "vespaVersion";
/** Current version */
private static final String hostSpecCurrentVespaVersionKey = "currentVespaVersion";
private static final String hostSpecNetworkPortsKey = "ports";
private final ImmutableSet<HostSpec> hosts;
AllocatedHosts(Set<HostSpec> hosts) {
this.hosts = ImmutableSet.copyOf(hosts);
}
public static AllocatedHosts withHosts(Set<HostSpec> hosts) {
return new AllocatedHosts(hosts);
}
private void toSlime(Cursor cursor) {
Cursor array = cursor.setArray(mappingKey);
for (HostSpec host : hosts)
toSlime(host, array.addObject().setObject(hostSpecKey));
}
private void toSlime(HostSpec host, Cursor cursor) {
cursor.setString(hostSpecHostNameKey, host.hostname());
aliasesToSlime(host, cursor);
host.membership().ifPresent(membership -> {
cursor.setString(hostSpecMembershipKey, membership.stringValue());
cursor.setString(hostSpecVespaVersionKey, membership.cluster().vespaVersion().toFullString());
});
host.flavor().ifPresent(flavor -> toSlime(flavor, cursor));
host.version().ifPresent(version -> cursor.setString(hostSpecCurrentVespaVersionKey, version.toFullString()));
host.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, cursor.setArray(hostSpecNetworkPortsKey)));
}
private void aliasesToSlime(HostSpec spec, Cursor cursor) {
if (spec.aliases().isEmpty()) return;
Cursor aliases = cursor.setArray(aliasesKey);
for (String alias : spec.aliases())
aliases.addString(alias);
}
/** Returns the hosts of this allocation */
public Set<HostSpec> getHosts() { return hosts; }
private static AllocatedHosts fromSlime(Inspector inspector, Optional<NodeFlavors> nodeFlavors) {
Inspector array = inspector.field(mappingKey);
Set<HostSpec> hosts = new LinkedHashSet<>();
array.traverse((ArrayTraverser)(i, host) -> hosts.add(hostFromSlime(host.field(hostSpecKey), nodeFlavors)));
return new AllocatedHosts(hosts);
}
static HostSpec hostFromSlime(Inspector object, Optional<NodeFlavors> nodeFlavors) {
Optional<ClusterMembership> membership =
object.field(hostSpecMembershipKey).valid() ? Optional.of(membershipFromSlime(object)) : Optional.empty();
Optional<Flavor> flavor = flavorFromSlime(object, nodeFlavors);
Optional<com.yahoo.component.Version> version =
optionalString(object.field(hostSpecCurrentVespaVersionKey)).map(com.yahoo.component.Version::new);
Optional<NetworkPorts> networkPorts =
NetworkPortsSerializer.fromSlime(object.field(hostSpecNetworkPortsKey));
return new HostSpec(object.field(hostSpecHostNameKey).asString(), aliasesFromSlime(object), flavor, membership, version, networkPorts);
}
private static List<String> aliasesFromSlime(Inspector object) {
if ( ! object.field(aliasesKey).valid()) return Collections.emptyList();
List<String> aliases = new ArrayList<>();
object.field(aliasesKey).traverse((ArrayTraverser)(index, alias) -> aliases.add(alias.asString()));
return aliases;
}
private static Optional<Flavor> flavorFromSlime(Inspector object, Optional<NodeFlavors> nodeFlavors) {
if (object.field(flavorKey).valid() && nodeFlavors.isPresent() && nodeFlavors.get().exists(object.field(flavorKey).asString())) {
return nodeFlavors.get().getFlavor(object.field(flavorKey).asString());
}
else if (object.field(resourcesKey).valid()) {
Inspector resources = object.field(resourcesKey);
return Optional.of(new Flavor(new NodeResources(resources.field(vcpuKey).asDouble(),
resources.field(memoryKey).asDouble(),
resources.field(diskKey).asDouble())));
}
else {
return Optional.empty();
}
}
private static ClusterMembership membershipFromSlime(Inspector object) {
return ClusterMembership.from(object.field(hostSpecMembershipKey).asString(),
com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersionKey).asString()));
}
private static Optional<String> optionalString(Inspector inspector) {
if ( ! inspector.valid()) return Optional.empty();
return Optional.of(inspector.asString());
}
public byte[] toJson() throws IOException {
Slime slime = new Slime();
toSlime(slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
public static AllocatedHosts fromJson(byte[] json, Optional<NodeFlavors> nodeFlavors) {
return fromSlime(SlimeUtils.jsonToSlime(json).get(), nodeFlavors);
}
@Override
public boolean equals(Object other) {
if (other == this) return true;
if ( ! (other instanceof AllocatedHosts)) return false;
return ((AllocatedHosts) other).hosts.equals(this.hosts);
}
@Override
public int hashCode() {
return hosts.hashCode();
}
@Override
public String toString() {
return hosts.toString();
}
} | class AllocatedHosts {
private static final String mappingKey = "mapping";
private static final String hostSpecKey = "hostSpec";
private static final String hostSpecHostNameKey = "hostName";
private static final String aliasesKey = "aliases";
private static final String hostSpecMembershipKey = "membership";
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String vcpuKey = "vcpu";
private static final String memoryKey = "memory";
private static final String diskKey = "disk";
/** Wanted version */
private static final String hostSpecVespaVersionKey = "vespaVersion";
/** Current version */
private static final String hostSpecCurrentVespaVersionKey = "currentVespaVersion";
private static final String hostSpecNetworkPortsKey = "ports";
private final ImmutableSet<HostSpec> hosts;
AllocatedHosts(Set<HostSpec> hosts) {
this.hosts = ImmutableSet.copyOf(hosts);
}
public static AllocatedHosts withHosts(Set<HostSpec> hosts) {
return new AllocatedHosts(hosts);
}
private void toSlime(Cursor cursor) {
Cursor array = cursor.setArray(mappingKey);
for (HostSpec host : hosts)
toSlime(host, array.addObject().setObject(hostSpecKey));
}
private void toSlime(HostSpec host, Cursor cursor) {
cursor.setString(hostSpecHostNameKey, host.hostname());
aliasesToSlime(host, cursor);
host.membership().ifPresent(membership -> {
cursor.setString(hostSpecMembershipKey, membership.stringValue());
cursor.setString(hostSpecVespaVersionKey, membership.cluster().vespaVersion().toFullString());
});
host.flavor().ifPresent(flavor -> toSlime(flavor, cursor));
host.version().ifPresent(version -> cursor.setString(hostSpecCurrentVespaVersionKey, version.toFullString()));
host.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, cursor.setArray(hostSpecNetworkPortsKey)));
}
private void aliasesToSlime(HostSpec spec, Cursor cursor) {
if (spec.aliases().isEmpty()) return;
Cursor aliases = cursor.setArray(aliasesKey);
for (String alias : spec.aliases())
aliases.addString(alias);
}
/** Returns the hosts of this allocation */
public Set<HostSpec> getHosts() { return hosts; }
private static AllocatedHosts fromSlime(Inspector inspector, Optional<NodeFlavors> nodeFlavors) {
Inspector array = inspector.field(mappingKey);
Set<HostSpec> hosts = new LinkedHashSet<>();
array.traverse((ArrayTraverser)(i, host) -> hosts.add(hostFromSlime(host.field(hostSpecKey), nodeFlavors)));
return new AllocatedHosts(hosts);
}
static HostSpec hostFromSlime(Inspector object, Optional<NodeFlavors> nodeFlavors) {
Optional<ClusterMembership> membership =
object.field(hostSpecMembershipKey).valid() ? Optional.of(membershipFromSlime(object)) : Optional.empty();
Optional<Flavor> flavor = flavorFromSlime(object, nodeFlavors);
Optional<com.yahoo.component.Version> version =
optionalString(object.field(hostSpecCurrentVespaVersionKey)).map(com.yahoo.component.Version::new);
Optional<NetworkPorts> networkPorts =
NetworkPortsSerializer.fromSlime(object.field(hostSpecNetworkPortsKey));
return new HostSpec(object.field(hostSpecHostNameKey).asString(), aliasesFromSlime(object), flavor, membership, version, networkPorts);
}
private static List<String> aliasesFromSlime(Inspector object) {
if ( ! object.field(aliasesKey).valid()) return Collections.emptyList();
List<String> aliases = new ArrayList<>();
object.field(aliasesKey).traverse((ArrayTraverser)(index, alias) -> aliases.add(alias.asString()));
return aliases;
}
private static Optional<Flavor> flavorFromSlime(Inspector object, Optional<NodeFlavors> nodeFlavors) {
if (object.field(flavorKey).valid() && nodeFlavors.isPresent() && nodeFlavors.get().exists(object.field(flavorKey).asString())) {
return nodeFlavors.get().getFlavor(object.field(flavorKey).asString());
}
else if (object.field(resourcesKey).valid()) {
Inspector resources = object.field(resourcesKey);
return Optional.of(new Flavor(new NodeResources(resources.field(vcpuKey).asDouble(),
resources.field(memoryKey).asDouble(),
resources.field(diskKey).asDouble())));
}
else {
return Optional.empty();
}
}
private static ClusterMembership membershipFromSlime(Inspector object) {
return ClusterMembership.from(object.field(hostSpecMembershipKey).asString(),
com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersionKey).asString()));
}
private static Optional<String> optionalString(Inspector inspector) {
if ( ! inspector.valid()) return Optional.empty();
return Optional.of(inspector.asString());
}
public byte[] toJson() throws IOException {
Slime slime = new Slime();
toSlime(slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
public static AllocatedHosts fromJson(byte[] json, Optional<NodeFlavors> nodeFlavors) {
return fromSlime(SlimeUtils.jsonToSlime(json).get(), nodeFlavors);
}
@Override
public boolean equals(Object other) {
if (other == this) return true;
if ( ! (other instanceof AllocatedHosts)) return false;
return ((AllocatedHosts) other).hosts.equals(this.hosts);
}
@Override
public int hashCode() {
return hosts.hashCode();
}
@Override
public String toString() {
return hosts.toString();
}
} |
Changed to 2 in new commit. I'll add the metrics as a separate sprint task | private void insertNetworkErrors() {
boolean asErrors = answeredNodes == 0;
if (!invokers.isEmpty()) {
String keys = invokers.stream().map(SearchInvoker::distributionKey).map(dk -> dk.map(i -> i.toString()).orElse("(unspecified)"))
.collect(Collectors.joining(", "));
if (asErrors) {
result.hits().addError(ErrorMessage
.createTimeout("Backend communication timeout on all nodes in group (distribution-keys: " + keys + ")"));
} else {
query.trace("Backend communication timeout on nodes with distribution-keys: " + keys, 5);
}
timedOut = true;
}
if (alreadyFailedNodes != null) {
var message = "Connection failure on nodes with distribution-keys: "
+ alreadyFailedNodes.stream().map(v -> Integer.toString(v)).collect(Collectors.joining(", "));
if (asErrors) {
result.hits().addError(ErrorMessage.createBackendCommunicationError(message));
} else {
query.trace(message, 5);
}
int failed = alreadyFailedNodes.size();
askedNodes += failed;
answeredNodes += failed;
}
} | query.trace("Backend communication timeout on nodes with distribution-keys: " + keys, 5); | private void insertNetworkErrors() {
boolean asErrors = answeredNodes == 0;
if (!invokers.isEmpty()) {
String keys = invokers.stream().map(SearchInvoker::distributionKey).map(dk -> dk.map(i -> i.toString()).orElse("(unspecified)"))
.collect(Collectors.joining(", "));
if (asErrors) {
result.hits().addError(ErrorMessage
.createTimeout("Backend communication timeout on all nodes in group (distribution-keys: " + keys + ")"));
} else {
query.trace("Backend communication timeout on nodes with distribution-keys: " + keys, 2);
}
timedOut = true;
}
if (alreadyFailedNodes != null) {
var message = "Connection failure on nodes with distribution-keys: "
+ alreadyFailedNodes.stream().map(v -> Integer.toString(v)).collect(Collectors.joining(", "));
if (asErrors) {
result.hits().addError(ErrorMessage.createBackendCommunicationError(message));
} else {
query.trace(message, 2);
}
int failed = alreadyFailedNodes.size();
askedNodes += failed;
answeredNodes += failed;
}
} | class InterleavedSearchInvoker extends SearchInvoker implements ResponseMonitor<SearchInvoker> {
private static final Logger log = Logger.getLogger(InterleavedSearchInvoker.class.getName());
private final Set<SearchInvoker> invokers;
private final VespaBackEndSearcher searcher;
private final SearchCluster searchCluster;
private final LinkedBlockingQueue<SearchInvoker> availableForProcessing;
private final Set<Integer> alreadyFailedNodes;
private Query query;
private boolean adaptiveTimeoutCalculated = false;
private long adaptiveTimeoutMin = 0;
private long adaptiveTimeoutMax = 0;
private long deadline = 0;
private Result result = null;
private long answeredDocs = 0;
private long answeredActiveDocs = 0;
private long answeredSoonActiveDocs = 0;
private int askedNodes = 0;
private int answeredNodes = 0;
private int answeredNodesParticipated = 0;
private boolean timedOut = false;
private boolean degradedByMatchPhase = false;
private boolean trimResult = false;
public InterleavedSearchInvoker(Collection<SearchInvoker> invokers, VespaBackEndSearcher searcher, SearchCluster searchCluster, Set<Integer> alreadyFailedNodes) {
super(Optional.empty());
this.invokers = Collections.newSetFromMap(new IdentityHashMap<>());
this.invokers.addAll(invokers);
this.searcher = searcher;
this.searchCluster = searchCluster;
this.availableForProcessing = newQueue();
this.alreadyFailedNodes = alreadyFailedNodes;
}
/**
* Sends search queries to the contained {@link SearchInvoker} sub-invokers. If the search
* query has an offset other than zero, it will be reset to zero and the expected hit amount
* will be adjusted accordingly.
*/
@Override
protected void sendSearchRequest(Query query, QueryPacket queryPacket) throws IOException {
this.query = query;
invokers.forEach(invoker -> invoker.setMonitor(this));
deadline = currentTime() + query.getTimeLeft();
int originalHits = query.getHits();
int originalOffset = query.getOffset();
query.setHits(query.getHits() + query.getOffset());
query.setOffset(0);
trimResult = originalHits != query.getHits() || originalOffset != query.getOffset();
for (SearchInvoker invoker : invokers) {
invoker.sendSearchRequest(query, null);
askedNodes++;
}
query.setHits(originalHits);
query.setOffset(originalOffset);
}
@Override
protected Result getSearchResult(Execution execution) throws IOException {
long nextTimeout = query.getTimeLeft();
try {
while (!invokers.isEmpty() && nextTimeout >= 0) {
SearchInvoker invoker = availableForProcessing.poll(nextTimeout, TimeUnit.MILLISECONDS);
if (invoker == null) {
log.fine(() -> "Search timed out with " + askedNodes + " requests made, " + answeredNodes + " responses received");
break;
} else {
mergeResult(invoker.getSearchResult(execution));
ejectInvoker(invoker);
}
nextTimeout = nextTimeout();
}
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted while waiting for search results", e);
}
if (result == null) {
result = new Result(query);
}
insertNetworkErrors();
result.setCoverage(createCoverage());
trimResult(execution);
Result ret = result;
result = null;
return ret;
}
private void trimResult(Execution execution) {
if (trimResult) {
if (result.getHitOrderer() != null) {
searcher.fill(result, Execution.ATTRIBUTEPREFETCH, execution);
}
result.hits().trim(query.getOffset(), query.getHits());
}
}
private long nextTimeout() {
DispatchConfig config = searchCluster.dispatchConfig();
double minimumCoverage = config.minSearchCoverage();
if (askedNodes == answeredNodes || minimumCoverage >= 100.0) {
return query.getTimeLeft();
}
int minimumResponses = (int) Math.ceil(askedNodes * minimumCoverage / 100.0);
if (answeredNodes < minimumResponses) {
return query.getTimeLeft();
}
long timeLeft = query.getTimeLeft();
if (!adaptiveTimeoutCalculated) {
adaptiveTimeoutMin = (long) (timeLeft * config.minWaitAfterCoverageFactor());
adaptiveTimeoutMax = (long) (timeLeft * config.maxWaitAfterCoverageFactor());
adaptiveTimeoutCalculated = true;
}
long now = currentTime();
int pendingQueries = askedNodes - answeredNodes;
double missWidth = ((100.0 - config.minSearchCoverage()) * askedNodes) / 100.0 - 1.0;
double slopedWait = adaptiveTimeoutMin;
if (pendingQueries > 1 && missWidth > 0.0) {
slopedWait += ((adaptiveTimeoutMax - adaptiveTimeoutMin) * (pendingQueries - 1)) / missWidth;
}
long nextAdaptive = (long) slopedWait;
if (now + nextAdaptive >= deadline) {
return deadline - now;
}
deadline = now + nextAdaptive;
return nextAdaptive;
}
private void mergeResult(Result partialResult) {
collectCoverage(partialResult.getCoverage(true));
if (result == null) {
result = partialResult;
return;
}
result.mergeWith(partialResult);
result.hits().addAll(partialResult.hits().asUnorderedHits());
}
private void collectCoverage(Coverage source) {
answeredDocs += source.getDocs();
answeredActiveDocs += source.getActive();
answeredSoonActiveDocs += source.getSoonActive();
answeredNodesParticipated += source.getNodes();
answeredNodes++;
degradedByMatchPhase |= source.isDegradedByMatchPhase();
timedOut |= source.isDegradedByTimeout();
}
private Coverage createCoverage() {
adjustDegradedCoverage();
Coverage coverage = new Coverage(answeredDocs, answeredActiveDocs, answeredNodesParticipated, 1);
coverage.setNodesTried(askedNodes);
coverage.setSoonActive(answeredSoonActiveDocs);
int degradedReason = 0;
if (timedOut) {
degradedReason |= (adaptiveTimeoutCalculated ? DEGRADED_BY_ADAPTIVE_TIMEOUT : DEGRADED_BY_TIMEOUT);
}
if (degradedByMatchPhase) {
degradedReason |= DEGRADED_BY_MATCH_PHASE;
}
coverage.setDegradedReason(degradedReason);
return coverage;
}
private void adjustDegradedCoverage() {
if (askedNodes == answeredNodesParticipated) {
return;
}
int notAnswered = askedNodes - answeredNodesParticipated;
if (adaptiveTimeoutCalculated && answeredNodesParticipated > 0) {
answeredActiveDocs += (notAnswered * answeredActiveDocs / answeredNodesParticipated);
answeredSoonActiveDocs += (notAnswered * answeredSoonActiveDocs / answeredNodesParticipated);
} else {
if (askedNodes > answeredNodesParticipated) {
int searchableCopies = (int) searchCluster.dispatchConfig().searchableCopies();
int missingNodes = notAnswered - (searchableCopies - 1);
if (answeredNodesParticipated > 0) {
answeredActiveDocs += (missingNodes * answeredActiveDocs / answeredNodesParticipated);
answeredSoonActiveDocs += (missingNodes * answeredSoonActiveDocs / answeredNodesParticipated);
timedOut = true;
}
}
}
}
private void ejectInvoker(SearchInvoker invoker) {
invokers.remove(invoker);
invoker.release();
}
@Override
protected void release() {
if (!invokers.isEmpty()) {
invokers.forEach(SearchInvoker::close);
invokers.clear();
}
}
@Override
public void responseAvailable(SearchInvoker from) {
if (availableForProcessing != null) {
availableForProcessing.add(from);
}
}
@Override
protected void setMonitor(ResponseMonitor<SearchInvoker> monitor) {
}
protected long currentTime() {
return System.currentTimeMillis();
}
protected LinkedBlockingQueue<SearchInvoker> newQueue() {
return new LinkedBlockingQueue<>();
}
} | class InterleavedSearchInvoker extends SearchInvoker implements ResponseMonitor<SearchInvoker> {
private static final Logger log = Logger.getLogger(InterleavedSearchInvoker.class.getName());
private final Set<SearchInvoker> invokers;
private final VespaBackEndSearcher searcher;
private final SearchCluster searchCluster;
private final LinkedBlockingQueue<SearchInvoker> availableForProcessing;
private final Set<Integer> alreadyFailedNodes;
private Query query;
private boolean adaptiveTimeoutCalculated = false;
private long adaptiveTimeoutMin = 0;
private long adaptiveTimeoutMax = 0;
private long deadline = 0;
private Result result = null;
private long answeredDocs = 0;
private long answeredActiveDocs = 0;
private long answeredSoonActiveDocs = 0;
private int askedNodes = 0;
private int answeredNodes = 0;
private int answeredNodesParticipated = 0;
private boolean timedOut = false;
private boolean degradedByMatchPhase = false;
private boolean trimResult = false;
public InterleavedSearchInvoker(Collection<SearchInvoker> invokers, VespaBackEndSearcher searcher, SearchCluster searchCluster, Set<Integer> alreadyFailedNodes) {
super(Optional.empty());
this.invokers = Collections.newSetFromMap(new IdentityHashMap<>());
this.invokers.addAll(invokers);
this.searcher = searcher;
this.searchCluster = searchCluster;
this.availableForProcessing = newQueue();
this.alreadyFailedNodes = alreadyFailedNodes;
}
/**
* Sends search queries to the contained {@link SearchInvoker} sub-invokers. If the search
* query has an offset other than zero, it will be reset to zero and the expected hit amount
* will be adjusted accordingly.
*/
@Override
protected void sendSearchRequest(Query query, QueryPacket queryPacket) throws IOException {
this.query = query;
invokers.forEach(invoker -> invoker.setMonitor(this));
deadline = currentTime() + query.getTimeLeft();
int originalHits = query.getHits();
int originalOffset = query.getOffset();
query.setHits(query.getHits() + query.getOffset());
query.setOffset(0);
trimResult = originalHits != query.getHits() || originalOffset != query.getOffset();
for (SearchInvoker invoker : invokers) {
invoker.sendSearchRequest(query, null);
askedNodes++;
}
query.setHits(originalHits);
query.setOffset(originalOffset);
}
@Override
protected Result getSearchResult(Execution execution) throws IOException {
long nextTimeout = query.getTimeLeft();
try {
while (!invokers.isEmpty() && nextTimeout >= 0) {
SearchInvoker invoker = availableForProcessing.poll(nextTimeout, TimeUnit.MILLISECONDS);
if (invoker == null) {
log.fine(() -> "Search timed out with " + askedNodes + " requests made, " + answeredNodes + " responses received");
break;
} else {
mergeResult(invoker.getSearchResult(execution));
ejectInvoker(invoker);
}
nextTimeout = nextTimeout();
}
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted while waiting for search results", e);
}
if (result == null) {
result = new Result(query);
}
insertNetworkErrors();
result.setCoverage(createCoverage());
trimResult(execution);
Result ret = result;
result = null;
return ret;
}
private void trimResult(Execution execution) {
if (trimResult) {
if (result.getHitOrderer() != null) {
searcher.fill(result, Execution.ATTRIBUTEPREFETCH, execution);
}
result.hits().trim(query.getOffset(), query.getHits());
}
}
private long nextTimeout() {
DispatchConfig config = searchCluster.dispatchConfig();
double minimumCoverage = config.minSearchCoverage();
if (askedNodes == answeredNodes || minimumCoverage >= 100.0) {
return query.getTimeLeft();
}
int minimumResponses = (int) Math.ceil(askedNodes * minimumCoverage / 100.0);
if (answeredNodes < minimumResponses) {
return query.getTimeLeft();
}
long timeLeft = query.getTimeLeft();
if (!adaptiveTimeoutCalculated) {
adaptiveTimeoutMin = (long) (timeLeft * config.minWaitAfterCoverageFactor());
adaptiveTimeoutMax = (long) (timeLeft * config.maxWaitAfterCoverageFactor());
adaptiveTimeoutCalculated = true;
}
long now = currentTime();
int pendingQueries = askedNodes - answeredNodes;
double missWidth = ((100.0 - config.minSearchCoverage()) * askedNodes) / 100.0 - 1.0;
double slopedWait = adaptiveTimeoutMin;
if (pendingQueries > 1 && missWidth > 0.0) {
slopedWait += ((adaptiveTimeoutMax - adaptiveTimeoutMin) * (pendingQueries - 1)) / missWidth;
}
long nextAdaptive = (long) slopedWait;
if (now + nextAdaptive >= deadline) {
return deadline - now;
}
deadline = now + nextAdaptive;
return nextAdaptive;
}
private void mergeResult(Result partialResult) {
collectCoverage(partialResult.getCoverage(true));
if (result == null) {
result = partialResult;
return;
}
result.mergeWith(partialResult);
result.hits().addAll(partialResult.hits().asUnorderedHits());
}
private void collectCoverage(Coverage source) {
answeredDocs += source.getDocs();
answeredActiveDocs += source.getActive();
answeredSoonActiveDocs += source.getSoonActive();
answeredNodesParticipated += source.getNodes();
answeredNodes++;
degradedByMatchPhase |= source.isDegradedByMatchPhase();
timedOut |= source.isDegradedByTimeout();
}
private Coverage createCoverage() {
adjustDegradedCoverage();
Coverage coverage = new Coverage(answeredDocs, answeredActiveDocs, answeredNodesParticipated, 1);
coverage.setNodesTried(askedNodes);
coverage.setSoonActive(answeredSoonActiveDocs);
int degradedReason = 0;
if (timedOut) {
degradedReason |= (adaptiveTimeoutCalculated ? DEGRADED_BY_ADAPTIVE_TIMEOUT : DEGRADED_BY_TIMEOUT);
}
if (degradedByMatchPhase) {
degradedReason |= DEGRADED_BY_MATCH_PHASE;
}
coverage.setDegradedReason(degradedReason);
return coverage;
}
private void adjustDegradedCoverage() {
if (askedNodes == answeredNodesParticipated) {
return;
}
int notAnswered = askedNodes - answeredNodesParticipated;
if (adaptiveTimeoutCalculated && answeredNodesParticipated > 0) {
answeredActiveDocs += (notAnswered * answeredActiveDocs / answeredNodesParticipated);
answeredSoonActiveDocs += (notAnswered * answeredSoonActiveDocs / answeredNodesParticipated);
} else {
if (askedNodes > answeredNodesParticipated) {
int searchableCopies = (int) searchCluster.dispatchConfig().searchableCopies();
int missingNodes = notAnswered - (searchableCopies - 1);
if (answeredNodesParticipated > 0) {
answeredActiveDocs += (missingNodes * answeredActiveDocs / answeredNodesParticipated);
answeredSoonActiveDocs += (missingNodes * answeredSoonActiveDocs / answeredNodesParticipated);
timedOut = true;
}
}
}
}
private void ejectInvoker(SearchInvoker invoker) {
invokers.remove(invoker);
invoker.release();
}
@Override
protected void release() {
if (!invokers.isEmpty()) {
invokers.forEach(SearchInvoker::close);
invokers.clear();
}
}
@Override
public void responseAvailable(SearchInvoker from) {
if (availableForProcessing != null) {
availableForProcessing.add(from);
}
}
@Override
protected void setMonitor(ResponseMonitor<SearchInvoker> monitor) {
}
protected long currentTime() {
return System.currentTimeMillis();
}
protected LinkedBlockingQueue<SearchInvoker> newQueue() {
return new LinkedBlockingQueue<>();
}
} |
Yep :-) | private void toSlime(Flavor flavor, Cursor object) {
if (flavor.isConfigured()) {
object.setString(flavorKey, flavor.name());
}
else {
NodeResources resources = flavor.resources();
Cursor resourcesObject = object.setObject(resourcesKey);
resourcesObject.setDouble(vcpuKey, resources.vcpu());
resourcesObject.setDouble(memoryKey, resources.memoryGb());
resourcesObject.setDouble(diskKey, resources.diskGb());
}
} | object.setString(flavorKey, flavor.name()); | private void toSlime(Flavor flavor, Cursor object) {
if (flavor.isConfigured()) {
object.setString(flavorKey, flavor.name());
}
else {
NodeResources resources = flavor.resources();
Cursor resourcesObject = object.setObject(resourcesKey);
resourcesObject.setDouble(vcpuKey, resources.vcpu());
resourcesObject.setDouble(memoryKey, resources.memoryGb());
resourcesObject.setDouble(diskKey, resources.diskGb());
}
} | class AllocatedHosts {
private static final String mappingKey = "mapping";
private static final String hostSpecKey = "hostSpec";
private static final String hostSpecHostNameKey = "hostName";
private static final String aliasesKey = "aliases";
private static final String hostSpecMembershipKey = "membership";
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String vcpuKey = "vcpu";
private static final String memoryKey = "memory";
private static final String diskKey = "disk";
/** Wanted version */
private static final String hostSpecVespaVersionKey = "vespaVersion";
/** Current version */
private static final String hostSpecCurrentVespaVersionKey = "currentVespaVersion";
private static final String hostSpecNetworkPortsKey = "ports";
private final ImmutableSet<HostSpec> hosts;
AllocatedHosts(Set<HostSpec> hosts) {
this.hosts = ImmutableSet.copyOf(hosts);
}
public static AllocatedHosts withHosts(Set<HostSpec> hosts) {
return new AllocatedHosts(hosts);
}
private void toSlime(Cursor cursor) {
Cursor array = cursor.setArray(mappingKey);
for (HostSpec host : hosts)
toSlime(host, array.addObject().setObject(hostSpecKey));
}
private void toSlime(HostSpec host, Cursor cursor) {
cursor.setString(hostSpecHostNameKey, host.hostname());
aliasesToSlime(host, cursor);
host.membership().ifPresent(membership -> {
cursor.setString(hostSpecMembershipKey, membership.stringValue());
cursor.setString(hostSpecVespaVersionKey, membership.cluster().vespaVersion().toFullString());
});
host.flavor().ifPresent(flavor -> toSlime(flavor, cursor));
host.version().ifPresent(version -> cursor.setString(hostSpecCurrentVespaVersionKey, version.toFullString()));
host.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, cursor.setArray(hostSpecNetworkPortsKey)));
}
private void aliasesToSlime(HostSpec spec, Cursor cursor) {
if (spec.aliases().isEmpty()) return;
Cursor aliases = cursor.setArray(aliasesKey);
for (String alias : spec.aliases())
aliases.addString(alias);
}
/** Returns the hosts of this allocation */
public Set<HostSpec> getHosts() { return hosts; }
private static AllocatedHosts fromSlime(Inspector inspector, Optional<NodeFlavors> nodeFlavors) {
Inspector array = inspector.field(mappingKey);
Set<HostSpec> hosts = new LinkedHashSet<>();
array.traverse((ArrayTraverser)(i, host) -> hosts.add(hostFromSlime(host.field(hostSpecKey), nodeFlavors)));
return new AllocatedHosts(hosts);
}
static HostSpec hostFromSlime(Inspector object, Optional<NodeFlavors> nodeFlavors) {
Optional<ClusterMembership> membership =
object.field(hostSpecMembershipKey).valid() ? Optional.of(membershipFromSlime(object)) : Optional.empty();
Optional<Flavor> flavor = flavorFromSlime(object, nodeFlavors);
Optional<com.yahoo.component.Version> version =
optionalString(object.field(hostSpecCurrentVespaVersionKey)).map(com.yahoo.component.Version::new);
Optional<NetworkPorts> networkPorts =
NetworkPortsSerializer.fromSlime(object.field(hostSpecNetworkPortsKey));
return new HostSpec(object.field(hostSpecHostNameKey).asString(), aliasesFromSlime(object), flavor, membership, version, networkPorts);
}
private static List<String> aliasesFromSlime(Inspector object) {
if ( ! object.field(aliasesKey).valid()) return Collections.emptyList();
List<String> aliases = new ArrayList<>();
object.field(aliasesKey).traverse((ArrayTraverser)(index, alias) -> aliases.add(alias.asString()));
return aliases;
}
private static Optional<Flavor> flavorFromSlime(Inspector object, Optional<NodeFlavors> nodeFlavors) {
if (object.field(flavorKey).valid() && nodeFlavors.isPresent() && nodeFlavors.get().exists(object.field(flavorKey).asString())) {
return nodeFlavors.get().getFlavor(object.field(flavorKey).asString());
}
else if (object.field(resourcesKey).valid()) {
Inspector resources = object.field(resourcesKey);
return Optional.of(new Flavor(new NodeResources(resources.field(vcpuKey).asDouble(),
resources.field(memoryKey).asDouble(),
resources.field(diskKey).asDouble())));
}
else {
return Optional.empty();
}
}
private static ClusterMembership membershipFromSlime(Inspector object) {
return ClusterMembership.from(object.field(hostSpecMembershipKey).asString(),
com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersionKey).asString()));
}
private static Optional<String> optionalString(Inspector inspector) {
if ( ! inspector.valid()) return Optional.empty();
return Optional.of(inspector.asString());
}
public byte[] toJson() throws IOException {
Slime slime = new Slime();
toSlime(slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
public static AllocatedHosts fromJson(byte[] json, Optional<NodeFlavors> nodeFlavors) {
return fromSlime(SlimeUtils.jsonToSlime(json).get(), nodeFlavors);
}
@Override
public boolean equals(Object other) {
if (other == this) return true;
if ( ! (other instanceof AllocatedHosts)) return false;
return ((AllocatedHosts) other).hosts.equals(this.hosts);
}
@Override
public int hashCode() {
return hosts.hashCode();
}
@Override
public String toString() {
return hosts.toString();
}
} | class AllocatedHosts {
private static final String mappingKey = "mapping";
private static final String hostSpecKey = "hostSpec";
private static final String hostSpecHostNameKey = "hostName";
private static final String aliasesKey = "aliases";
private static final String hostSpecMembershipKey = "membership";
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String vcpuKey = "vcpu";
private static final String memoryKey = "memory";
private static final String diskKey = "disk";
/** Wanted version */
private static final String hostSpecVespaVersionKey = "vespaVersion";
/** Current version */
private static final String hostSpecCurrentVespaVersionKey = "currentVespaVersion";
private static final String hostSpecNetworkPortsKey = "ports";
private final ImmutableSet<HostSpec> hosts;
AllocatedHosts(Set<HostSpec> hosts) {
this.hosts = ImmutableSet.copyOf(hosts);
}
public static AllocatedHosts withHosts(Set<HostSpec> hosts) {
return new AllocatedHosts(hosts);
}
private void toSlime(Cursor cursor) {
Cursor array = cursor.setArray(mappingKey);
for (HostSpec host : hosts)
toSlime(host, array.addObject().setObject(hostSpecKey));
}
private void toSlime(HostSpec host, Cursor cursor) {
cursor.setString(hostSpecHostNameKey, host.hostname());
aliasesToSlime(host, cursor);
host.membership().ifPresent(membership -> {
cursor.setString(hostSpecMembershipKey, membership.stringValue());
cursor.setString(hostSpecVespaVersionKey, membership.cluster().vespaVersion().toFullString());
});
host.flavor().ifPresent(flavor -> toSlime(flavor, cursor));
host.version().ifPresent(version -> cursor.setString(hostSpecCurrentVespaVersionKey, version.toFullString()));
host.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, cursor.setArray(hostSpecNetworkPortsKey)));
}
private void aliasesToSlime(HostSpec spec, Cursor cursor) {
if (spec.aliases().isEmpty()) return;
Cursor aliases = cursor.setArray(aliasesKey);
for (String alias : spec.aliases())
aliases.addString(alias);
}
/** Returns the hosts of this allocation */
public Set<HostSpec> getHosts() { return hosts; }
private static AllocatedHosts fromSlime(Inspector inspector, Optional<NodeFlavors> nodeFlavors) {
Inspector array = inspector.field(mappingKey);
Set<HostSpec> hosts = new LinkedHashSet<>();
array.traverse((ArrayTraverser)(i, host) -> hosts.add(hostFromSlime(host.field(hostSpecKey), nodeFlavors)));
return new AllocatedHosts(hosts);
}
static HostSpec hostFromSlime(Inspector object, Optional<NodeFlavors> nodeFlavors) {
Optional<ClusterMembership> membership =
object.field(hostSpecMembershipKey).valid() ? Optional.of(membershipFromSlime(object)) : Optional.empty();
Optional<Flavor> flavor = flavorFromSlime(object, nodeFlavors);
Optional<com.yahoo.component.Version> version =
optionalString(object.field(hostSpecCurrentVespaVersionKey)).map(com.yahoo.component.Version::new);
Optional<NetworkPorts> networkPorts =
NetworkPortsSerializer.fromSlime(object.field(hostSpecNetworkPortsKey));
return new HostSpec(object.field(hostSpecHostNameKey).asString(), aliasesFromSlime(object), flavor, membership, version, networkPorts);
}
private static List<String> aliasesFromSlime(Inspector object) {
if ( ! object.field(aliasesKey).valid()) return Collections.emptyList();
List<String> aliases = new ArrayList<>();
object.field(aliasesKey).traverse((ArrayTraverser)(index, alias) -> aliases.add(alias.asString()));
return aliases;
}
private static Optional<Flavor> flavorFromSlime(Inspector object, Optional<NodeFlavors> nodeFlavors) {
if (object.field(flavorKey).valid() && nodeFlavors.isPresent() && nodeFlavors.get().exists(object.field(flavorKey).asString())) {
return nodeFlavors.get().getFlavor(object.field(flavorKey).asString());
}
else if (object.field(resourcesKey).valid()) {
Inspector resources = object.field(resourcesKey);
return Optional.of(new Flavor(new NodeResources(resources.field(vcpuKey).asDouble(),
resources.field(memoryKey).asDouble(),
resources.field(diskKey).asDouble())));
}
else {
return Optional.empty();
}
}
private static ClusterMembership membershipFromSlime(Inspector object) {
return ClusterMembership.from(object.field(hostSpecMembershipKey).asString(),
com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersionKey).asString()));
}
private static Optional<String> optionalString(Inspector inspector) {
if ( ! inspector.valid()) return Optional.empty();
return Optional.of(inspector.asString());
}
public byte[] toJson() throws IOException {
Slime slime = new Slime();
toSlime(slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
public static AllocatedHosts fromJson(byte[] json, Optional<NodeFlavors> nodeFlavors) {
return fromSlime(SlimeUtils.jsonToSlime(json).get(), nodeFlavors);
}
@Override
public boolean equals(Object other) {
if (other == this) return true;
if ( ! (other instanceof AllocatedHosts)) return false;
return ((AllocatedHosts) other).hosts.equals(this.hosts);
}
@Override
public int hashCode() {
return hosts.hashCode();
}
@Override
public String toString() {
return hosts.toString();
}
} |
Use junit assertions with message instead of throwing. | public void nonOverlappingGroups() {
for (PathGroup pg : PathGroup.all()) {
for (PathGroup pg2 : PathGroup.all()) {
if (pg == pg2) continue;
Set<String> overlapping = new LinkedHashSet<>(pg.pathSpecs);
overlapping.retainAll(pg2.pathSpecs);
if (!overlapping.isEmpty()) {
throw new AssertionError("The following path specs overlap in " + pg + " and " + pg2 +
": " + overlapping);
}
}
}
} | throw new AssertionError("The following path specs overlap in " + pg + " and " + pg2 + | public void nonOverlappingGroups() {
for (PathGroup pg : PathGroup.all()) {
for (PathGroup pg2 : PathGroup.all()) {
if (pg == pg2) continue;
Set<String> overlapping = new LinkedHashSet<>(pg.pathSpecs);
overlapping.retainAll(pg2.pathSpecs);
if (!overlapping.isEmpty()) {
fail("The following path specs overlap in " + pg + " and " + pg2 + ": " + overlapping);
}
}
}
} | class PathGroupTest {
@Test
@Test
public void uniqueMatches() {
for (PathGroup group : PathGroup.values()) {
for (String path1 : group.pathSpecs)
for (String path2 : group.pathSpecs) {
if (path1 == path2) continue;
String[] parts1 = path1.split("/");
String[] parts2 = path2.split("/");
int end = Math.min(parts1.length, parts2.length);
if (end < parts1.length && ! parts2[end - 1].equals("{*}") && ! parts1[end].equals("{*}")) continue;
if (end < parts2.length && ! parts1[end - 1].equals("{*}") && ! parts2[end].equals("{*}")) continue;
int i;
for (i = 0; i < end; i++)
if ( ! parts1[i].equals(parts2[i])
&& ! (parts1[i].startsWith("{") && parts1[i].endsWith("}"))
&& ! (parts2[i].startsWith("{") && parts2[i].endsWith("}"))) break;
if (i == end) throw new AssertionError("Paths '" + path1 + "' and '" + path2 +"' overlap.");
}
}
}
} | class PathGroupTest {
@Test
@Test
public void uniqueMatches() {
for (PathGroup group : PathGroup.values())
for (String path1 : group.pathSpecs)
for (String path2 : group.pathSpecs) {
if (path1 == path2) continue;
String[] parts1 = path1.split("/");
String[] parts2 = path2.split("/");
int end = Math.min(parts1.length, parts2.length);
if (end < parts1.length && ! parts2[end - 1].equals("{*}") && ! parts1[end].equals("{*}")) continue;
if (end < parts2.length && ! parts1[end - 1].equals("{*}") && ! parts2[end].equals("{*}")) continue;
int i;
for (i = 0; i < end; i++)
if ( ! parts1[i].equals(parts2[i])
&& ! (parts1[i].startsWith("{") && parts1[i].endsWith("}"))
&& ! (parts2[i].startsWith("{") && parts2[i].endsWith("}"))) break;
if (i == end) fail("Paths '" + path1 + "' and '" + path2 + "' overlap.");
}
}
} |
Same as above. | public void uniqueMatches() {
for (PathGroup group : PathGroup.values()) {
for (String path1 : group.pathSpecs)
for (String path2 : group.pathSpecs) {
if (path1 == path2) continue;
String[] parts1 = path1.split("/");
String[] parts2 = path2.split("/");
int end = Math.min(parts1.length, parts2.length);
if (end < parts1.length && ! parts2[end - 1].equals("{*}") && ! parts1[end].equals("{*}")) continue;
if (end < parts2.length && ! parts1[end - 1].equals("{*}") && ! parts2[end].equals("{*}")) continue;
int i;
for (i = 0; i < end; i++)
if ( ! parts1[i].equals(parts2[i])
&& ! (parts1[i].startsWith("{") && parts1[i].endsWith("}"))
&& ! (parts2[i].startsWith("{") && parts2[i].endsWith("}"))) break;
if (i == end) throw new AssertionError("Paths '" + path1 + "' and '" + path2 +"' overlap.");
}
}
} | if (i == end) throw new AssertionError("Paths '" + path1 + "' and '" + path2 +"' overlap."); | public void uniqueMatches() {
for (PathGroup group : PathGroup.values())
for (String path1 : group.pathSpecs)
for (String path2 : group.pathSpecs) {
if (path1 == path2) continue;
String[] parts1 = path1.split("/");
String[] parts2 = path2.split("/");
int end = Math.min(parts1.length, parts2.length);
if (end < parts1.length && ! parts2[end - 1].equals("{*}") && ! parts1[end].equals("{*}")) continue;
if (end < parts2.length && ! parts1[end - 1].equals("{*}") && ! parts2[end].equals("{*}")) continue;
int i;
for (i = 0; i < end; i++)
if ( ! parts1[i].equals(parts2[i])
&& ! (parts1[i].startsWith("{") && parts1[i].endsWith("}"))
&& ! (parts2[i].startsWith("{") && parts2[i].endsWith("}"))) break;
if (i == end) fail("Paths '" + path1 + "' and '" + path2 + "' overlap.");
}
} | class PathGroupTest {
@Test
public void nonOverlappingGroups() {
for (PathGroup pg : PathGroup.all()) {
for (PathGroup pg2 : PathGroup.all()) {
if (pg == pg2) continue;
Set<String> overlapping = new LinkedHashSet<>(pg.pathSpecs);
overlapping.retainAll(pg2.pathSpecs);
if (!overlapping.isEmpty()) {
throw new AssertionError("The following path specs overlap in " + pg + " and " + pg2 +
": " + overlapping);
}
}
}
}
@Test
} | class PathGroupTest {
@Test
public void nonOverlappingGroups() {
for (PathGroup pg : PathGroup.all()) {
for (PathGroup pg2 : PathGroup.all()) {
if (pg == pg2) continue;
Set<String> overlapping = new LinkedHashSet<>(pg.pathSpecs);
overlapping.retainAll(pg2.pathSpecs);
if (!overlapping.isEmpty()) {
fail("The following path specs overlap in " + pg + " and " + pg2 + ": " + overlapping);
}
}
}
}
@Test
} |
What about clients that will try to use the old port? How/when should they switch to the new port number? | public int getWantedPort() {
return 19092;
} | return 19092; | public int getWantedPort() {
return 19092;
} | class MetricsProxyContainer extends Container {
public MetricsProxyContainer(AbstractConfigProducer parent, int index) {
super(parent, "" + index, index);
}
@Override
protected ContainerServiceType myServiceType() {
return METRICS_PROXY_CONTAINER;
}
@Override
@Override
public boolean requiresWantedPort() {
return true;
}
@Override
public int getPortCount() {
return super.getPortCount() + 1;
}
@Override
protected void tagServers() {
super.tagServers();
portsMeta.on(numHttpServerPorts).tag("rpc").tag("metrics");
}
} | class MetricsProxyContainer extends Container {
public MetricsProxyContainer(AbstractConfigProducer parent, int index) {
super(parent, "" + index, index);
setProp("clustertype", "admin");
setProp("index", String.valueOf(index));
}
@Override
protected ContainerServiceType myServiceType() {
return METRICS_PROXY_CONTAINER;
}
@Override
@Override
public boolean requiresWantedPort() {
return true;
}
@Override
public int getPortCount() {
return super.getPortCount() + 1;
}
@Override
protected void tagServers() {
super.tagServers();
portsMeta.on(numHttpServerPorts).tag("rpc").tag("metrics");
}
} |
The final version will most likely use the current port. | public int getWantedPort() {
return 19092;
} | return 19092; | public int getWantedPort() {
return 19092;
} | class MetricsProxyContainer extends Container {
public MetricsProxyContainer(AbstractConfigProducer parent, int index) {
super(parent, "" + index, index);
}
@Override
protected ContainerServiceType myServiceType() {
return METRICS_PROXY_CONTAINER;
}
@Override
@Override
public boolean requiresWantedPort() {
return true;
}
@Override
public int getPortCount() {
return super.getPortCount() + 1;
}
@Override
protected void tagServers() {
super.tagServers();
portsMeta.on(numHttpServerPorts).tag("rpc").tag("metrics");
}
} | class MetricsProxyContainer extends Container {
public MetricsProxyContainer(AbstractConfigProducer parent, int index) {
super(parent, "" + index, index);
setProp("clustertype", "admin");
setProp("index", String.valueOf(index));
}
@Override
protected ContainerServiceType myServiceType() {
return METRICS_PROXY_CONTAINER;
}
@Override
@Override
public boolean requiresWantedPort() {
return true;
}
@Override
public int getPortCount() {
return super.getPortCount() + 1;
}
@Override
protected void tagServers() {
super.tagServers();
portsMeta.on(numHttpServerPorts).tag("rpc").tag("metrics");
}
} |
👍 | public int getWantedPort() {
return 19092;
} | return 19092; | public int getWantedPort() {
return 19092;
} | class MetricsProxyContainer extends Container {
public MetricsProxyContainer(AbstractConfigProducer parent, int index) {
super(parent, "" + index, index);
}
@Override
protected ContainerServiceType myServiceType() {
return METRICS_PROXY_CONTAINER;
}
@Override
@Override
public boolean requiresWantedPort() {
return true;
}
@Override
public int getPortCount() {
return super.getPortCount() + 1;
}
@Override
protected void tagServers() {
super.tagServers();
portsMeta.on(numHttpServerPorts).tag("rpc").tag("metrics");
}
} | class MetricsProxyContainer extends Container {
public MetricsProxyContainer(AbstractConfigProducer parent, int index) {
super(parent, "" + index, index);
setProp("clustertype", "admin");
setProp("index", String.valueOf(index));
}
@Override
protected ContainerServiceType myServiceType() {
return METRICS_PROXY_CONTAINER;
}
@Override
@Override
public boolean requiresWantedPort() {
return true;
}
@Override
public int getPortCount() {
return super.getPortCount() + 1;
}
@Override
protected void tagServers() {
super.tagServers();
portsMeta.on(numHttpServerPorts).tag("rpc").tag("metrics");
}
} |
Consider remove comment | void simulateBroadcastTick(ClusterFixture cf) {
broadcaster.processResponses();
broadcaster.broadcastNewStateBundleIfRequired(dbContextFrom(cf.cluster()), mockCommunicator);
try {
broadcaster.checkIfClusterStateIsAckedByAllDistributors(
mockDatabaseHandler, dbContextFrom(cf.cluster()), mockFleetController);
} catch (Exception e) {
throw new RuntimeException(e);
}
broadcaster.broadcastStateActivationsIfRequired(dbContextFrom(cf.cluster()), mockCommunicator);
} | broadcaster.broadcastStateActivationsIfRequired(dbContextFrom(cf.cluster()), mockCommunicator); | void simulateBroadcastTick(ClusterFixture cf) {
broadcaster.processResponses();
broadcaster.broadcastNewStateBundleIfRequired(dbContextFrom(cf.cluster()), mockCommunicator);
try {
broadcaster.checkIfClusterStateIsAckedByAllDistributors(
mockDatabaseHandler, dbContextFrom(cf.cluster()), mockFleetController);
} catch (Exception e) {
throw new RuntimeException(e);
}
broadcaster.broadcastStateActivationsIfRequired(dbContextFrom(cf.cluster()), mockCommunicator);
} | class Fixture {
FakeTimer timer = new FakeTimer();
final Object monitor = new Object();
SystemStateBroadcaster broadcaster = new SystemStateBroadcaster(timer, monitor);
Communicator mockCommunicator = mock(Communicator.class);
DatabaseHandler mockDatabaseHandler = mock(DatabaseHandler.class);
FleetController mockFleetController = mock(FleetController.class);
void simulateNodePartitionedAwaySilently(ClusterFixture cf) {
cf.cluster().getNodeInfo(Node.ofStorage(0)).setStartTimestamp(600);
cf.cluster().getNodeInfo(Node.ofStorage(1)).setStartTimestamp(700);
cf.cluster().getNodeInfo(Node.ofDistributor(0)).setStartTimestamp(500);
cf.cluster().getNodeInfo(Node.ofDistributor(0)).setReportedState(new NodeState(NodeType.DISTRIBUTOR, State.UP).setStartTimestamp(500), 1000);
cf.cluster().getNodeInfo(Node.ofDistributor(0)).setReportedState(new NodeState(NodeType.DISTRIBUTOR, State.DOWN).setStartTimestamp(500), 2000);
cf.cluster().getNodeInfo(Node.ofDistributor(0)).setReportedState(new NodeState(NodeType.DISTRIBUTOR, State.UP).setStartTimestamp(500), 3000);
}
} | class Fixture {
FakeTimer timer = new FakeTimer();
final Object monitor = new Object();
SystemStateBroadcaster broadcaster = new SystemStateBroadcaster(timer, monitor);
Communicator mockCommunicator = mock(Communicator.class);
DatabaseHandler mockDatabaseHandler = mock(DatabaseHandler.class);
FleetController mockFleetController = mock(FleetController.class);
void simulateNodePartitionedAwaySilently(ClusterFixture cf) {
cf.cluster().getNodeInfo(Node.ofStorage(0)).setStartTimestamp(600);
cf.cluster().getNodeInfo(Node.ofStorage(1)).setStartTimestamp(700);
cf.cluster().getNodeInfo(Node.ofDistributor(0)).setStartTimestamp(500);
cf.cluster().getNodeInfo(Node.ofDistributor(0)).setReportedState(new NodeState(NodeType.DISTRIBUTOR, State.UP).setStartTimestamp(500), 1000);
cf.cluster().getNodeInfo(Node.ofDistributor(0)).setReportedState(new NodeState(NodeType.DISTRIBUTOR, State.DOWN).setStartTimestamp(500), 2000);
cf.cluster().getNodeInfo(Node.ofDistributor(0)).setReportedState(new NodeState(NodeType.DISTRIBUTOR, State.UP).setStartTimestamp(500), 3000);
}
} |
Consider moving common setup code to separate function | public void state_bundle_not_considered_converged_until_activation_acked_by_all_distributors() {
var f = StateActivationFixture.withTwoPhaseEnabled();
var cf = f.cf;
f.expectSetSystemStateInvocationsToBothDistributors();
f.simulateBroadcastTick(cf);
respondToSetClusterStateBundle(cf.cluster.getNodeInfo(Node.ofDistributor(0)), f.stateBundle, f.d0Waiter.getValue());
respondToSetClusterStateBundle(cf.cluster.getNodeInfo(Node.ofDistributor(1)), f.stateBundle, f.d1Waiter.getValue());
f.simulateBroadcastTick(cf);
final var d0ActivateWaiter = ArgumentCaptor.forClass(Communicator.Waiter.class);
final var d1ActivateWaiter = ArgumentCaptor.forClass(Communicator.Waiter.class);
clusterNodeInfos(cf.cluster(), Node.ofDistributor(0), Node.ofDistributor(1)).forEach(nodeInfo -> {
verify(f.mockCommunicator).activateClusterStateVersion(eq(123), eq(nodeInfo),
(nodeInfo.getNodeIndex() == 0 ? d0ActivateWaiter : d1ActivateWaiter).capture());
});
respondToActivateClusterStateVersion(cf.cluster.getNodeInfo(Node.ofDistributor(0)),
f.stateBundle, d0ActivateWaiter.getValue());
f.simulateBroadcastTick(cf);
assertNull(f.broadcaster.getLastClusterStateBundleConverged());
respondToActivateClusterStateVersion(cf.cluster.getNodeInfo(Node.ofDistributor(1)),
f.stateBundle, d1ActivateWaiter.getValue());
f.simulateBroadcastTick(cf);
assertEquals(f.stateBundle, f.broadcaster.getLastClusterStateBundleConverged());
} | f.expectSetSystemStateInvocationsToBothDistributors(); | public void state_bundle_not_considered_converged_until_activation_acked_by_all_distributors() {
var f = StateActivationFixture.withTwoPhaseEnabled();
var cf = f.cf;
f.ackStateBundleFromBothDistributors();
final var d0ActivateWaiter = ArgumentCaptor.forClass(Communicator.Waiter.class);
final var d1ActivateWaiter = ArgumentCaptor.forClass(Communicator.Waiter.class);
clusterNodeInfos(cf.cluster(), Node.ofDistributor(0), Node.ofDistributor(1)).forEach(nodeInfo -> {
verify(f.mockCommunicator).activateClusterStateVersion(eq(123), eq(nodeInfo),
(nodeInfo.getNodeIndex() == 0 ? d0ActivateWaiter : d1ActivateWaiter).capture());
});
respondToActivateClusterStateVersion(cf.cluster.getNodeInfo(Node.ofDistributor(0)),
f.stateBundle, d0ActivateWaiter.getValue());
f.simulateBroadcastTick(cf);
assertNull(f.broadcaster.getLastClusterStateBundleConverged());
respondToActivateClusterStateVersion(cf.cluster.getNodeInfo(Node.ofDistributor(1)),
f.stateBundle, d1ActivateWaiter.getValue());
f.simulateBroadcastTick(cf);
assertEquals(f.stateBundle, f.broadcaster.getLastClusterStateBundleConverged());
} | class StateActivationFixture extends Fixture {
ClusterStateBundle stateBundle;
ClusterFixture cf;
@SuppressWarnings("rawtypes")
final ArgumentCaptor<Communicator.Waiter> d0Waiter;
@SuppressWarnings("rawtypes")
final ArgumentCaptor<Communicator.Waiter> d1Waiter;
private StateActivationFixture(boolean enableDeferred) {
super();
stateBundle = ClusterStateBundleUtil
.makeBundleBuilder("version:123 distributor:2 storage:2")
.deferredActivation(enableDeferred)
.deriveAndBuild();
cf = ClusterFixture.forFlatCluster(2).bringEntireClusterUp().assignDummyRpcAddresses();
broadcaster.handleNewClusterStates(stateBundle);
broadcaster.broadcastNewStateBundleIfRequired(dbContextFrom(cf.cluster()), mockCommunicator);
d0Waiter = ArgumentCaptor.forClass(Communicator.Waiter.class);
d1Waiter = ArgumentCaptor.forClass(Communicator.Waiter.class);
}
@SuppressWarnings("unchecked")
void expectSetSystemStateInvocationsToBothDistributors() {
clusterNodeInfos(cf.cluster(), Node.ofDistributor(0), Node.ofDistributor(1)).forEach(nodeInfo -> {
verify(mockCommunicator).setSystemState(eq(stateBundle), eq(nodeInfo),
(nodeInfo.getNodeIndex() == 0 ? d0Waiter : d1Waiter).capture());
});
}
static StateActivationFixture withTwoPhaseEnabled() {
return new StateActivationFixture(true);
}
static StateActivationFixture withTwoPhaseDisabled() {
return new StateActivationFixture(false);
}
} | class StateActivationFixture extends Fixture {
ClusterStateBundle stateBundle;
ClusterFixture cf;
@SuppressWarnings("rawtypes")
final ArgumentCaptor<Communicator.Waiter> d0Waiter;
@SuppressWarnings("rawtypes")
final ArgumentCaptor<Communicator.Waiter> d1Waiter;
private StateActivationFixture(boolean enableDeferred) {
super();
stateBundle = ClusterStateBundleUtil
.makeBundleBuilder("version:123 distributor:2 storage:2")
.deferredActivation(enableDeferred)
.deriveAndBuild();
cf = ClusterFixture.forFlatCluster(2).bringEntireClusterUp().assignDummyRpcAddresses();
broadcaster.handleNewClusterStates(stateBundle);
broadcaster.broadcastNewStateBundleIfRequired(dbContextFrom(cf.cluster()), mockCommunicator);
d0Waiter = ArgumentCaptor.forClass(Communicator.Waiter.class);
d1Waiter = ArgumentCaptor.forClass(Communicator.Waiter.class);
}
@SuppressWarnings("unchecked")
void expectSetSystemStateInvocationsToBothDistributors() {
clusterNodeInfos(cf.cluster(), Node.ofDistributor(0), Node.ofDistributor(1)).forEach(nodeInfo -> {
verify(mockCommunicator).setSystemState(eq(stateBundle), eq(nodeInfo),
(nodeInfo.getNodeIndex() == 0 ? d0Waiter : d1Waiter).capture());
});
}
@SuppressWarnings("unchecked")
void ackStateBundleFromBothDistributors() {
expectSetSystemStateInvocationsToBothDistributors();
simulateBroadcastTick(cf);
respondToSetClusterStateBundle(cf.cluster.getNodeInfo(Node.ofDistributor(0)), stateBundle, d0Waiter.getValue());
respondToSetClusterStateBundle(cf.cluster.getNodeInfo(Node.ofDistributor(1)), stateBundle, d1Waiter.getValue());
simulateBroadcastTick(cf);
}
static StateActivationFixture withTwoPhaseEnabled() {
return new StateActivationFixture(true);
}
static StateActivationFixture withTwoPhaseDisabled() {
return new StateActivationFixture(false);
}
} |
Done | public void state_bundle_not_considered_converged_until_activation_acked_by_all_distributors() {
var f = StateActivationFixture.withTwoPhaseEnabled();
var cf = f.cf;
f.expectSetSystemStateInvocationsToBothDistributors();
f.simulateBroadcastTick(cf);
respondToSetClusterStateBundle(cf.cluster.getNodeInfo(Node.ofDistributor(0)), f.stateBundle, f.d0Waiter.getValue());
respondToSetClusterStateBundle(cf.cluster.getNodeInfo(Node.ofDistributor(1)), f.stateBundle, f.d1Waiter.getValue());
f.simulateBroadcastTick(cf);
final var d0ActivateWaiter = ArgumentCaptor.forClass(Communicator.Waiter.class);
final var d1ActivateWaiter = ArgumentCaptor.forClass(Communicator.Waiter.class);
clusterNodeInfos(cf.cluster(), Node.ofDistributor(0), Node.ofDistributor(1)).forEach(nodeInfo -> {
verify(f.mockCommunicator).activateClusterStateVersion(eq(123), eq(nodeInfo),
(nodeInfo.getNodeIndex() == 0 ? d0ActivateWaiter : d1ActivateWaiter).capture());
});
respondToActivateClusterStateVersion(cf.cluster.getNodeInfo(Node.ofDistributor(0)),
f.stateBundle, d0ActivateWaiter.getValue());
f.simulateBroadcastTick(cf);
assertNull(f.broadcaster.getLastClusterStateBundleConverged());
respondToActivateClusterStateVersion(cf.cluster.getNodeInfo(Node.ofDistributor(1)),
f.stateBundle, d1ActivateWaiter.getValue());
f.simulateBroadcastTick(cf);
assertEquals(f.stateBundle, f.broadcaster.getLastClusterStateBundleConverged());
} | f.expectSetSystemStateInvocationsToBothDistributors(); | public void state_bundle_not_considered_converged_until_activation_acked_by_all_distributors() {
var f = StateActivationFixture.withTwoPhaseEnabled();
var cf = f.cf;
f.ackStateBundleFromBothDistributors();
final var d0ActivateWaiter = ArgumentCaptor.forClass(Communicator.Waiter.class);
final var d1ActivateWaiter = ArgumentCaptor.forClass(Communicator.Waiter.class);
clusterNodeInfos(cf.cluster(), Node.ofDistributor(0), Node.ofDistributor(1)).forEach(nodeInfo -> {
verify(f.mockCommunicator).activateClusterStateVersion(eq(123), eq(nodeInfo),
(nodeInfo.getNodeIndex() == 0 ? d0ActivateWaiter : d1ActivateWaiter).capture());
});
respondToActivateClusterStateVersion(cf.cluster.getNodeInfo(Node.ofDistributor(0)),
f.stateBundle, d0ActivateWaiter.getValue());
f.simulateBroadcastTick(cf);
assertNull(f.broadcaster.getLastClusterStateBundleConverged());
respondToActivateClusterStateVersion(cf.cluster.getNodeInfo(Node.ofDistributor(1)),
f.stateBundle, d1ActivateWaiter.getValue());
f.simulateBroadcastTick(cf);
assertEquals(f.stateBundle, f.broadcaster.getLastClusterStateBundleConverged());
} | class StateActivationFixture extends Fixture {
ClusterStateBundle stateBundle;
ClusterFixture cf;
@SuppressWarnings("rawtypes")
final ArgumentCaptor<Communicator.Waiter> d0Waiter;
@SuppressWarnings("rawtypes")
final ArgumentCaptor<Communicator.Waiter> d1Waiter;
private StateActivationFixture(boolean enableDeferred) {
super();
stateBundle = ClusterStateBundleUtil
.makeBundleBuilder("version:123 distributor:2 storage:2")
.deferredActivation(enableDeferred)
.deriveAndBuild();
cf = ClusterFixture.forFlatCluster(2).bringEntireClusterUp().assignDummyRpcAddresses();
broadcaster.handleNewClusterStates(stateBundle);
broadcaster.broadcastNewStateBundleIfRequired(dbContextFrom(cf.cluster()), mockCommunicator);
d0Waiter = ArgumentCaptor.forClass(Communicator.Waiter.class);
d1Waiter = ArgumentCaptor.forClass(Communicator.Waiter.class);
}
@SuppressWarnings("unchecked")
void expectSetSystemStateInvocationsToBothDistributors() {
clusterNodeInfos(cf.cluster(), Node.ofDistributor(0), Node.ofDistributor(1)).forEach(nodeInfo -> {
verify(mockCommunicator).setSystemState(eq(stateBundle), eq(nodeInfo),
(nodeInfo.getNodeIndex() == 0 ? d0Waiter : d1Waiter).capture());
});
}
static StateActivationFixture withTwoPhaseEnabled() {
return new StateActivationFixture(true);
}
static StateActivationFixture withTwoPhaseDisabled() {
return new StateActivationFixture(false);
}
} | class StateActivationFixture extends Fixture {
ClusterStateBundle stateBundle;
ClusterFixture cf;
@SuppressWarnings("rawtypes")
final ArgumentCaptor<Communicator.Waiter> d0Waiter;
@SuppressWarnings("rawtypes")
final ArgumentCaptor<Communicator.Waiter> d1Waiter;
private StateActivationFixture(boolean enableDeferred) {
super();
stateBundle = ClusterStateBundleUtil
.makeBundleBuilder("version:123 distributor:2 storage:2")
.deferredActivation(enableDeferred)
.deriveAndBuild();
cf = ClusterFixture.forFlatCluster(2).bringEntireClusterUp().assignDummyRpcAddresses();
broadcaster.handleNewClusterStates(stateBundle);
broadcaster.broadcastNewStateBundleIfRequired(dbContextFrom(cf.cluster()), mockCommunicator);
d0Waiter = ArgumentCaptor.forClass(Communicator.Waiter.class);
d1Waiter = ArgumentCaptor.forClass(Communicator.Waiter.class);
}
@SuppressWarnings("unchecked")
void expectSetSystemStateInvocationsToBothDistributors() {
clusterNodeInfos(cf.cluster(), Node.ofDistributor(0), Node.ofDistributor(1)).forEach(nodeInfo -> {
verify(mockCommunicator).setSystemState(eq(stateBundle), eq(nodeInfo),
(nodeInfo.getNodeIndex() == 0 ? d0Waiter : d1Waiter).capture());
});
}
@SuppressWarnings("unchecked")
void ackStateBundleFromBothDistributors() {
expectSetSystemStateInvocationsToBothDistributors();
simulateBroadcastTick(cf);
respondToSetClusterStateBundle(cf.cluster.getNodeInfo(Node.ofDistributor(0)), stateBundle, d0Waiter.getValue());
respondToSetClusterStateBundle(cf.cluster.getNodeInfo(Node.ofDistributor(1)), stateBundle, d1Waiter.getValue());
simulateBroadcastTick(cf);
}
static StateActivationFixture withTwoPhaseEnabled() {
return new StateActivationFixture(true);
}
static StateActivationFixture withTwoPhaseDisabled() {
return new StateActivationFixture(false);
}
} |
Done | void simulateBroadcastTick(ClusterFixture cf) {
broadcaster.processResponses();
broadcaster.broadcastNewStateBundleIfRequired(dbContextFrom(cf.cluster()), mockCommunicator);
try {
broadcaster.checkIfClusterStateIsAckedByAllDistributors(
mockDatabaseHandler, dbContextFrom(cf.cluster()), mockFleetController);
} catch (Exception e) {
throw new RuntimeException(e);
}
broadcaster.broadcastStateActivationsIfRequired(dbContextFrom(cf.cluster()), mockCommunicator);
} | broadcaster.broadcastStateActivationsIfRequired(dbContextFrom(cf.cluster()), mockCommunicator); | void simulateBroadcastTick(ClusterFixture cf) {
broadcaster.processResponses();
broadcaster.broadcastNewStateBundleIfRequired(dbContextFrom(cf.cluster()), mockCommunicator);
try {
broadcaster.checkIfClusterStateIsAckedByAllDistributors(
mockDatabaseHandler, dbContextFrom(cf.cluster()), mockFleetController);
} catch (Exception e) {
throw new RuntimeException(e);
}
broadcaster.broadcastStateActivationsIfRequired(dbContextFrom(cf.cluster()), mockCommunicator);
} | class Fixture {
FakeTimer timer = new FakeTimer();
final Object monitor = new Object();
SystemStateBroadcaster broadcaster = new SystemStateBroadcaster(timer, monitor);
Communicator mockCommunicator = mock(Communicator.class);
DatabaseHandler mockDatabaseHandler = mock(DatabaseHandler.class);
FleetController mockFleetController = mock(FleetController.class);
void simulateNodePartitionedAwaySilently(ClusterFixture cf) {
cf.cluster().getNodeInfo(Node.ofStorage(0)).setStartTimestamp(600);
cf.cluster().getNodeInfo(Node.ofStorage(1)).setStartTimestamp(700);
cf.cluster().getNodeInfo(Node.ofDistributor(0)).setStartTimestamp(500);
cf.cluster().getNodeInfo(Node.ofDistributor(0)).setReportedState(new NodeState(NodeType.DISTRIBUTOR, State.UP).setStartTimestamp(500), 1000);
cf.cluster().getNodeInfo(Node.ofDistributor(0)).setReportedState(new NodeState(NodeType.DISTRIBUTOR, State.DOWN).setStartTimestamp(500), 2000);
cf.cluster().getNodeInfo(Node.ofDistributor(0)).setReportedState(new NodeState(NodeType.DISTRIBUTOR, State.UP).setStartTimestamp(500), 3000);
}
} | class Fixture {
FakeTimer timer = new FakeTimer();
final Object monitor = new Object();
SystemStateBroadcaster broadcaster = new SystemStateBroadcaster(timer, monitor);
Communicator mockCommunicator = mock(Communicator.class);
DatabaseHandler mockDatabaseHandler = mock(DatabaseHandler.class);
FleetController mockFleetController = mock(FleetController.class);
void simulateNodePartitionedAwaySilently(ClusterFixture cf) {
cf.cluster().getNodeInfo(Node.ofStorage(0)).setStartTimestamp(600);
cf.cluster().getNodeInfo(Node.ofStorage(1)).setStartTimestamp(700);
cf.cluster().getNodeInfo(Node.ofDistributor(0)).setStartTimestamp(500);
cf.cluster().getNodeInfo(Node.ofDistributor(0)).setReportedState(new NodeState(NodeType.DISTRIBUTOR, State.UP).setStartTimestamp(500), 1000);
cf.cluster().getNodeInfo(Node.ofDistributor(0)).setReportedState(new NodeState(NodeType.DISTRIBUTOR, State.DOWN).setStartTimestamp(500), 2000);
cf.cluster().getNodeInfo(Node.ofDistributor(0)).setReportedState(new NodeState(NodeType.DISTRIBUTOR, State.UP).setStartTimestamp(500), 3000);
}
} |
It's a brave new world. Do we need to be concerned about leaking internal / user data through stack traces? | public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case PUT: return handlePUT(request);
case POST: return handlePOST(request);
case DELETE: return handleDELETE(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
} | return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); | public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case PUT: return handlePUT(request);
case POST: return handlePOST(request);
case DELETE: return handleDELETE(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
} | class UserApiHandler extends LoggingRequestHandler {
private final static Logger log = Logger.getLogger(UserApiHandler.class.getName());
public UserApiHandler(Context parentCtx) {
super(parentCtx);
}
@Override
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
return ErrorResponse.notFoundError(String.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse handlePUT(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
return ErrorResponse.notFoundError(String.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse handlePOST(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
return ErrorResponse.notFoundError(String.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse handleDELETE(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
return ErrorResponse.notFoundError(String.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse handleOPTIONS() {
EmptyJsonResponse response = new EmptyJsonResponse();
response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
return response;
}
} | class UserApiHandler extends LoggingRequestHandler {
private final static Logger log = Logger.getLogger(UserApiHandler.class.getName());
public UserApiHandler(Context parentCtx) {
super(parentCtx);
}
@Override
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
return ErrorResponse.notFoundError(String.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse handlePUT(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
return ErrorResponse.notFoundError(String.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse handlePOST(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
return ErrorResponse.notFoundError(String.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse handleDELETE(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
return ErrorResponse.notFoundError(String.format("No '%s' handler at '%s'", request.getMethod(),
request.getUri().getPath()));
}
private HttpResponse handleOPTIONS() {
EmptyJsonResponse response = new EmptyJsonResponse();
response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
return response;
}
} |
please use just "rpc" to align with other services | public String[] getPortSuffixes() {
return new String[]{ "logtp/rpc", "logtp/legacy", "unused/1", "unused/2" };
} | return new String[]{ "logtp/rpc", "logtp/legacy", "unused/1", "unused/2" }; | public String[] getPortSuffixes() {
return new String[]{ "rpc", "legacy", "unused/1", "unused/2" };
} | class Logserver extends AbstractService {
private static final long serialVersionUID = 1L;
private static final String logArchiveDir = "$ROOT/logs/vespa/logarchive";
public Logserver(AbstractConfigProducer parent) {
super(parent, "logserver");
portsMeta.on(0).tag("logtp").tag("rpc");
portsMeta.on(1).tag("logtp").tag("legacy");
portsMeta.on(2).tag("unused");
portsMeta.on(3).tag("unused");
setProp("clustertype", "admin");
setProp("clustername", "admin");
}
/**
* @return the startup command for the logserver
*/
public String getStartupCommand() {
return "exec $ROOT/bin/vespa-logserver-start " + getMyJVMArgs() + " " + getJvmOptions();
}
/**
* @return the jvm args to be used by the logserver.
*/
private String getMyJVMArgs() {
StringBuilder sb = new StringBuilder();
sb.append("-Dlogserver.rpcListenPort=").append(getRelativePort(0));
sb.append(" ");
sb.append("-Dlogserver.listenport=").append(getRelativePort(1));
sb.append(" ");
sb.append("-Dlogserver.logarchive.dir=" + logArchiveDir);
return sb.toString();
}
/**
* Returns the desired base port for this service.
*/
public int getWantedPort() {
return 19080;
}
/**
* The desired base port is the only allowed base port.
*
* @return 'true' always
*/
public boolean requiresWantedPort() {
return true;
}
/**
* @return the number of ports needed by the logserver.
*/
public int getPortCount() {
return 4;
}
@Override
} | class Logserver extends AbstractService {
private static final long serialVersionUID = 1L;
private static final String logArchiveDir = "$ROOT/logs/vespa/logarchive";
public Logserver(AbstractConfigProducer parent) {
super(parent, "logserver");
portsMeta.on(0).tag("logtp").tag("rpc");
portsMeta.on(1).tag("logtp").tag("legacy");
portsMeta.on(2).tag("unused");
portsMeta.on(3).tag("unused");
setProp("clustertype", "admin");
setProp("clustername", "admin");
}
/**
* @return the startup command for the logserver
*/
public String getStartupCommand() {
return "exec $ROOT/bin/vespa-logserver-start " + getMyJVMArgs() + " " + getJvmOptions();
}
/**
* @return the jvm args to be used by the logserver.
*/
private String getMyJVMArgs() {
StringBuilder sb = new StringBuilder();
sb.append("-Dlogserver.rpcListenPort=").append(getRelativePort(0));
sb.append(" ");
sb.append("-Dlogserver.listenport=").append(getRelativePort(1));
sb.append(" ");
sb.append("-Dlogserver.logarchive.dir=" + logArchiveDir);
return sb.toString();
}
/**
* Returns the desired base port for this service.
*/
public int getWantedPort() {
return 19080;
}
/**
* The desired base port is the only allowed base port.
*
* @return 'true' always
*/
public boolean requiresWantedPort() {
return true;
}
/**
* @return the number of ports needed by the logserver.
*/
public int getPortCount() {
return 4;
}
@Override
} |
Given that we're not capping cpu in cd non-prod now I guess we can also remove the next clause, ``` if (zone.system() == SystemName.cd && zone.environment() == Environment.test || zone.environment() == Environment.staging) return clusterType == ClusterSpec.Type.admin ? new NodeResources(1, 3, 50) : new NodeResources(4, 4, 50); ``` ? | private NodeResources defaultNodeResources(ClusterSpec.Type clusterType) {
if (zone.system() == SystemName.PublicCd && clusterType == ClusterSpec.Type.admin && zone.environment() != Environment.prod)
return new NodeResources(1, 3, 50);
if (zone.system() == SystemName.cd && zone.environment() == Environment.dev && zone.region().value().equals("cd-us-west-1"))
return new NodeResources(1, 4, 50);
if (zone.system() == SystemName.cd && zone.environment() == Environment.test || zone.environment() == Environment.staging)
return clusterType == ClusterSpec.Type.admin ? new NodeResources(1, 3, 50)
: new NodeResources(4, 4, 50);
return new NodeResources(2, 8, 50);
} | return new NodeResources(1, 4, 50); | private NodeResources defaultNodeResources(ClusterSpec.Type clusterType) {
if (clusterType == ClusterSpec.Type.admin)
return new NodeResources(0.5, 3, 50);
if (zone.system() == SystemName.cd && zone.environment().isTest())
new NodeResources(4, 4, 50);
return new NodeResources(2, 8, 50);
} | class CapacityPolicies {
private final Zone zone;
private final NodeFlavors flavors;
public CapacityPolicies(Zone zone, NodeFlavors flavors) {
this.zone = zone;
this.flavors = flavors;
}
public int decideSize(Capacity requestedCapacity, ClusterSpec.Type clusterType) {
int requestedNodes = ensureRedundancy(requestedCapacity.nodeCount(), clusterType, requestedCapacity.canFail());
if (requestedCapacity.isRequired()) return requestedNodes;
switch(zone.environment()) {
case dev : case test : return 1;
case perf : return Math.min(requestedCapacity.nodeCount(), 3);
case staging: return requestedNodes <= 1 ? requestedNodes : Math.max(2, requestedNodes / 10);
case prod : return requestedNodes;
default : throw new IllegalArgumentException("Unsupported environment " + zone.environment());
}
}
public NodeResources decideNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) {
NodeResources resources = specifiedOrDefaultNodeResources(requestedResources, cluster);
if (resources.allocateByLegacyName()) return resources;
if (zone.system() == SystemName.cd || zone.environment() == Environment.dev || zone.environment() == Environment.test)
resources = resources.withDiskSpeed(NodeResources.DiskSpeed.any);
if (zone.environment() == Environment.dev)
resources = resources.withVcpu(0.1);
return resources;
}
private NodeResources specifiedOrDefaultNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) {
if (requestedResources.isPresent() && ! requestedResources.get().allocateByLegacyName())
return requestedResources.get();
if (requestedResources.isEmpty())
return defaultNodeResources(cluster.type());
if (zone.system() == SystemName.cd)
return flavors.exists(requestedResources.get().legacyName().get()) ? requestedResources.get()
: defaultNodeResources(cluster.type());
else {
switch (zone.environment()) {
case dev: case test: case staging: return defaultNodeResources(cluster.type());
default:
flavors.getFlavorOrThrow(requestedResources.get().legacyName().get());
return requestedResources.get();
}
}
}
/**
* Whether or not the nodes requested can share physical host with other applications.
* A security feature which only makes sense for prod.
*/
public boolean decideExclusivity(boolean requestedExclusivity) {
return requestedExclusivity && zone.environment() == Environment.prod;
}
/**
* Throw if the node count is 1 for container and content clusters and we're in a production zone
*
* @return the argument node count
* @throws IllegalArgumentException if only one node is requested and we can fail
*/
private int ensureRedundancy(int nodeCount, ClusterSpec.Type clusterType, boolean canFail) {
if (canFail &&
nodeCount == 1 &&
Arrays.asList(ClusterSpec.Type.container, ClusterSpec.Type.content).contains(clusterType) &&
zone.environment().isProduction())
throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy");
return nodeCount;
}
} | class CapacityPolicies {
private final Zone zone;
private final NodeFlavors flavors;
public CapacityPolicies(Zone zone, NodeFlavors flavors) {
this.zone = zone;
this.flavors = flavors;
}
public int decideSize(Capacity requestedCapacity, ClusterSpec.Type clusterType) {
int requestedNodes = ensureRedundancy(requestedCapacity.nodeCount(), clusterType, requestedCapacity.canFail());
if (requestedCapacity.isRequired()) return requestedNodes;
switch(zone.environment()) {
case dev : case test : return 1;
case perf : return Math.min(requestedCapacity.nodeCount(), 3);
case staging: return requestedNodes <= 1 ? requestedNodes : Math.max(2, requestedNodes / 10);
case prod : return requestedNodes;
default : throw new IllegalArgumentException("Unsupported environment " + zone.environment());
}
}
public NodeResources decideNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) {
NodeResources resources = specifiedOrDefaultNodeResources(requestedResources, cluster);
if (resources.allocateByLegacyName()) return resources;
if (zone.system() == SystemName.cd || zone.environment() == Environment.dev || zone.environment() == Environment.test)
resources = resources.withDiskSpeed(NodeResources.DiskSpeed.any);
if (zone.environment() == Environment.dev)
resources = resources.withVcpu(0.1);
return resources;
}
private NodeResources specifiedOrDefaultNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) {
if (requestedResources.isPresent() && ! requestedResources.get().allocateByLegacyName())
return requestedResources.get();
if (requestedResources.isEmpty())
return defaultNodeResources(cluster.type());
if (zone.system() == SystemName.cd)
return flavors.exists(requestedResources.get().legacyName().get()) ? requestedResources.get()
: defaultNodeResources(cluster.type());
else {
switch (zone.environment()) {
case dev: case test: case staging: return defaultNodeResources(cluster.type());
default:
flavors.getFlavorOrThrow(requestedResources.get().legacyName().get());
return requestedResources.get();
}
}
}
/**
 * Whether the requested nodes must run on hosts with no other applications on them.
 * This is a security feature which only makes sense for prod, so the request is
 * ignored in all other environments.
 */
public boolean decideExclusivity(boolean requestedExclusivity) {
    if (zone.environment() != Environment.prod) return false;
    return requestedExclusivity;
}
/**
 * Rejects single-node container/content clusters in production zones.
 *
 * @param nodeCount the requested node count
 * @param clusterType the type of the cluster being sized
 * @param canFail whether this deployment is allowed to fail validation
 * @return the given node count, unchanged
 * @throws IllegalArgumentException if only one node is requested for a
 *         container or content cluster in production and we can fail
 */
private int ensureRedundancy(int nodeCount, ClusterSpec.Type clusterType, boolean canFail) {
    boolean redundancyMatters = clusterType == ClusterSpec.Type.container
                                || clusterType == ClusterSpec.Type.content;
    if (canFail && nodeCount == 1 && redundancyMatters && zone.environment().isProduction())
        throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy");
    return nodeCount;
}
} |
Consider extracting a method for these two lines that takes context as argument, to remove the repetition. | public Builder limitedTo(TenantName tenant, ApplicationName application) {
roles.putIfAbsent(current, new HashSet<>());
roles.get(current).add(Context.limitedTo(tenant, application, system));
current = null;
return this;
} | roles.get(current).add(Context.limitedTo(tenant, application, system)); | public Builder limitedTo(TenantName tenant, ApplicationName application) {
consumeCurrent(Context.limitedTo(tenant, application, system));
return this;
} | class BuilderWithRole implements Builder {
private final SystemName system;
private final Map<Role, Set<Context>> roles;
private Role current;
private BuilderWithRole(SystemName system) {
this.system = Objects.requireNonNull(system);
this.roles = new HashMap<>();
}
@Override
public BuilderWithRole add(Role role) {
    // A pending role that never got an explicit limitedTo(...) defaults to the unlimited context.
    if (current != null)
        roles.computeIfAbsent(current, unused -> new HashSet<>()).add(Context.unlimitedIn(system));
    current = role;
    return this;
}
/** Scopes the most recently added role to the given tenant. */
public Builder limitedTo(TenantName tenant) {
    roles.computeIfAbsent(current, unused -> new HashSet<>()).add(Context.limitedTo(tenant, system));
    current = null; // the pending role has now been consumed
    return this;
}
@Override
public RoleMembership build() {
    // A trailing role with no explicit context defaults to the unlimited context.
    if (current != null)
        roles.computeIfAbsent(current, unused -> new HashSet<>()).add(Context.unlimitedIn(system));
    return new RoleMembership(roles);
}
} | class BuilderWithRole implements Builder {
private final SystemName system;
private final Map<Role, Set<Context>> roles;
private Role current;
private BuilderWithRole(SystemName system) {
this.system = Objects.requireNonNull(system);
this.roles = new HashMap<>();
}
@Override
public BuilderWithRole add(Role role) {
consumeCurrent(Context.unlimitedIn(system));
current = role;
return this;
}
public Builder limitedTo(TenantName tenant) {
consumeCurrent(Context.limitedTo(tenant, system));
return this;
}
@Override
public RoleMembership build() {
consumeCurrent(Context.unlimitedIn(system));
return new RoleMembership(roles);
}
private void consumeCurrent(Context context) {
if (current != null) {
roles.putIfAbsent(current, new HashSet<>());
roles.get(current).add(context);
}
current = null;
}
} |
Sure. | public Builder limitedTo(TenantName tenant, ApplicationName application) {
roles.putIfAbsent(current, new HashSet<>());
roles.get(current).add(Context.limitedTo(tenant, application, system));
current = null;
return this;
} | roles.get(current).add(Context.limitedTo(tenant, application, system)); | public Builder limitedTo(TenantName tenant, ApplicationName application) {
consumeCurrent(Context.limitedTo(tenant, application, system));
return this;
} | class BuilderWithRole implements Builder {
private final SystemName system;
private final Map<Role, Set<Context>> roles;
private Role current;
private BuilderWithRole(SystemName system) {
this.system = Objects.requireNonNull(system);
this.roles = new HashMap<>();
}
@Override
public BuilderWithRole add(Role role) {
if (current != null) {
roles.putIfAbsent(current, new HashSet<>());
roles.get(current).add(Context.unlimitedIn(system));
}
current = role;
return this;
}
public Builder limitedTo(TenantName tenant) {
roles.putIfAbsent(current, new HashSet<>());
roles.get(current).add(Context.limitedTo(tenant, system));
current = null;
return this;
}
@Override
public RoleMembership build() {
if (current != null) {
roles.putIfAbsent(current, new HashSet<>());
roles.get(current).add(Context.unlimitedIn(system));
}
return new RoleMembership(roles);
}
} | class BuilderWithRole implements Builder {
private final SystemName system;
private final Map<Role, Set<Context>> roles;
private Role current;
private BuilderWithRole(SystemName system) {
this.system = Objects.requireNonNull(system);
this.roles = new HashMap<>();
}
@Override
public BuilderWithRole add(Role role) {
consumeCurrent(Context.unlimitedIn(system));
current = role;
return this;
}
public Builder limitedTo(TenantName tenant) {
consumeCurrent(Context.limitedTo(tenant, system));
return this;
}
@Override
public RoleMembership build() {
consumeCurrent(Context.unlimitedIn(system));
return new RoleMembership(roles);
}
private void consumeCurrent(Context context) {
if (current != null) {
roles.putIfAbsent(current, new HashSet<>());
roles.get(current).add(context);
}
current = null;
}
} |
the format method thinks "0" means unknown but here it's "-1" that means unknown | private static long parseThreadId(String threadProcess) {
int slashIndex = threadProcess.indexOf('/');
if (slashIndex == -1) {
return -1;
}
return Long.parseLong(threadProcess.substring(slashIndex + 1));
} | return -1; | private static long parseThreadId(String threadProcess) {
int slashIndex = threadProcess.indexOf('/');
if (slashIndex == -1) {
return 0;
}
return Long.parseLong(threadProcess.substring(slashIndex + 1));
} | class LogMessage
{
private static Logger log = Logger.getLogger(LogMessage.class.getName());
private static Pattern nativeFormat =
Pattern.compile("^(\\d[^\t]+)\t" +
"([^\t]+)\t" +
"([^\t]+)\t" +
"([^\t]+)\t" +
"([^\t]+)\t" +
"([^\t]+)\t" +
"(.+)$"
);
private Instant time;
private String host;
private long processId;
private long threadId;
private String service;
private String component;
private Level level;
private String payload;
private Event event;
/**
* Private constructor. Log messages should never be instantiated
* directly; only as the result of a static factory method.
*/
private LogMessage (Instant time, String host, long processId, long threadId,
String service, String component, Level level,
String payload)
{
this.time = time;
this.host = host;
this.processId = processId;
this.threadId = threadId;
this.service = service;
this.component = component;
this.level = level;
this.payload = payload;
}
public static LogMessage of(
Instant time, String host, long processId, long threadId,
String service, String component, Level level, String payload) {
return new LogMessage(time, host, processId, threadId, service, component, level, payload);
}
public Instant getTimestamp() {return time;}
/**
* @deprecated Use {@link
*/
@Deprecated(since = "7", forRemoval = true)
public long getTime () {return time.toEpochMilli();}
/**
* @deprecated Use {@link
*/
@Deprecated(since = "7", forRemoval = true)
public long getTimeInSeconds () {return time.getEpochSecond();}
public String getHost () {return host;}
public long getProcessId() {return processId;}
public OptionalLong getThreadId() {return threadId != -1 ? OptionalLong.of(threadId) : OptionalLong.empty();}
/**
* @deprecated Use {@link
*/
@Deprecated(since = "7", forRemoval = true)
public String getThreadProcess () {return VespaFormat.formatThreadProcess(processId, threadId);}
public String getService () {return service;}
public String getComponent () {return component;}
public Level getLevel () {return level;}
public String getPayload () {return payload;}
/**
 * Makes a log message from the native format of the logging package:
 * seven tab-separated fields — time, host, pid[/tid], service, component,
 * level, payload — as defined by the {@code nativeFormat} pattern.
 *
 * @param msg a log line on the native format
 * @return the parsed LogMessage instance
 * @throws InvalidLogFormatException if the line does not match the native
 *         format and therefore cannot be parsed
 */
public static LogMessage parseNativeFormat(String msg) throws InvalidLogFormatException {
    Matcher m = nativeFormat.matcher(msg);
    if (! m.matches()) {
        throw new InvalidLogFormatException(msg);
    }
    Level msgLevel = LogLevel.parse(m.group(6));
    Instant timestamp = parseTimestamp(m.group(1));
    String threadProcess = m.group(3);  // either "pid" or "pid/tid"
    return new LogMessage(timestamp, m.group(2), parseProcessId(threadProcess), parseThreadId(threadProcess),
                          m.group(4), m.group(5), msgLevel,
                          m.group(7));
}
/**
 * Parses a log timestamp of the form "seconds[.fraction]" (seconds since epoch)
 * into an Instant.
 *
 * The previous implementation went through {@code double}, whose 53-bit mantissa
 * cannot represent current epoch times in nanoseconds exactly, silently rounding
 * timestamps by up to a few hundred nanoseconds. Parsing the integer and fraction
 * parts separately as longs preserves full precision.
 *
 * Note: assumes a plain non-negative decimal timestamp, which is guaranteed by the
 * nativeFormat pattern requiring the field to start with a digit — TODO confirm no
 * producer emits exponent notation.
 *
 * @param timeStr the timestamp field of a native-format log line
 * @return the corresponding instant
 * @throws InvalidLogFormatException if the field is not a valid decimal number
 */
private static Instant parseTimestamp(String timeStr) throws InvalidLogFormatException {
    try {
        int dot = timeStr.indexOf('.');
        if (dot == -1) {
            return Instant.ofEpochSecond(Long.parseLong(timeStr));
        }
        long seconds = Long.parseLong(timeStr.substring(0, dot));
        String fraction = timeStr.substring(dot + 1);
        if (fraction.length() > 9)
            fraction = fraction.substring(0, 9); // ignore sub-nanosecond digits
        long nanos = 0;
        if ( ! fraction.isEmpty()) {
            nanos = Long.parseLong(fraction);
            for (int i = fraction.length(); i < 9; i++) // scale fraction up to nanoseconds
                nanos *= 10;
        }
        return Instant.ofEpochSecond(seconds, nanos);
    } catch (NumberFormatException e) {
        throw new InvalidLogFormatException("Invalid time string: " + timeStr);
    }
}
/** Extracts the process id from a "pid" or "pid/tid" field. */
private static long parseProcessId(String threadProcess) {
    int slash = threadProcess.indexOf('/');
    String pidText = slash < 0 ? threadProcess : threadProcess.substring(0, slash);
    return Long.parseLong(pidText);
}
/**
 * If this message is an EVENT, returns the Event instance parsed from its
 * payload. The event is parsed lazily on the first call and cached, so
 * subsequent calls return the same instance.
 *
 * TODO: make sure this throws exception!
 *
 * @return the parsed event if this is an event message with a well-formed
 *         payload; <code>null</code> if this message is not an event
 *         (non-event messages are never parsed)
 * @throws MalformedEventException if the payload cannot be parsed as an event
 */
public Event getEvent () throws MalformedEventException {
    if ((level == LogLevel.EVENT) && (event == null)) {
        try {
            event = Event.parse(getPayload());
            event.setTime(time.toEpochMilli());  // stamp the event with this message's time
        }
        catch (MalformedEventException e) {
            log.log(LogLevel.DEBUG, "Got malformed event: " + getPayload());
            throw e;  // logged for diagnostics, but still propagated to the caller
        }
    }
    return event;
}
/**
 * Returns this message on the native log format: seven tab-separated fields
 * terminated by a newline.
 */
public String toString () {
    String threadProcess = VespaFormat.formatThreadProcess(processId, threadId);
    String timeStr = VespaFormat.formatTime(time);
    // Presize roughly to the payload size; +7 covers the six tabs and the newline.
    StringBuilder line = new StringBuilder(timeStr.length()
                                           + host.length()
                                           + threadProcess.length()
                                           + service.length()
                                           + component.length()
                                           + level.toString().length()
                                           + payload.length()
                                           + 7);
    line.append(timeStr).append('\t');
    line.append(host).append('\t');
    line.append(threadProcess).append('\t');
    line.append(service).append('\t');
    line.append(component).append('\t');
    line.append(level.toString().toLowerCase()).append('\t');
    line.append(payload).append('\n');
    return line.toString();
}
} | class LogMessage
{
private static Logger log = Logger.getLogger(LogMessage.class.getName());
private static Pattern nativeFormat =
Pattern.compile("^(\\d[^\t]+)\t" +
"([^\t]+)\t" +
"([^\t]+)\t" +
"([^\t]+)\t" +
"([^\t]+)\t" +
"([^\t]+)\t" +
"(.+)$"
);
private Instant time;
private String host;
private long processId;
private long threadId;
private String service;
private String component;
private Level level;
private String payload;
private Event event;
/**
* Private constructor. Log messages should never be instantiated
* directly; only as the result of a static factory method.
*/
private LogMessage (Instant time, String host, long processId, long threadId,
String service, String component, Level level,
String payload)
{
this.time = time;
this.host = host;
this.processId = processId;
this.threadId = threadId;
this.service = service;
this.component = component;
this.level = level;
this.payload = payload;
}
public static LogMessage of(
Instant time, String host, long processId, long threadId,
String service, String component, Level level, String payload) {
return new LogMessage(time, host, processId, threadId, service, component, level, payload);
}
public Instant getTimestamp() {return time;}
/**
* @deprecated Use {@link
*/
@Deprecated(since = "7", forRemoval = true)
public long getTime () {return time.toEpochMilli();}
/**
* @deprecated Use {@link
*/
@Deprecated(since = "7", forRemoval = true)
public long getTimeInSeconds () {return time.getEpochSecond();}
public String getHost () {return host;}
public long getProcessId() {return processId;}
public OptionalLong getThreadId() {return threadId > 0 ? OptionalLong.of(threadId) : OptionalLong.empty();}
/**
* @deprecated Use {@link
*/
@Deprecated(since = "7", forRemoval = true)
public String getThreadProcess () {return VespaFormat.formatThreadProcess(processId, threadId);}
public String getService () {return service;}
public String getComponent () {return component;}
public Level getLevel () {return level;}
public String getPayload () {return payload;}
/**
* Make a log message from the native format of the logging
* package.
*
* @param msg The log message
* @return Returns a LogMessage instance
* @throws InvalidLogFormatException if the log message
* can not be parsed, ie. is invalid, we throw this
* exception.
*/
public static LogMessage parseNativeFormat(String msg) throws InvalidLogFormatException {
Matcher m = nativeFormat.matcher(msg);
if (! m.matches()) {
throw new InvalidLogFormatException(msg);
}
Level msgLevel = LogLevel.parse(m.group(6));
Instant timestamp = parseTimestamp(m.group(1));
String threadProcess = m.group(3);
return new LogMessage(timestamp, m.group(2), parseProcessId(threadProcess), parseThreadId(threadProcess),
m.group(4), m.group(5), msgLevel,
m.group(7));
}
private static Instant parseTimestamp(String timeStr) throws InvalidLogFormatException {
try {
long nanoseconds = (long) (Double.parseDouble(timeStr) * 1_000_000_000L);
return Instant.ofEpochSecond(0, nanoseconds);
} catch (NumberFormatException e) {
throw new InvalidLogFormatException("Invalid time string: " + timeStr);
}
}
private static long parseProcessId(String threadProcess) {
int slashIndex = threadProcess.indexOf('/');
if (slashIndex == -1) {
return Long.parseLong(threadProcess);
}
return Long.parseLong(threadProcess.substring(0, slashIndex));
}
/**
* If the LogMessage was an EVENT then this method can
* be used to get the Event instance representing the
* event. The event instance created the first time
* this method is called and then cached.
*
* TODO: make sure this throws exception!
*
* @return Returns Event instance if this is an event message
* and the payload is correctly formatted. Otherwise
* it will return <code>null</code>.
*
*/
public Event getEvent () throws MalformedEventException {
if ((level == LogLevel.EVENT) && (event == null)) {
try {
event = Event.parse(getPayload());
event.setTime(time.toEpochMilli());
}
catch (MalformedEventException e) {
log.log(LogLevel.DEBUG, "Got malformed event: " + getPayload());
throw e;
}
}
return event;
}
/**
* Return valid representation of log message.
*/
public String toString () {
String threadProcess = VespaFormat.formatThreadProcess(processId, threadId);
String timeStr = VespaFormat.formatTime(time);
return new StringBuilder(timeStr.length()
+ host.length()
+ threadProcess.length()
+ service.length()
+ component.length()
+ level.toString().length()
+ payload.length()
+ 1)
.append(timeStr).append("\t")
.append(host).append("\t")
.append(threadProcess).append("\t")
.append(service).append("\t")
.append(component).append("\t")
.append(level.toString().toLowerCase()).append("\t")
.append(payload).append("\n")
.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
LogMessage that = (LogMessage) o;
return processId == that.processId &&
threadId == that.threadId &&
Objects.equals(time, that.time) &&
Objects.equals(host, that.host) &&
Objects.equals(service, that.service) &&
Objects.equals(component, that.component) &&
Objects.equals(level, that.level) &&
Objects.equals(payload, that.payload) &&
Objects.equals(event, that.event);
}
@Override
public int hashCode() {
return Objects.hash(time, host, processId, threadId, service, component, level, payload, event);
}
} |
I think in this case we should setError() on the rpc request and return it? | public void run() {
try {
byte compressionType = rpcRequest.parameters().get(0).asInt8();
if (compressionType != 0) {
rpcRequest.setError(0, "Invalid compression type: " + compressionType);
rpcRequest.returnRequest();
return;
}
int uncompressedSize = rpcRequest.parameters().get(1).asInt32();
byte[] logRequestPayload = rpcRequest.parameters().get(2).asData();
if (uncompressedSize != logRequestPayload.length) {
rpcRequest.setError(1, String.format("Invalid uncompressed size: got %d while data is of size %d ", uncompressedSize, logRequestPayload.length));
rpcRequest.returnRequest();
return;
}
logDispatcher.handle(ProtobufSerialization.fromLogRequest(logRequestPayload));
rpcRequest.returnValues().add(new Int8Value((byte)0));
byte[] responsePayload = ProtobufSerialization.toLogResponse();
rpcRequest.returnValues().add(new Int32Value(responsePayload.length));
rpcRequest.returnValues().add(new DataValue(responsePayload));
rpcRequest.returnRequest();
} catch (Exception e) {
log.log(Level.WARNING, e, () -> "Failed to handle log request: " + e.getMessage());
}
} | log.log(Level.WARNING, e, () -> "Failed to handle log request: " + e.getMessage()); | public void run() {
try {
byte compressionType = rpcRequest.parameters().get(0).asInt8();
if (compressionType != 0) {
rpcRequest.setError(ErrorCode.METHOD_FAILED, "Invalid compression type: " + compressionType);
rpcRequest.returnRequest();
return;
}
int uncompressedSize = rpcRequest.parameters().get(1).asInt32();
byte[] logRequestPayload = rpcRequest.parameters().get(2).asData();
if (uncompressedSize != logRequestPayload.length) {
rpcRequest.setError(ErrorCode.METHOD_FAILED, String.format("Invalid uncompressed size: got %d while data is of size %d ", uncompressedSize, logRequestPayload.length));
rpcRequest.returnRequest();
return;
}
logDispatcher.handle(ProtobufSerialization.fromLogRequest(logRequestPayload));
rpcRequest.returnValues().add(new Int8Value((byte)0));
byte[] responsePayload = ProtobufSerialization.toLogResponse();
rpcRequest.returnValues().add(new Int32Value(responsePayload.length));
rpcRequest.returnValues().add(new DataValue(responsePayload));
rpcRequest.returnRequest();
} catch (Exception e) {
String errorMessage = "Failed to handle log request: " + e.getMessage();
log.log(Level.WARNING, e, () -> errorMessage);
rpcRequest.setError(ErrorCode.METHOD_FAILED, errorMessage);
rpcRequest.returnRequest();
}
} | class ArchiveLogMessagesTask implements Runnable {
final Request rpcRequest;
final LogDispatcher logDispatcher;
ArchiveLogMessagesTask(Request rpcRequest, LogDispatcher logDispatcher) {
this.rpcRequest = rpcRequest;
this.logDispatcher = logDispatcher;
}
@Override
} | class ArchiveLogMessagesTask implements Runnable {
final Request rpcRequest;
final LogDispatcher logDispatcher;
ArchiveLogMessagesTask(Request rpcRequest, LogDispatcher logDispatcher) {
this.rpcRequest = rpcRequest;
this.logDispatcher = logDispatcher;
}
@Override
} |
Agree - see new commits. | public void run() {
try {
byte compressionType = rpcRequest.parameters().get(0).asInt8();
if (compressionType != 0) {
rpcRequest.setError(0, "Invalid compression type: " + compressionType);
rpcRequest.returnRequest();
return;
}
int uncompressedSize = rpcRequest.parameters().get(1).asInt32();
byte[] logRequestPayload = rpcRequest.parameters().get(2).asData();
if (uncompressedSize != logRequestPayload.length) {
rpcRequest.setError(1, String.format("Invalid uncompressed size: got %d while data is of size %d ", uncompressedSize, logRequestPayload.length));
rpcRequest.returnRequest();
return;
}
logDispatcher.handle(ProtobufSerialization.fromLogRequest(logRequestPayload));
rpcRequest.returnValues().add(new Int8Value((byte)0));
byte[] responsePayload = ProtobufSerialization.toLogResponse();
rpcRequest.returnValues().add(new Int32Value(responsePayload.length));
rpcRequest.returnValues().add(new DataValue(responsePayload));
rpcRequest.returnRequest();
} catch (Exception e) {
log.log(Level.WARNING, e, () -> "Failed to handle log request: " + e.getMessage());
}
} | log.log(Level.WARNING, e, () -> "Failed to handle log request: " + e.getMessage()); | public void run() {
try {
byte compressionType = rpcRequest.parameters().get(0).asInt8();
if (compressionType != 0) {
rpcRequest.setError(ErrorCode.METHOD_FAILED, "Invalid compression type: " + compressionType);
rpcRequest.returnRequest();
return;
}
int uncompressedSize = rpcRequest.parameters().get(1).asInt32();
byte[] logRequestPayload = rpcRequest.parameters().get(2).asData();
if (uncompressedSize != logRequestPayload.length) {
rpcRequest.setError(ErrorCode.METHOD_FAILED, String.format("Invalid uncompressed size: got %d while data is of size %d ", uncompressedSize, logRequestPayload.length));
rpcRequest.returnRequest();
return;
}
logDispatcher.handle(ProtobufSerialization.fromLogRequest(logRequestPayload));
rpcRequest.returnValues().add(new Int8Value((byte)0));
byte[] responsePayload = ProtobufSerialization.toLogResponse();
rpcRequest.returnValues().add(new Int32Value(responsePayload.length));
rpcRequest.returnValues().add(new DataValue(responsePayload));
rpcRequest.returnRequest();
} catch (Exception e) {
String errorMessage = "Failed to handle log request: " + e.getMessage();
log.log(Level.WARNING, e, () -> errorMessage);
rpcRequest.setError(ErrorCode.METHOD_FAILED, errorMessage);
rpcRequest.returnRequest();
}
} | class ArchiveLogMessagesTask implements Runnable {
final Request rpcRequest;
final LogDispatcher logDispatcher;
ArchiveLogMessagesTask(Request rpcRequest, LogDispatcher logDispatcher) {
this.rpcRequest = rpcRequest;
this.logDispatcher = logDispatcher;
}
@Override
} | class ArchiveLogMessagesTask implements Runnable {
final Request rpcRequest;
final LogDispatcher logDispatcher;
ArchiveLogMessagesTask(Request rpcRequest, LogDispatcher logDispatcher) {
this.rpcRequest = rpcRequest;
this.logDispatcher = logDispatcher;
}
@Override
} |
Constant can be statically imported to improve readability. | private void testContainerOnLogserverHost(String services, boolean useDedicatedNodeForLogserver) {
int numberOfHosts = 2;
VespaModelTester tester = new VespaModelTester();
tester.useDedicatedNodeForLogserver(useDedicatedNodeForLogserver);
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(Zone.defaultZone(), services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
Admin admin = model.getAdmin();
Logserver logserver = admin.getLogserver();
HostResource hostResource = logserver.getHostResource();
assertNotNull(hostResource.getService("logserver"));
String containerServiceType = ContainerServiceType.LOGSERVER_CONTAINER.serviceName;
assertNotNull(hostResource.getService(containerServiceType));
String configId = admin.getLogserver().getHostResource().getService(containerServiceType).getConfigId();
ApplicationMetadataConfig.Builder builder = new ApplicationMetadataConfig.Builder();
model.getConfig(builder, configId);
ApplicationMetadataConfig cfg = new ApplicationMetadataConfig(builder);
assertEquals(1, cfg.generation());
LogdConfig.Builder logdConfigBuilder = new LogdConfig.Builder();
model.getConfig(logdConfigBuilder, configId);
LogdConfig logdConfig = new LogdConfig(logdConfigBuilder);
assertTrue(logdConfig.logserver().use());
} | String containerServiceType = ContainerServiceType.LOGSERVER_CONTAINER.serviceName; | private void testContainerOnLogserverHost(String services, boolean useDedicatedNodeForLogserver) {
int numberOfHosts = 2;
VespaModelTester tester = new VespaModelTester();
tester.useDedicatedNodeForLogserver(useDedicatedNodeForLogserver);
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(Zone.defaultZone(), services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
Admin admin = model.getAdmin();
Logserver logserver = admin.getLogserver();
HostResource hostResource = logserver.getHostResource();
assertNotNull(hostResource.getService("logserver"));
String containerServiceType = ContainerServiceType.LOGSERVER_CONTAINER.serviceName;
assertNotNull(hostResource.getService(containerServiceType));
String configId = admin.getLogserver().getHostResource().getService(containerServiceType).getConfigId();
ApplicationMetadataConfig.Builder builder = new ApplicationMetadataConfig.Builder();
model.getConfig(builder, configId);
ApplicationMetadataConfig cfg = new ApplicationMetadataConfig(builder);
assertEquals(1, cfg.generation());
LogdConfig.Builder logdConfigBuilder = new LogdConfig.Builder();
model.getConfig(logdConfigBuilder, configId);
LogdConfig logdConfig = new LogdConfig(logdConfigBuilder);
assertTrue(logdConfig.logserver().use());
} | class ModelProvisioningTest {
@Test
public void testNodeCountForJdisc() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>\n" +
"\n" +
"<admin version='3.0'><nodes count='1' /></admin>\n" +
"<jdisc id='mydisc' version='1.0'>" +
" <handler id='myHandler'>" +
" <component id='injected' />" +
" </handler>" +
" <nodes count=\"3\"/>" +
"</jdisc>" +
"<jdisc id='mydisc2' version='1.0'>" +
" <document-processing/>" +
" <handler id='myHandler'>" +
" <component id='injected' />" +
" </handler>" +
" <nodes count='2' allocated-memory='45%' jvm-gc-options='-XX:+UseParNewGC' jvm-options='-verbosegc' preload='lib/blablamalloc.so'/>" +
"</jdisc>" +
"</services>";
String hosts ="<hosts>"
+ " <host name='myhost0'>"
+ " <alias>node0</alias>"
+ " </host>"
+ " <host name='myhost1'>"
+ " <alias>node1</alias>"
+ " </host>"
+ " <host name='myhost2'>"
+ " <alias>node2</alias>"
+ " </host>"
+ " <host name='myhost3'>"
+ " <alias>node3</alias>"
+ " </host>"
+ " <host name='myhost4'>"
+ " <alias>node4</alias>"
+ " </host>"
+ " <host name='myhost5'>"
+ " <alias>node5</alias>"
+ " </host>"
+ "</hosts>";
VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services);
VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true)));
ApplicationContainerCluster mydisc = model.getContainerClusters().get("mydisc");
ApplicationContainerCluster mydisc2 = model.getContainerClusters().get("mydisc2");
assertThat(mydisc.getContainers().size(), is(3));
assertThat(mydisc.getContainers().get(0).getConfigId(), is("mydisc/container.0"));
assertTrue(mydisc.getContainers().get(0).isInitialized());
assertThat(mydisc.getContainers().get(1).getConfigId(), is("mydisc/container.1"));
assertTrue(mydisc.getContainers().get(1).isInitialized());
assertThat(mydisc.getContainers().get(2).getConfigId(), is("mydisc/container.2"));
assertTrue(mydisc.getContainers().get(2).isInitialized());
assertThat(mydisc2.getContainers().size(), is(2));
assertThat(mydisc2.getContainers().get(0).getConfigId(), is("mydisc2/container.0"));
assertTrue(mydisc2.getContainers().get(0).isInitialized());
assertThat(mydisc2.getContainers().get(1).getConfigId(), is("mydisc2/container.1"));
assertTrue(mydisc2.getContainers().get(1).isInitialized());
assertThat(mydisc.getContainers().get(0).getJvmOptions(), is(""));
assertThat(mydisc.getContainers().get(1).getJvmOptions(), is(""));
assertThat(mydisc.getContainers().get(2).getJvmOptions(), is(""));
assertThat(mydisc.getContainers().get(0).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(mydisc.getContainers().get(1).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(mydisc.getContainers().get(2).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(mydisc.getMemoryPercentage(), is(Optional.empty()));
assertThat(mydisc2.getContainers().get(0).getJvmOptions(), is("-verbosegc"));
assertThat(mydisc2.getContainers().get(1).getJvmOptions(), is("-verbosegc"));
assertThat(mydisc2.getContainers().get(0).getPreLoad(), is("lib/blablamalloc.so"));
assertThat(mydisc2.getContainers().get(1).getPreLoad(), is("lib/blablamalloc.so"));
assertThat(mydisc2.getMemoryPercentage(), is(Optional.of(45)));
assertThat(mydisc2.getJvmGCOptions(), is(Optional.of("-XX:+UseParNewGC")));
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
mydisc2.getConfig(qrStartBuilder);
QrStartConfig qrsStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals(45, qrsStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory());
HostSystem hostSystem = model.getHostSystem();
assertNotNull(hostSystem.getHostByHostname("myhost0"));
assertNotNull(hostSystem.getHostByHostname("myhost1"));
assertNotNull(hostSystem.getHostByHostname("myhost2"));
assertNotNull(hostSystem.getHostByHostname("myhost3"));
assertNull(hostSystem.getHostByHostname("Nope"));
}
@Test
public void testNodeCountForContentGroup() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"\n" +
" <admin version='3.0'>" +
" <nodes count='3'/>" +
" </admin>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
int numberOfHosts = 2;
tester.addHosts(numberOfHosts);
int numberOfContentNodes = 2;
VespaModel model = tester.createModel(xmlWithNodes, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
final Map<String, ContentCluster> contentClusters = model.getContentClusters();
ContentCluster cluster = contentClusters.get("bar");
assertThat(cluster.getRootGroup().getNodes().size(), is(numberOfContentNodes));
int i = 0;
for (StorageNode node : cluster.getRootGroup().getNodes())
assertEquals(i++, node.getDistributionKey());
}
@Test
public void testSeparateClusters() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes count='1'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(3);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 1, model.getContainerClusters().get("container1").getContainers().size());
assertEquals("Heap size for container", 60, physicalMemoryPercentage(model.getContainerClusters().get("container1")));
}
@Test
public void testClusterMembership() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes count='1'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(1, model.getHostSystem().getHosts().size());
HostResource host = model.getHostSystem().getHosts().iterator().next();
assertEquals(1, host.clusterMemberships().size());
ClusterMembership membership = host.clusterMemberships().iterator().next();
assertEquals("container", membership.cluster().type().name());
assertEquals("container1", membership.cluster().id().value());
}
@Test
public void testCombinedCluster() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes of='content1'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
assertEquals("Heap size is lowered with combined clusters",
17, physicalMemoryPercentage(model.getContainerClusters().get("container1")));
}
@Test
public void testCombinedClusterWithJvmOptions() {
// jvm-options on a combined (<nodes of=...>) container cluster must reach every container.
String servicesXml =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <container version='1.0' id='container1'>" +
"     <document-processing/>" +
"     <nodes of='content1' jvm-options='testoption'/>" +
"  </container>" +
"  <content version='1.0' id='content1'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2'/>" +
"  </content>" +
"</services>";
VespaModelTester modelTester = new VespaModelTester();
modelTester.addHosts(2);
VespaModel vespaModel = modelTester.createModel(servicesXml, true);
assertEquals("Nodes in content1", 2, vespaModel.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 2, vespaModel.getContainerClusters().get("container1").getContainers().size());
// Each container instance inherits the jvm-options from the combined nodes element.
vespaModel.getContainerClusters().get("container1").getContainers()
.forEach(containerNode -> assertTrue(containerNode.getJvmOptions().contains("testoption")));
}
@Test
public void testMultipleCombinedClusters() {
// Two independent combined clusters: each container cluster mirrors its content cluster's node count.
String servicesXml =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <container version='1.0' id='container1'>" +
"     <nodes of='content1'/>" +
"  </container>" +
"  <container version='1.0' id='container2'>" +
"     <nodes of='content2'/>" +
"  </container>" +
"  <content version='1.0' id='content1'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2'/>" +
"  </content>" +
"  <content version='1.0' id='content2'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='3'/>" +
"  </content>" +
"</services>";
VespaModelTester modelTester = new VespaModelTester();
modelTester.addHosts(5);
VespaModel vespaModel = modelTester.createModel(servicesXml, true);
assertEquals("Nodes in content1", 2, vespaModel.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 2, vespaModel.getContainerClusters().get("container1").getContainers().size());
assertEquals("Nodes in content2", 3, vespaModel.getContentClusters().get("content2").getRootGroup().getNodes().size());
assertEquals("Nodes in container2", 3, vespaModel.getContainerClusters().get("container2").getContainers().size());
}
@Test
public void testNonExistingCombinedClusterReference() {
// Referencing an undefined service id in <nodes of=...> must be rejected with a clear message.
String servicesXml =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <container version='1.0' id='container1'>" +
"     <nodes of='container2'/>" +
"  </container>" +
"</services>";
VespaModelTester modelTester = new VespaModelTester();
modelTester.addHosts(2);
try {
modelTester.createModel(servicesXml, true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("container cluster 'container1' references service 'container2' but this service is not defined", expected.getMessage());
}
}
@Test
public void testInvalidCombinedClusterReference() {
// <nodes of=...> may only point at a content cluster; pointing at another container cluster fails.
String servicesXml =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <container version='1.0' id='container1'>" +
"     <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" +
"  </container>" +
"  <container version='1.0' id='container2'>" +
"     <nodes count='2'/>" +
"  </container>" +
"</services>";
VespaModelTester modelTester = new VespaModelTester();
modelTester.addHosts(2);
try {
modelTester.createModel(servicesXml, true);
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
assertEquals("container cluster 'container1' references service 'container2', but that is not a content service", expected.getMessage());
}
}
@Test
public void testUsingNodesAndGroupCountAttributes() {
// Allocation with explicit count/groups attributes: one 10-node container cluster and two
// 27-node content clusters, grouped 27/9 (3 nodes per group) and 27/27 (1 node per group).
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='27' groups='9'/>" +
"  </content>" +
"  <content version='1.0' id='baz'>" +
"     <redundancy>1</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='27' groups='27'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 64;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals(1, model.getContainerClusters().size());
Set<com.yahoo.vespa.model.Host> containerHosts = model.getContainerClusters().get("foo").getContainers().stream().map(Container::getHost).collect(Collectors.toSet());
assertEquals(10, containerHosts.size());
// Admin services (slobroks, logserver) are co-located on the container nodes, not on dedicated hosts.
Admin admin = model.getAdmin();
Set<com.yahoo.vespa.model.Host> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
assertEquals(3, slobrokHosts.size());
assertTrue("Slobroks are assigned from container nodes", containerHosts.containsAll(slobrokHosts));
assertTrue("Logserver is assigned from container nodes", containerHosts.contains(admin.getLogserver().getHost()));
assertEquals("No in-cluster config servers in a hosted environment", 0, admin.getConfigservers().size());
// NOTE(review): assertNull would be the more idiomatic assertion here.
assertEquals("No admin cluster controller when multitenant", null, admin.getClusterControllers());
// 'bar': 9 groups of 3 nodes; controllers placed on content hosts, distribution keys 0..26 in order.
ContentCluster cluster = model.getContentClusters().get("bar");
ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default54", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default51", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default48", clusterControllers.getContainers().get(2).getHostName());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(9, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertEquals("default54", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getDistributionKey(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getConfigId(), is("bar/storage/2"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/3"));
assertEquals("default51", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(4));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/4"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getDistributionKey(), is(5));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getConfigId(), is("bar/storage/5"));
assertEquals("default48", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
// Spot-check the last group to confirm the full 0..26 key sequence.
assertThat(cluster.getRootGroup().getSubgroups().get(8).getIndex(), is("8"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getDistributionKey(), is(24));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getConfigId(), is("bar/storage/24"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getDistributionKey(), is(25));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getConfigId(), is("bar/storage/25"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getDistributionKey(), is(26));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getConfigId(), is("bar/storage/26"));
// 'baz': 27 single-node groups.
cluster = model.getContentClusters().get("baz");
clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("baz-controllers", clusterControllers.getName());
assertEquals("default27", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default26", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default25", clusterControllers.getContainers().get(2).getHostName());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(27, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("baz/storage/0"));
assertEquals("default27", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("baz/storage/1"));
assertEquals("default26", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertEquals("default25", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(26).getIndex(), is("26"));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getDistributionKey(), is(26));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getConfigId(), is("baz/storage/26"));
}
@Test
public void testGroupsOfSize1() {
// 8 content nodes split into 8 single-node groups; verifies controller placement,
// distribution bits, and per-group node assignment.
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>1</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='8' groups='8'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 18;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
// Controllers are taken from the content hosts, highest-numbered first.
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default08", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default07", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default06", clusterControllers.getContainers().get(2).getHostName());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(8, cluster.getRootGroup().getSubgroups().size());
assertEquals(8, cluster.distributionBits());
// Group i holds exactly the node with distribution key i.
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertEquals("default08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/1"));
assertEquals("default07", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(7).getIndex(), is("7"));
assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getDistributionKey(), is(7));
assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getConfigId(), is("bar/storage/7"));
assertEquals("default01", cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getHostName());
}
@Test
public void testExplicitNonDedicatedClusterControllers() {
// Requests 6 non-dedicated controllers; the model rounds to the closest odd number (5)
// and places them on content-cluster hosts.
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes dedicated='false' count='6'/></controllers>" +
"     <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 19;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals( 8, cluster.distributionBits());
assertEquals("We get the closest odd number", 5, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default09", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default08", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default06", clusterControllers.getContainers().get(2).getHostName());
assertEquals("default05", clusterControllers.getContainers().get(3).getHostName());
assertEquals("default03", clusterControllers.getContainers().get(4).getHostName());
// Controller hosts coincide with content nodes (non-dedicated placement).
assertEquals("default09", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertEquals("default08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getHostName());
assertEquals("default06", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertEquals("default03", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
}
@Test
public void testClusterControllersWithGroupSize2() {
// 8 content nodes in 4 groups of 2; the controller count is rounded to the closest odd number.
String servicesXml =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='8' groups='4'/>" +
"  </content>" +
"</services>";
int hostCount = 18;
VespaModelTester modelTester = new VespaModelTester();
modelTester.addHosts(hostCount);
VespaModel model = modelTester.createModel(servicesXml, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(hostCount));
ContentCluster barCluster = model.getContentClusters().get("bar");
ClusterControllerContainerCluster controllers = barCluster.getClusterControllers();
assertEquals("We get the closest odd number", 3, controllers.getContainers().size());
assertEquals("bar-controllers", controllers.getName());
// One controller per group (first three groups), on each group's first host.
String[] expectedControllerHosts = { "default08", "default06", "default04" };
for (int i = 0; i < expectedControllerHosts.length; i++)
assertEquals(expectedControllerHosts[i], controllers.getContainers().get(i).getHostName());
}
@Test
public void testClusterControllersCanSupplementWithAllContainerClusters() {
// Asks for 5 non-dedicated controllers for a 2-node content cluster alongside two container
// clusters; verifies the resulting controller cluster ends up with a single container.
// Fixes: dropped the unused 'throws ParseException' (createModel throws no checked exception;
// see the sibling tests calling it without a throws clause) and use the concrete
// ClusterControllerContainerCluster type, consistent with the rest of this file.
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo1'>" +
"     <nodes count='2'/>" +
"  </container>" +
"  <container version='1.0' id='foo2'>" +
"     <nodes count='1'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes dedicated='false' count='5'/></controllers>" +
"     <nodes count='2'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 5;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(1, clusterControllers.getContainers().size());
}
@Test
public void testClusterControllersAreNotPlacedOnRetiredNodes() {
// Marks three hosts as retired (extra createModel varargs) and verifies that cluster
// controllers skip them and land on the next host in each group instead.
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 19;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// Trailing hostnames are the retired nodes.
VespaModel model = tester.createModel(services, true, "default09", "default06", "default03");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("Skipping retired default09", "default08", clusterControllers.getContainers().get(0).getHostName());
assertEquals("Skipping retired default06", "default05", clusterControllers.getContainers().get(1).getHostName());
assertEquals("Skipping retired default03", "default02", clusterControllers.getContainers().get(2).getHostName());
}
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() {
// A retired node keeps its slobrok, so the cluster grows from 3 to 4 members.
String servicesXml =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"</services>";
int hostCount = 10;
VespaModelTester modelTester = new VespaModelTester();
modelTester.addHosts(hostCount);
// "default09" is marked retired.
VespaModel vespaModel = modelTester.createModel(servicesXml, true, "default09");
assertThat(vespaModel.getRoot().getHostSystem().getHosts().size(), is(hostCount));
assertEquals("Includes retired node", 1+3, vespaModel.getAdmin().getSlobroks().size());
assertEquals("default01", vespaModel.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("default02", vespaModel.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("default10", vespaModel.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("Included in addition because it is retired", "default09", vespaModel.getAdmin().getSlobroks().get(3).getHostName());
}
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() {
// Same as the test above, but with two retired nodes listed last; both are kept as extra
// slobrok members after the three active ones.
// Fix: removed the unused 'throws ParseException' clause — createModel throws no checked
// exception (the sibling test above calls it without one).
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"</services>";
int numberOfHosts = 10;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// "default09" and "default08" are marked retired.
VespaModel model = tester.createModel(services, true, "default09", "default08");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals("Includes retired node", 3+2, model.getAdmin().getSlobroks().size());
assertEquals("default01", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("default02", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("default10", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("Included in addition because it is retired", "default08", model.getAdmin().getSlobroks().get(3).getHostName());
assertEquals("Included in addition because it is retired", "default09", model.getAdmin().getSlobroks().get(4).getHostName());
}
@Test
public void testSlobroksAreSpreadOverAllContainerClusters() {
// Two container clusters with three retired hosts: slobroks are drawn from both clusters,
// and each retired host that carried a slobrok stays as an extra member.
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <container version='1.0' id='bar'>" +
"     <nodes count='3'/>" +
"  </container>" +
"</services>";
int numberOfHosts = 13;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// "default12", "default03", "default02" are marked retired.
VespaModel model = tester.createModel(services, true, "default12", "default03", "default02");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals("Includes retired node", 3+3, model.getAdmin().getSlobroks().size());
assertEquals("default04", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("default13", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("Included in addition because it is retired", "default12", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("default01", model.getAdmin().getSlobroks().get(3).getHostName());
assertEquals("Included in addition because it is retired", "default02", model.getAdmin().getSlobroks().get(4).getHostName());
assertEquals("Included in addition because it is retired", "default03", model.getAdmin().getSlobroks().get(5).getHostName());
}
@Test
public void testSlobroksAreSpreadOverAllContainerClustersExceptNodeAdmin() {
// In the hosted-vespa routing application, slobroks must only be placed on the 'routing'
// cluster's hosts, never on 'node-admin' hosts.
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='routing'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <container version='1.0' id='node-admin'>" +
"     <nodes count='3'/>" +
"  </container>" +
"</services>";
int numberOfHosts = 13;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// Deploy as the hosted-vespa routing application so the node-admin exclusion applies.
tester.setApplicationId("hosted-vespa", "routing", "default");
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
Set<String> routingHosts = getClusterHostnames(model, "routing");
assertEquals(10, routingHosts.size());
Set<String> nodeAdminHosts = getClusterHostnames(model, "node-admin");
assertEquals(3, nodeAdminHosts.size());
Set<String> slobrokHosts = model.getAdmin().getSlobroks().stream()
.map(AbstractService::getHostName)
.collect(Collectors.toSet());
assertEquals(3, slobrokHosts.size());
// All slobroks on routing hosts, none on node-admin hosts.
assertThat(slobrokHosts, everyItem(isIn(routingHosts)));
assertThat(slobrokHosts, everyItem(not(isIn(nodeAdminHosts))));
}
/** Returns the hostnames of all hosts running at least one service whose 'clustername' property matches the given cluster id. */
private Set<String> getClusterHostnames(VespaModel model, String clusterId) {
Optional<String> wantedCluster = Optional.of(clusterId);
return model.getHosts().stream()
.filter(host -> host.getServices().stream()
.anyMatch(service -> wantedCluster.equals(service.getProperty("clustername"))))
.map(HostInfo::getHostname)
.collect(Collectors.toSet());
}
@Test
public void test2ContentNodesProduces1ClusterController() {
// A bare two-node content cluster (no container cluster present) ends up with exactly
// one cluster controller.
// Fix: use the concrete ClusterControllerContainerCluster type for the controller cluster,
// consistent with the other tests in this file, instead of the less specific ContainerCluster.
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 2;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(1, clusterControllers.getContainers().size());
}
@Test
public void test2ContentNodesWithContainerClusterProducesMixedClusterControllerCluster() {
// A two-node content cluster next to a container cluster still yields a single-container
// controller cluster.
// Fixes: dropped the unused 'throws ParseException' (createModel throws no checked exception;
// see the sibling tests) and use the concrete ClusterControllerContainerCluster type,
// consistent with the rest of this file.
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='3'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2'/>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(5);
VespaModel model = tester.createModel(services, true);
ContentCluster cluster = model.getContentClusters().get("bar");
ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(1, clusterControllers.getContainers().size());
}
@Ignore
@Test
public void test2ContentNodesOn2ClustersWithContainerClusterProducesMixedClusterControllerCluster() throws ParseException {
// Intended scenario: two 2-node content clusters supplemented with controllers from a shared
// container cluster, where a container host used for one cluster is not reused for the other.
// NOTE(review): this test is internally inconsistent — it asserts clusterControllers1 has
// size 1 but then reads containers at indices 1 and 2. Presumably that is why it is
// @Ignore'd; confirm intended sizes before re-enabling.
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container version='1.0' id='container'>" +
"     <nodes count='3' flavor='container-node'/>" +
"  </container>" +
"  <content version='1.0' id='content1'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2' flavor='content1-node'/>" +
"  </content>" +
"  <content version='1.0' id='content2'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2' flavor='content2-node'/>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts("container-node", 3);
tester.addHosts("content1-node", 2);
tester.addHosts("content2-node", 2);
VespaModel model = tester.createModel(services, true);
ContentCluster cluster1 = model.getContentClusters().get("content1");
ClusterControllerContainerCluster clusterControllers1 = cluster1.getClusterControllers();
assertEquals(1, clusterControllers1.getContainers().size());
assertEquals("content1-node0", clusterControllers1.getContainers().get(0).getHostName());
assertEquals("content1-node1", clusterControllers1.getContainers().get(1).getHostName());
assertEquals("container-node0", clusterControllers1.getContainers().get(2).getHostName());
ContentCluster cluster2 = model.getContentClusters().get("content2");
ClusterControllerContainerCluster clusterControllers2 = cluster2.getClusterControllers();
assertEquals(3, clusterControllers2.getContainers().size());
assertEquals("content2-node0", clusterControllers2.getContainers().get(0).getHostName());
assertEquals("content2-node1", clusterControllers2.getContainers().get(1).getHostName());
assertEquals("We do not pick the container used to supplement another cluster",
"container-node1", clusterControllers2.getContainers().get(2).getHostName());
}
@Test
public void testExplicitDedicatedClusterControllers() {
// dedicated='true' controllers get their own hosts: exactly the requested 4, even count allowed.
String servicesXml =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes dedicated='true' count='4'/></controllers>" +
"     <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
int hostCount = 23;
VespaModelTester modelTester = new VespaModelTester();
modelTester.addHosts(hostCount);
VespaModel model = modelTester.createModel(servicesXml, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(hostCount));
ContentCluster barCluster = model.getContentClusters().get("bar");
ClusterControllerContainerCluster controllers = barCluster.getClusterControllers();
assertEquals(4, controllers.getContainers().size());
assertEquals("bar-controllers", controllers.getName());
String[] expectedControllerHosts = { "default04", "default03", "default02", "default01" };
for (int i = 0; i < expectedControllerHosts.length; i++)
assertEquals(expectedControllerHosts[i], controllers.getContainers().get(i).getHostName());
}
@Test
public void testLogserverContainerWhenDedicatedLogserver() {
// Logserver nodes are declared explicitly in services.xml, so the implicit
// dedicated-logserver-node behavior is disabled for this case.
String servicesXml =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'>" +
"    <logservers>" +
"      <nodes count='1' dedicated='true'/>" +
"    </logservers>" +
"  </admin>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='1'/>" +
"  </container>" +
"</services>";
testContainerOnLogserverHost(servicesXml, /* useDedicatedNodeForLogserver */ false);
}
@Test
public void testImplicitLogserverContainer() {
// No explicit logserver declaration: rely on the implicit dedicated-logserver-node behavior.
String servicesXml =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='1'/>" +
"  </container>" +
"</services>";
testContainerOnLogserverHost(servicesXml, /* useDedicatedNodeForLogserver */ true);
}
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() {
// Requests 24 nodes in 3 groups but supplies only 6 hosts; with the second createModel
// argument false the model is still built, scaled down to 3 groups of 2 nodes with the
// redundancy settings adjusted to the available capacity.
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy reply-after='3'>4</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='24' groups='3'/>" +
"     <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
"  </content>" +
"</services>";
int numberOfHosts = 6;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
// Effective redundancy = 2 per group x 3 groups.
assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(2*3, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(2*3, cluster.redundancy().effectiveReadyCopies());
assertEquals("2|2|*", cluster.getRootGroup().getPartitions().get());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(3, cluster.getRootGroup().getSubgroups().size());
// Each of the 3 groups ends up with 2 nodes, distribution keys assigned in order 0..5.
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/2"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/3"));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getIndex(), is("2"));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().size(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getDistributionKey(), is(4));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getConfigId(), is("bar/storage/4"));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getDistributionKey(), is(5));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getConfigId(), is("bar/storage/5"));
}
@Test
public void testUsingNodesCountAttributesAndGettingTooFewNodes() {
    // Requests 24 content nodes but provisions only 4 hosts; the cluster must
    // scale redundancy, ready copies and dispatch groups down to the 4 nodes it got.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <admin version='3.0'>" +
            "    <nodes count='3'/>" +
            "  </admin>" +
            "  <content version='1.0' id='bar'>" +
            "     <redundancy reply-after='8'>12</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='24'/>" +
            "     <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
            "     <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
            "  </content>" +
            "</services>";
    int numberOfHosts = 4;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, false);
    assertEquals(numberOfHosts, model.getRoot().getHostSystem().getHosts().size());

    ContentCluster cluster = model.getContentClusters().get("bar");
    // Redundancy settings are capped by the number of nodes actually obtained.
    assertEquals(4, cluster.redundancy().effectiveInitialRedundancy());
    assertEquals(4, cluster.redundancy().effectiveFinalRedundancy());
    assertEquals(4, cluster.redundancy().effectiveReadyCopies());
    assertEquals(4, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
    assertFalse(cluster.getRootGroup().getPartitions().isPresent());
    assertEquals(0, cluster.getRootGroup().getSubgroups().size());
    assertEquals(4, cluster.getRootGroup().getNodes().size());
    // Distribution keys and config ids are assigned sequentially from 0.
    for (int i = 0; i < 4; i++) {
        assertEquals(i, cluster.getRootGroup().getNodes().get(i).getDistributionKey());
        assertEquals("bar/storage/" + i, cluster.getRootGroup().getNodes().get(i).getConfigId());
    }
}
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() throws ParseException {
    // Requests 24 nodes in 3 groups, but only a single host exists:
    // redundancy, grouping and cluster controllers all collapse to one node.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='3.0'>" +
            "    <nodes count='3'/>" +
            "  </admin>" +
            "  <content version='1.0' id='bar'>" +
            "     <redundancy reply-after='3'>4</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='24' groups='3'/>" +
            "     <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
            "  </content>" +
            "</services>";
    int numberOfHosts = 1;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, false);
    assertEquals(numberOfHosts, model.getRoot().getHostSystem().getHosts().size());

    ContentCluster cluster = model.getContentClusters().get("bar");
    // A single cluster controller ends up on the lone host.
    ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
    assertEquals(1, clusterControllers.getContainers().size());
    assertEquals("bar-controllers", clusterControllers.getName());
    assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
    assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
    assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
    assertEquals(1, cluster.redundancy().effectiveReadyCopies());
    assertFalse(cluster.getRootGroup().getPartitions().isPresent());
    assertEquals(0, cluster.getRootGroup().getSubgroups().size());
    assertEquals(1, cluster.getRootGroup().getNodes().size());
    assertEquals(0, cluster.getRootGroup().getNodes().get(0).getDistributionKey());
    assertEquals("bar/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
}
// Asking for more nodes than are available with required='true' must make
// model building fail instead of silently scaling the cluster down.
@Test(expected = IllegalArgumentException.class)
public void testRequiringMoreNodesThanAreAvailable() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>1</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='3' required='true'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 2;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
tester.createModel(services, false);  // throws: 3 nodes required, only 2 hosts present
}
@Test
public void testUsingNodesCountAttributesAndGettingJustOneNode() {
    // Requests 24 nodes but only one host exists; everything
    // (redundancy, ready copies, dispatch groups) collapses to a single node.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='3.0'>" +
            "    <nodes count='3'/>" +
            "  </admin>" +
            "  <content version='1.0' id='bar'>" +
            "     <redundancy reply-after='8'>12</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='24'/>" +
            "     <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
            "     <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
            "  </content>" +
            "</services>";
    int numberOfHosts = 1;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, false);
    assertEquals(numberOfHosts, model.getRoot().getHostSystem().getHosts().size());

    ContentCluster cluster = model.getContentClusters().get("bar");
    assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
    assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
    assertEquals(1, cluster.redundancy().effectiveReadyCopies());
    assertEquals(1, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
    assertFalse(cluster.getRootGroup().getPartitions().isPresent());
    assertEquals(0, cluster.getRootGroup().getSubgroups().size());
    assertEquals(1, cluster.getRootGroup().getNodes().size());
    assertEquals(0, cluster.getRootGroup().getNodes().get(0).getDistributionKey());
    assertEquals("bar/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
}
// Every service element (logserver, slobroks, containers, controllers, content)
// requests a distinct named flavor; the tester registers exactly the matching
// host counts, so all 23 hosts must be used if flavor matching works.
@Test
public void testRequestingSpecificFlavors() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'>" +
"    <logservers><nodes count='1' dedicated='true' flavor='logserver-flavor'/></logservers>" +
"    <slobroks><nodes count='2' dedicated='true' flavor='slobrok-flavor'/></slobroks>" +
"  </admin>" +
"  <container version='1.0' id='container'>" +
"     <nodes count='4' flavor='container-flavor'/>" +
"  </container>" +
"  <content version='1.0' id='foo'>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes count='2' dedicated='true' flavor='controller-foo-flavor'/></controllers>" +
"     <nodes count='5' flavor='content-foo-flavor'/>" +
"  </content>" +
"  <content version='1.0' id='bar'>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes count='3' dedicated='true' flavor='controller-bar-flavor'/></controllers>" +
"     <nodes count='6' flavor='content-bar-flavor'/>" +
"  </content>" +
"</services>";
int totalHosts = 23;  // 1 + 2 + 4 + 2 + 5 + 3 + 6
VespaModelTester tester = new VespaModelTester();
tester.addHosts("logserver-flavor", 1);
tester.addHosts("slobrok-flavor", 2);
tester.addHosts("container-flavor", 4);
tester.addHosts("controller-foo-flavor", 2);
tester.addHosts("content-foo-flavor", 5);
tester.addHosts("controller-bar-flavor", 3);
tester.addHosts("content-bar-flavor", 6);
VespaModel model = tester.createModel(services, true, 0);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(totalHosts));
}
@Test
public void testJDiscOnly() {
    // A jdisc-only application (no content cluster) still gets a logserver
    // and one slobrok per container node.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<jdisc version='1.0'>" +
            "  <search/>" +
            "  <nodes count='3'/>" +
            "</jdisc>";
    int hostCount = 3;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(hostCount);
    VespaModel model = tester.createModel(services, true);
    assertEquals(hostCount, model.getRoot().getHostSystem().getHosts().size());
    assertEquals(3, model.getContainerClusters().get("jdisc").getContainers().size());
    assertNotNull(model.getAdmin().getLogserver());
    assertEquals(3, model.getAdmin().getSlobroks().size());
}
@Test
public void testJvmArgs() {
    // The deprecated 'jvmargs' attribute is still honored and ends up
    // as the containers' assigned JVM options.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<jdisc version='1.0'>" +
            "  <search/>" +
            "  <nodes jvmargs='xyz' count='3'/>" +
            "</jdisc>";
    int hostCount = 3;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(hostCount);
    VespaModel model = tester.createModel(services, true);
    assertEquals(hostCount, model.getRoot().getHostSystem().getHosts().size());
    assertEquals("xyz", model.getContainerClusters().get("jdisc").getContainers().get(0).getAssignedJvmOptions());
}
@Test
public void testJvmOptions() {
    // The 'jvm-options' attribute sets the containers' assigned JVM options.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<jdisc version='1.0'>" +
            "  <search/>" +
            "  <nodes jvm-options='xyz' count='3'/>" +
            "</jdisc>";
    int hostCount = 3;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(hostCount);
    VespaModel model = tester.createModel(services, true);
    assertEquals(hostCount, model.getRoot().getHostSystem().getHosts().size());
    assertEquals("xyz", model.getContainerClusters().get("jdisc").getContainers().get(0).getAssignedJvmOptions());
}
@Test
public void testJvmOptionsOverridesJvmArgs() {
    // Specifying both jvm-options and the deprecated jvmargs is an error,
    // with a message telling the user how to resolve it.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<jdisc version='1.0'>" +
            "  <search/>" +
            "  <nodes jvm-options='xyz' jvmargs='abc' count='3'/>" +
            "</jdisc>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(3);
    try {
        tester.createModel(services, true);
        fail("Expected exception");
    }
    catch (IllegalArgumentException e) {
        assertEquals("You have specified both jvm-options='xyz' and deprecated jvmargs='abc'. Merge jvmargs into jvm-options.", e.getMessage());
    }
}
// Host aliases in services.xml must keep working when a host provisioner
// is in use (hosted mode), resolving to provisioned hosts.
@Test
public void testUsingHostaliasWithProvisioner() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"<admin version='2.0'>" +
"  <adminserver hostalias='node1'/>\n"+
"</admin>\n" +
"<jdisc id='mydisc' version='1.0'>" +
"  <handler id='myHandler'>" +
"    <component id='injected' />" +
"  </handler>" +
"  <nodes>" +
"    <node hostalias='node1'/>" +
"  </nodes>" +
"</jdisc>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
}
@Test
public void testThatStandaloneSyntaxWorksOnHostedVespa() {
    // Standalone-style services (explicit http server, no nodes element)
    // must still produce a one-host, one-cluster model in hosted mode.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<jdisc id='foo' version='1.0'>" +
            "  <http>" +
            "    <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" +
            "  </http>" +
            "</jdisc>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(1);
    VespaModel model = tester.createModel(services, true);
    assertEquals(1, model.getHosts().size());
    assertEquals(1, model.getContainerClusters().size());
}
// Clusters declared without a <nodes> element default to a single node,
// and all clusters share that one host.
@Test
public void testNoNodeTagMeans1Node() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <jdisc id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"  </jdisc>" +
"  <content version='1.0' id='bar'>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes());
}
// Same default-to-one-node behavior as above, for a container-only application.
@Test
public void testNoNodeTagMeans1NodeNoContent() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <jdisc id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"  </jdisc>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
}
// Default-to-one-node behavior also holds when not running hosted
// (tester.setHosted(false)).
@Test
public void testNoNodeTagMeans1NodeNonHosted() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <jdisc id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"  </jdisc>" +
"  <content version='1.0' id='bar'>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.setHosted(false);
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size());
}
// Non-hosted model where both the container and content cluster explicitly
// reference the same single host alias.
@Test
public void testSingleNodeNonHosted() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <jdisc id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"    <nodes><node hostalias='foo'/></nodes>"+
"  </jdisc>" +
"  <content version='1.0' id='bar'>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes><node hostalias='foo' distribution-key='0'/></nodes>"+
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.setHosted(false);
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes());
}
/** Recreates the multitenant-but-not-hosted combination used in some factory tests. */
@Test
public void testMultitenantButNotHosted() {
    String services =
            "<?xml version='1.0' encoding='UTF-8' ?>" +
            "<services version='1.0'>" +
            "  <admin version='2.0'>" +
            "    <adminserver hostalias='node1'/>" +
            "  </admin>"  +
            "  <jdisc id='default' version='1.0'>" +
            "    <search/>" +
            "    <nodes>" +
            "      <node hostalias='node1'/>" +
            "    </nodes>" +
            "  </jdisc>" +
            "  <content id='storage' version='1.0'>" +
            "    <redundancy>2</redundancy>" +
            "    <group>" +
            "      <node distribution-key='0' hostalias='node1'/>" +
            "      <node distribution-key='1' hostalias='node1'/>" +
            "    </group>" +
            "    <tuning>" +
            "      <cluster-controller>" +
            "        <transition-time>0</transition-time>" +
            "      </cluster-controller>" +
            "    </tuning>" +
            "    <documents>" +
            "      <document mode='store-only' type='type1'/>" +
            "    </documents>" +
            "    <engine>" +
            "      <proton/>" +
            "    </engine>" +
            "  </content>" +
            " </services>";
    VespaModel model = createNonProvisionedMultitenantModel(services);
    assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
    // Both content nodes and the single cluster controller share the one host.
    ContentCluster content = model.getContentClusters().get("storage");
    assertEquals(2, content.getRootGroup().getNodes().size());
    ContainerCluster controller = content.getClusterControllers();
    assertEquals(1, controller.getContainers().size());
}
// A content cluster may delegate document processing to an existing container
// cluster via <document-processing cluster="container"/>; the model must still
// build with one shared host and one cluster controller.
@Test
public void testModelWithReferencedIndexingCluster() {
String services =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<services version=\"1.0\">\n" +
"\n" +
"  <admin version=\"2.0\">\n" +
"    <adminserver hostalias=\"vespa-1\"/>\n" +
"    <configservers>\n" +
"      <configserver hostalias=\"vespa-1\"/>\n" +
"    </configservers>\n" +
"  </admin>\n" +
"\n" +
"  <container id=\"container\" version=\"1.0\">\n" +
"    <document-processing/>\n" +
"    <document-api/>\n" +
"    <search/>\n" +
"    <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" +
"      <node hostalias=\"vespa-1\"/>\n" +
"    </nodes>\n" +
"  </container>\n" +
"\n" +
"  <content id=\"storage\" version=\"1.0\">\n" +
"    <search>\n" +
"      <visibility-delay>1.0</visibility-delay>\n" +
"    </search>\n" +
"    <redundancy>2</redundancy>\n" +
"    <documents>\n" +
"      <document type=\"type1\" mode=\"index\"/>\n" +
"      <document-processing cluster=\"container\"/>\n" +
"    </documents>\n" +
"    <nodes>\n" +
"      <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
"    </nodes>\n" +
"  </content>\n" +
"\n" +
"</services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(1));
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(1, content.getRootGroup().getNodes().size());
ContainerCluster controller = content.getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
// Non-hosted model where the same three hosts (declared in hosts.xml) are
// shared between the container cluster and the content cluster.
@Test
public void testSharedNodesNotHosted() {
String hosts =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<hosts>\n" +
"  <host name=\"vespa-1\">\n" +
"    <alias>vespa-1</alias>\n" +
"  </host>\n" +
"  <host name=\"vespa-2\">\n" +
"    <alias>vespa-2</alias>\n" +
"  </host>\n" +
"  <host name=\"vespa-3\">\n" +
"    <alias>vespa-3</alias>\n" +
"  </host>\n" +
"</hosts>";
String services =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<services version=\"1.0\">\n" +
"\n" +
"  <admin version=\"2.0\">\n" +
"    <adminserver hostalias=\"vespa-1\"/>\n" +
"    <configservers>\n" +
"      <configserver hostalias=\"vespa-1\"/>\n" +
"    </configservers>\n" +
"  </admin>\n" +
"\n" +
"  <container id=\"container\" version=\"1.0\">\n" +
"    <document-processing/>\n" +
"    <document-api/>\n" +
"    <search/>\n" +
"    <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" +
"      <node hostalias=\"vespa-1\"/>\n" +
"      <node hostalias=\"vespa-2\"/>\n" +
"      <node hostalias=\"vespa-3\"/>\n" +
"    </nodes>\n" +
"  </container>\n" +
"\n" +
"  <content id=\"storage\" version=\"1.0\">\n" +
"    <search>\n" +
"      <visibility-delay>1.0</visibility-delay>\n" +
"    </search>\n" +
"    <redundancy>2</redundancy>\n" +
"    <documents>\n" +
"      <document type=\"type1\" mode=\"index\"/>\n" +
"      <document-processing cluster=\"container\"/>\n" +
"    </documents>\n" +
"    <nodes>\n" +
"      <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
"      <node hostalias=\"vespa-2\" distribution-key=\"1\"/>\n" +
"      <node hostalias=\"vespa-3\" distribution-key=\"2\"/>\n" +
"    </nodes>\n" +
"  </content>\n" +
"\n" +
"</services>";
VespaModel model = createNonProvisionedModel(false, hosts, services);
assertEquals(3, model.getRoot().getHostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(3, content.getRootGroup().getNodes().size());
}
@Test
public void testMultitenantButNotHostedSharedContentNode() {
    // Two content clusters ('storage' and 'search') share the same single host
    // in a multitenant, non-hosted setup.
    String services =
            "<?xml version='1.0' encoding='UTF-8' ?>" +
            "<services version='1.0'>" +
            "  <admin version='2.0'>" +
            "    <adminserver hostalias='node1'/>" +
            "  </admin>"  +
            "  <jdisc id='default' version='1.0'>" +
            "    <search/>" +
            "    <nodes>" +
            "      <node hostalias='node1'/>" +
            "    </nodes>" +
            "  </jdisc>" +
            "  <content id='storage' version='1.0'>" +
            "    <redundancy>2</redundancy>" +
            "    <group>" +
            "      <node distribution-key='0' hostalias='node1'/>" +
            "      <node distribution-key='1' hostalias='node1'/>" +
            "    </group>" +
            "    <tuning>" +
            "      <cluster-controller>" +
            "        <transition-time>0</transition-time>" +
            "      </cluster-controller>" +
            "    </tuning>" +
            "    <documents>" +
            "      <document mode='store-only' type='type1'/>" +
            "    </documents>" +
            "    <engine>" +
            "      <proton/>" +
            "    </engine>" +
            "  </content>" +
            "  <content id='search' version='1.0'>" +
            "    <redundancy>2</redundancy>" +
            "    <group>" +
            "      <node distribution-key='0' hostalias='node1'/>" +
            "    </group>" +
            "    <documents>" +
            "      <document type='type1'/>" +
            "    </documents>" +
            "  </content>" +
            " </services>";
    VespaModel model = createNonProvisionedMultitenantModel(services);
    assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
    ContentCluster content = model.getContentClusters().get("storage");
    assertEquals(2, content.getRootGroup().getNodes().size());
    ContainerCluster controller = content.getClusterControllers();
    assertEquals(1, controller.getContainers().size());
}
/** Builds a multitenant model from services only (no hosts.xml, no provisioner). */
private VespaModel createNonProvisionedMultitenantModel(String services) {
return createNonProvisionedModel(true, null, services);
}
/**
 * Builds a model directly from a mock application package, bypassing the
 * host provisioner.
 *
 * @param multitenant whether the deploy properties should mark the model multitenant
 * @param hosts       hosts.xml content, or null to omit it
 * @param services    services.xml content
 */
private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) {
    VespaModelCreatorWithMockPkg creator =
            new VespaModelCreatorWithMockPkg(hosts, services, ApplicationPackageUtils.generateSearchDefinition("type1"));
    DeployState deployState = new DeployState.Builder()
            .applicationPackage(creator.appPkg)
            .properties(new TestProperties().setMultitenant(multitenant))
            .build();
    return creator.create(false, deployState);
}
// TLD config ids must be deterministic: a TLD and the container running on the
// same host must share the same index, both on a fresh allocation and after a
// host ("default0") has been retired (second scenario, start index 1).
@Test
public void testThatTldConfigIdsAreDeterministic() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <jdisc version='1.0' id='jdisc0'>" +
"    <search/>" +
"    <nodes count='2'/>" +
"  </jdisc>" +
"  <jdisc version='1.0' id='jdisc1'>" +
"    <search/>" +
"    <nodes count='2'/>" +
"  </jdisc>" +
"  <content version='1.0' id='content0'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2'/>" +
"  </content>" +
"  <content version='1.0' id='content1'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 8;
{ // Scenario 1: exact number of hosts, container ids start at 0.
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
Map<String, ContentCluster> contentClusters = model.getContentClusters();
assertEquals(2, contentClusters.size());
checkThatTldAndContainerRunningOnSameHostHaveSameId(
model.getContainerClusters().values(),
model.getContentClusters().values(),
0);
}
{ // Scenario 2: one spare host and 'default0' retired, container ids start at 1.
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts + 1);
VespaModel model = tester.createModel(services, true, 1, "default0");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
Map<String, ContentCluster> contentClusters = model.getContentClusters();
assertEquals(2, contentClusters.size());
checkThatTldAndContainerRunningOnSameHostHaveSameId(
model.getContainerClusters().values(),
model.getContentClusters().values(),
1);
}
}
/**
 * Verifies that for every content cluster, the TLD co-located with each
 * container shares that container's host and index.
 *
 * Assumes each container cluster has exactly 2 containers (the inner loop is
 * fixed at j in [0,2)); TLDs are laid out per container cluster, so TLD number
 * 2*i+j belongs to container j of container cluster i.
 *
 * @param startIndexForContainerIds offset added to container indices, used when
 *                                  earlier hosts have been retired
 */
private void checkThatTldAndContainerRunningOnSameHostHaveSameId(Collection<ApplicationContainerCluster> containerClusters,
Collection<ContentCluster> contentClusters,
int startIndexForContainerIds) {
for (ContentCluster contentCluster : contentClusters) {
String contentClusterName = contentCluster.getName();
int i = 0;  // index of the container cluster within the iteration order
for (ApplicationContainerCluster containerCluster : containerClusters) {
String containerClusterName = containerCluster.getName();
for (int j = 0; j < 2; j++) {
// TLD 2*i+j is the one co-located with container j of container cluster i.
Dispatch tld = contentCluster.getSearch().getIndexed().getTLDs().get(2 * i + j);
ApplicationContainer container = containerCluster.getContainers().get(j);
int containerConfigIdIndex = j + startIndexForContainerIds;
assertEquals(container.getHostName(), tld.getHostname());
assertEquals(contentClusterName + "/search/cluster." + contentClusterName + "/tlds/" +
containerClusterName + "." + containerConfigIdIndex + ".tld." + containerConfigIdIndex,
tld.getConfigId());
assertEquals(containerClusterName + "/" + "container." + containerConfigIdIndex,
container.getConfigId());
}
i++;
}
}
}
/** Resolves the cluster's QrStartConfig and returns the JVM heap size as a percentage of physical memory. */
private int physicalMemoryPercentage(ContainerCluster cluster) {
    QrStartConfig.Builder builder = new QrStartConfig.Builder();
    cluster.getConfig(builder);
    QrStartConfig config = new QrStartConfig(builder);
    return config.jvm().heapSizeAsPercentageOfPhysicalMemory();
}
// Proton config must be derived from the node flavor: a flavor with
// fastDisk=false yields the slow-disk write speed (40) on every search node.
@Test
public void require_that_proton_config_is_tuned_based_on_node_flavor() {
String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
"<services>",
"  <content version='1.0' id='test'>",
"    <documents>",
"      <document type='type1' mode='index'/>",
"    </documents>",
"    <nodes count='2' flavor='content-test-flavor'/>",
"  </content>",
"</services>");
VespaModelTester tester = new VespaModelTester();
tester.addHosts(createFlavorFromDiskSetting("content-test-flavor", false), 2);  // fastDisk=false
VespaModel model = tester.createModel(services, true, 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
assertEquals(2, cluster.getSearchNodes().size());
assertEquals(40, getProtonConfig(cluster, 0).hwinfo().disk().writespeed(), 0.001);
assertEquals(40, getProtonConfig(cluster, 1).hwinfo().disk().writespeed(), 0.001);
}
/** Creates a flavor whose only interesting property is the disk-speed flag. */
private static Flavor createFlavorFromDiskSetting(String name, boolean fastDisk) {
    FlavorsConfig.Flavor.Builder builder = new FlavorsConfig.Flavor.Builder();
    builder.name(name).fastDisk(fastDisk);
    return new Flavor(new FlavorsConfig.Flavor(builder));
}
/** Resolves the ProtonConfig produced for the given search node of the cluster. */
private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) {
    List<SearchNode> searchNodes = cluster.getSearchNodes();
    assertTrue(searchNodeIdx < searchNodes.size());
    ProtonConfig.Builder builder = new ProtonConfig.Builder();
    searchNodes.get(searchNodeIdx).getConfig(builder);
    return new ProtonConfig(builder);
}
// Precedence of proton settings, most specific first:
//   explicit <config> override (maxtlssize=2000)
//   > explicit <tuning> (maxmemorygain=1000)
//   > flavor-derived defaults (maxmemory from the 128 GB flavor).
@Test
public void require_that_config_override_and_explicit_proton_tuning_have_precedence_over_default_node_flavor_tuning() {
String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
"<services>",
"  <content version='1.0' id='test'>",
"    <config name='vespa.config.search.core.proton'>",
"      <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>",
"    </config>",
"    <documents>",
"      <document type='type1' mode='index'/>",
"    </documents>",
"    <nodes count='1' flavor='content-test-flavor'/>",
"    <engine>",
"      <proton>",
"        <tuning>",
"          <searchnode>",
"            <flushstrategy>",
"              <native>",
"                <total>",
"                  <maxmemorygain>1000</maxmemorygain>",
"                </total>",
"              </native>",
"            </flushstrategy>",
"          </searchnode>",
"        </tuning>",
"      </proton>",
"    </engine>",
"  </content>",
"</services>");
VespaModelTester tester = new VespaModelTester();
tester.addHosts("default", 1);
tester.addHosts(createFlavorFromMemoryAndDisk("content-test-flavor", 128, 100), 1);
VespaModel model = tester.createModel(services, true, 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId());
assertEquals(2000, cfg.flush().memory().maxtlssize()); // from explicit config override
assertEquals(1000, cfg.flush().memory().maxmemory()); // from explicit tuning
assertEquals((long) 16 * GB, cfg.flush().memory().each().maxmemory()); // from flavor
}
/** Bytes per gigabyte; used when asserting memory-derived proton config values. */
private static final long GB = 1024 * 1024 * 1024;
/** Creates a flavor with the given minimum main memory and disk sizes (in GB). */
private static Flavor createFlavorFromMemoryAndDisk(String name, int memoryGb, int diskGb) {
    FlavorsConfig.Flavor.Builder builder = new FlavorsConfig.Flavor.Builder();
    builder.name(name).minMainMemoryAvailableGb(memoryGb).minDiskAvailableGb(diskGb);
    return new Flavor(new FlavorsConfig.Flavor(builder));
}
/** Resolves the ProtonConfig the model produces for the given config id. */
private static ProtonConfig getProtonConfig(VespaModel model, String configId) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
model.getConfig(builder, configId);
return new ProtonConfig(builder);
}
}
class ModelProvisioningTest {
// Two jdisc clusters with explicit node counts: verifies container config ids,
// initialization, JVM options/GC options, preload libraries, memory percentage,
// and that all declared hosts are resolvable by name.
@Test
public void testNodeCountForJdisc() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>\n" +
"\n" +
"<admin version='3.0'><nodes count='1' /></admin>\n" +
"<jdisc id='mydisc' version='1.0'>" +
"  <handler id='myHandler'>" +
"    <component id='injected' />" +
"  </handler>" +
"  <nodes count=\"3\"/>" +
"</jdisc>" +
"<jdisc id='mydisc2' version='1.0'>" +
"  <document-processing/>" +
"  <handler id='myHandler'>" +
"    <component id='injected' />" +
"  </handler>" +
"  <nodes count='2' allocated-memory='45%' jvm-gc-options='-XX:+UseParNewGC' jvm-options='-verbosegc' preload='lib/blablamalloc.so'/>" +
"</jdisc>" +
"</services>";
String hosts ="<hosts>"
+ " <host name='myhost0'>"
+ "  <alias>node0</alias>"
+ " </host>"
+ " <host name='myhost1'>"
+ "  <alias>node1</alias>"
+ " </host>"
+ " <host name='myhost2'>"
+ "  <alias>node2</alias>"
+ " </host>"
+ " <host name='myhost3'>"
+ "  <alias>node3</alias>"
+ " </host>"
+ " <host name='myhost4'>"
+ "  <alias>node4</alias>"
+ " </host>"
+ " <host name='myhost5'>"
+ "  <alias>node5</alias>"
+ " </host>"
+ "</hosts>";
VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services);
VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true)));
ApplicationContainerCluster mydisc = model.getContainerClusters().get("mydisc");
ApplicationContainerCluster mydisc2 = model.getContainerClusters().get("mydisc2");
// 'mydisc': 3 containers with sequential config ids, all initialized.
assertThat(mydisc.getContainers().size(), is(3));
assertThat(mydisc.getContainers().get(0).getConfigId(), is("mydisc/container.0"));
assertTrue(mydisc.getContainers().get(0).isInitialized());
assertThat(mydisc.getContainers().get(1).getConfigId(), is("mydisc/container.1"));
assertTrue(mydisc.getContainers().get(1).isInitialized());
assertThat(mydisc.getContainers().get(2).getConfigId(), is("mydisc/container.2"));
assertTrue(mydisc.getContainers().get(2).isInitialized());
assertThat(mydisc2.getContainers().size(), is(2));
assertThat(mydisc2.getContainers().get(0).getConfigId(), is("mydisc2/container.0"));
assertTrue(mydisc2.getContainers().get(0).isInitialized());
assertThat(mydisc2.getContainers().get(1).getConfigId(), is("mydisc2/container.1"));
assertTrue(mydisc2.getContainers().get(1).isInitialized());
// 'mydisc' uses defaults: no JVM options, default preload, no memory percentage.
assertThat(mydisc.getContainers().get(0).getJvmOptions(), is(""));
assertThat(mydisc.getContainers().get(1).getJvmOptions(), is(""));
assertThat(mydisc.getContainers().get(2).getJvmOptions(), is(""));
assertThat(mydisc.getContainers().get(0).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(mydisc.getContainers().get(1).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(mydisc.getContainers().get(2).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(mydisc.getMemoryPercentage(), is(Optional.empty()));
// 'mydisc2' picks up the explicit attributes from its <nodes> element.
assertThat(mydisc2.getContainers().get(0).getJvmOptions(), is("-verbosegc"));
assertThat(mydisc2.getContainers().get(1).getJvmOptions(), is("-verbosegc"));
assertThat(mydisc2.getContainers().get(0).getPreLoad(), is("lib/blablamalloc.so"));
assertThat(mydisc2.getContainers().get(1).getPreLoad(), is("lib/blablamalloc.so"));
assertThat(mydisc2.getMemoryPercentage(), is(Optional.of(45)));
assertThat(mydisc2.getJvmGCOptions(), is(Optional.of("-XX:+UseParNewGC")));
// allocated-memory='45%' must surface in the resolved QrStartConfig as well.
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
mydisc2.getConfig(qrStartBuilder);
QrStartConfig qrsStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals(45, qrsStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory());
HostSystem hostSystem = model.getHostSystem();
assertNotNull(hostSystem.getHostByHostname("myhost0"));
assertNotNull(hostSystem.getHostByHostname("myhost1"));
assertNotNull(hostSystem.getHostByHostname("myhost2"));
assertNotNull(hostSystem.getHostByHostname("myhost3"));
assertNull(hostSystem.getHostByHostname("Nope"));
}
@Test
public void testNodeCountForContentGroup() {
    // Content nodes in the root group get sequential distribution keys from 0.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "\n" +
            "  <admin version='3.0'>" +
            "    <nodes count='3'/>" +
            "  </admin>" +
            "  <content version='1.0' id='bar'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    int numberOfHosts = 2;
    tester.addHosts(numberOfHosts);
    int numberOfContentNodes = 2;
    VespaModel model = tester.createModel(xmlWithNodes, true);
    assertEquals(numberOfHosts, model.getRoot().getHostSystem().getHosts().size());
    Map<String, ContentCluster> contentClusters = model.getContentClusters();
    ContentCluster cluster = contentClusters.get("bar");
    assertEquals(numberOfContentNodes, cluster.getRootGroup().getNodes().size());
    int expectedKey = 0;
    for (StorageNode node : cluster.getRootGroup().getNodes())
        assertEquals(expectedKey++, node.getDistributionKey());
}
@Test
public void testSeparateClusters() {
    // Container and content clusters declared separately get disjoint node sets,
    // and a stand-alone container keeps the default (full) heap percentage.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "     <search/>" +
            "     <nodes count='1'/>" +
            "  </container>" +
            "  <content version='1.0' id='content1'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(3);
    VespaModel model = tester.createModel(xmlWithNodes, true);
    int contentNodes = model.getContentClusters().get("content1").getRootGroup().getNodes().size();
    int containerNodes = model.getContainerClusters().get("container1").getContainers().size();
    assertEquals("Nodes in content1", 2, contentNodes);
    assertEquals("Nodes in container1", 1, containerNodes);
    assertEquals("Heap size for container", 60, physicalMemoryPercentage(model.getContainerClusters().get("container1")));
}
@Test
public void testClusterMembership() {
    // A single-node container cluster: the one host must carry exactly one
    // cluster membership, of type 'container' and id 'container1'.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "     <nodes count='1'/>" +
            "  </container>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(1);
    VespaModel model = tester.createModel(xmlWithNodes, true);
    assertEquals(1, model.getHostSystem().getHosts().size());
    HostResource theHost = model.getHostSystem().getHosts().iterator().next();
    assertEquals(1, theHost.clusterMemberships().size());
    ClusterMembership theMembership = theHost.clusterMemberships().iterator().next();
    assertEquals("container", theMembership.cluster().type().name());
    assertEquals("container1", theMembership.cluster().id().value());
}
@Test
public void testCombinedCluster() {
    // <nodes of='content1'/> places the container cluster on the content
    // cluster's nodes; the container heap percentage is then reduced to
    // leave room for the content processes.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "     <search/>" +
            "     <nodes of='content1'/>" +
            "  </container>" +
            "  <content version='1.0' id='content1'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(2);
    VespaModel model = tester.createModel(xmlWithNodes, true);
    int contentNodes = model.getContentClusters().get("content1").getRootGroup().getNodes().size();
    int containerNodes = model.getContainerClusters().get("container1").getContainers().size();
    assertEquals("Nodes in content1", 2, contentNodes);
    assertEquals("Nodes in container1", 2, containerNodes);
    assertEquals("Heap size is lowered with combined clusters",
                 17, physicalMemoryPercentage(model.getContainerClusters().get("container1")));
}
@Test
public void testCombinedClusterWithJvmOptions() {
    // jvm-options declared on a combined <nodes of=.../> element must propagate
    // to every container placed on the content nodes.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "     <document-processing/>" +
            "     <nodes of='content1' jvm-options='testoption'/>" +
            "  </container>" +
            "  <content version='1.0' id='content1'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(2);
    VespaModel model = tester.createModel(xmlWithNodes, true);
    assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
    assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
    for (Container eachContainer : model.getContainerClusters().get("container1").getContainers()) {
        assertTrue(eachContainer.getJvmOptions().contains("testoption"));
    }
}
@Test
public void testMultipleCombinedClusters() {
    // Two independent combined container/content pairs: each container cluster
    // must get exactly the nodes of the content cluster it references.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "     <nodes of='content1'/>" +
            "  </container>" +
            "  <container version='1.0' id='container2'>" +
            "     <nodes of='content2'/>" +
            "  </container>" +
            "  <content version='1.0' id='content1'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='2'/>" +
            "  </content>" +
            "  <content version='1.0' id='content2'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='3'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(5);
    VespaModel model = tester.createModel(xmlWithNodes, true);
    // First pair shares 2 nodes, second pair shares 3.
    assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
    assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
    assertEquals("Nodes in content2", 3, model.getContentClusters().get("content2").getRootGroup().getNodes().size());
    assertEquals("Nodes in container2", 3, model.getContainerClusters().get("container2").getContainers().size());
}
@Test
public void testNonExistingCombinedClusterReference() {
    // Referencing an undeclared service from <nodes of=.../> must fail
    // with a descriptive IllegalArgumentException.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "     <nodes of='container2'/>" +
            "  </container>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(2);
    try {
        tester.createModel(xmlWithNodes, true);
        fail("Expected exception");
    }
    catch (IllegalArgumentException expected) {
        assertEquals("container cluster 'container1' references service 'container2' but this service is not defined",
                     expected.getMessage());
    }
}
@Test
public void testInvalidCombinedClusterReference() {
    // <nodes of=.../> may only point at a content cluster; pointing at another
    // container cluster must fail with a descriptive IllegalArgumentException.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "     <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" +
            "  </container>" +
            "  <container version='1.0' id='container2'>" +
            "     <nodes count='2'/>" +
            "  </container>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(2);
    try {
        tester.createModel(xmlWithNodes, true);
        fail("Expected exception");
    }
    catch (IllegalArgumentException expected) {
        assertEquals("container cluster 'container1' references service 'container2', but that is not a content service",
                     expected.getMessage());
    }
}
@Test
// Verifies node allocation and group structure when using both count= and groups=
// on <nodes>: 'bar' gets 27 nodes in 9 groups of 3, 'baz' 27 nodes in 27 groups of 1.
// Also checks slobrok/logserver placement and cluster-controller host selection.
// NOTE(review): expected hostnames ("default54" etc.) encode the tester's
// deterministic allocation order — they are order-sensitive, not arbitrary.
public void testUsingNodesAndGroupCountAttributes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='27' groups='9'/>" +
"  </content>" +
"  <content version='1.0' id='baz'>" +
"     <redundancy>1</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='27' groups='27'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 64;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
// Container cluster 'foo': 10 distinct hosts.
assertEquals(1, model.getContainerClusters().size());
Set<com.yahoo.vespa.model.Host> containerHosts = model.getContainerClusters().get("foo").getContainers().stream().map(Container::getHost).collect(Collectors.toSet());
assertEquals(10, containerHosts.size());
// Admin services are co-located on container nodes in hosted mode.
Admin admin = model.getAdmin();
Set<com.yahoo.vespa.model.Host> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
assertEquals(3, slobrokHosts.size());
assertTrue("Slobroks are assigned from container nodes", containerHosts.containsAll(slobrokHosts));
assertTrue("Logserver is assigned from container nodes", containerHosts.contains(admin.getLogserver().getHost()));
assertEquals("No in-cluster config servers in a hosted environment", 0, admin.getConfigservers().size());
assertEquals("No admin cluster controller when multitenant", null, admin.getClusterControllers());
// Content cluster 'bar': 9 groups of 3 nodes, no nodes directly in the root group.
ContentCluster cluster = model.getContentClusters().get("bar");
ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default54", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default51", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default48", clusterControllers.getContainers().get(2).getHostName());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(9, cluster.getRootGroup().getSubgroups().size());
// Group 0: distribution keys 0-2, config ids bar/storage/0-2.
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertEquals("default54", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getDistributionKey(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getConfigId(), is("bar/storage/2"));
// Group 1: distribution keys 3-5.
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/3"));
assertEquals("default51", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(4));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/4"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getDistributionKey(), is(5));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getConfigId(), is("bar/storage/5"));
assertEquals("default48", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
// Last group (8): distribution keys 24-26.
assertThat(cluster.getRootGroup().getSubgroups().get(8).getIndex(), is("8"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getDistributionKey(), is(24));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getConfigId(), is("bar/storage/24"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getDistributionKey(), is(25));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getConfigId(), is("bar/storage/25"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getDistributionKey(), is(26));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getConfigId(), is("bar/storage/26"));
// Content cluster 'baz': 27 singleton groups.
cluster = model.getContentClusters().get("baz");
clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("baz-controllers", clusterControllers.getName());
assertEquals("default27", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default26", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default25", clusterControllers.getContainers().get(2).getHostName());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(27, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("baz/storage/0"));
assertEquals("default27", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("baz/storage/1"));
assertEquals("default26", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertEquals("default25", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(26).getIndex(), is("26"));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getDistributionKey(), is(26));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getConfigId(), is("baz/storage/26"));
}
@Test
// 8 content nodes in 8 singleton groups: checks cluster-controller placement,
// distribution-bit count, and per-group node/config-id/host assignments.
// NOTE(review): "default08".."default01" reflect the tester's allocation order.
public void testGroupsOfSize1() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>1</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='8' groups='8'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 18;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
// Three controllers, spread over distinct content hosts.
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default08", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default07", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default06", clusterControllers.getContainers().get(2).getHostName());
// All nodes live in subgroups; none directly under the root group.
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(8, cluster.getRootGroup().getSubgroups().size());
assertEquals(8, cluster.distributionBits());
// Spot-check first, second and last group: one node each, sequential keys.
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertEquals("default08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/1"));
assertEquals("default07", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(7).getIndex(), is("7"));
assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getDistributionKey(), is(7));
assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getConfigId(), is("bar/storage/7"));
assertEquals("default01", cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getHostName());
}
@Test
// Requesting 6 non-dedicated controllers yields 5 (rounded to the closest odd
// number for quorum), placed on content-cluster hosts.
public void testExplicitNonDedicatedClusterControllers() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes dedicated='false' count='6'/></controllers>" +
"     <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 19;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals( 8, cluster.distributionBits());
assertEquals("We get the closest odd number", 5, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
// Controllers land on specific content hosts, spread across groups.
assertEquals("default09", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default08", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default06", clusterControllers.getContainers().get(2).getHostName());
assertEquals("default05", clusterControllers.getContainers().get(3).getHostName());
assertEquals("default03", clusterControllers.getContainers().get(4).getHostName());
// The same hosts also carry storage nodes of the respective groups.
assertEquals("default09", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertEquals("default08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getHostName());
assertEquals("default06", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertEquals("default03", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
}
@Test
public void testClusterControllersWithGroupSize2() {
    // 8 content nodes in 4 groups of 2: the implicit controller count is
    // rounded to the closest odd number (3), one controller per distinct group host.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='4.0'/>" +
            "  <container version='1.0' id='foo'>" +
            "     <nodes count='10'/>" +
            "  </container>" +
            "  <content version='1.0' id='bar'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='8' groups='4'/>" +
            "  </content>" +
            "</services>";
    int hostCount = 18;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(hostCount);
    VespaModel model = tester.createModel(services, true);
    assertEquals(hostCount, model.getRoot().getHostSystem().getHosts().size());
    ClusterControllerContainerCluster controllers =
            model.getContentClusters().get("bar").getClusterControllers();
    assertEquals("We get the closest odd number", 3, controllers.getContainers().size());
    assertEquals("bar-controllers", controllers.getName());
    assertEquals("default08", controllers.getContainers().get(0).getHostName());
    assertEquals("default06", controllers.getContainers().get(1).getHostName());
    assertEquals("default04", controllers.getContainers().get(2).getHostName());
}
@Test
public void testClusterControllersCanSupplementWithAllContainerClusters() throws ParseException {
    // With only 2 content nodes and a request for 5 non-dedicated controllers,
    // the resulting controller cluster still ends up with a single container.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='4.0'/>" +
            "  <container version='1.0' id='foo1'>" +
            "     <nodes count='2'/>" +
            "  </container>" +
            "  <container version='1.0' id='foo2'>" +
            "     <nodes count='1'/>" +
            "  </container>" +
            "  <content version='1.0' id='bar'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <controllers><nodes dedicated='false' count='5'/></controllers>" +
            "     <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    int hostCount = 5;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(hostCount);
    VespaModel model = tester.createModel(services, true);
    assertEquals(hostCount, model.getRoot().getHostSystem().getHosts().size());
    ContainerCluster controllers = model.getContentClusters().get("bar").getClusterControllers();
    assertEquals(1, controllers.getContainers().size());
}
@Test
// When the hosts that would normally get cluster controllers are retired
// (passed as extra args to createModel), the controllers move to the next
// eligible host of each group instead.
public void testClusterControllersAreNotPlacedOnRetiredNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 19;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// default09/default06/default03 would be the default controller hosts; retire them.
VespaModel model = tester.createModel(services, true, "default09", "default06", "default03");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("Skipping retired default09", "default08", clusterControllers.getContainers().get(0).getHostName());
assertEquals("Skipping retired default06", "default05", clusterControllers.getContainers().get(1).getHostName());
assertEquals("Skipping retired default03", "default02", clusterControllers.getContainers().get(2).getHostName());
}
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() {
    // A retired container node keeps its slobrok, so the slobrok cluster grows
    // beyond the usual three while the retirement is in progress.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='4.0'/>" +
            "  <container version='1.0' id='foo'>" +
            "     <nodes count='10'/>" +
            "  </container>" +
            "</services>";
    int numberOfHosts = 10;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, true, "default09");
    assertEquals(numberOfHosts, model.getRoot().getHostSystem().getHosts().size());
    Admin admin = model.getAdmin();
    assertEquals("Includes retired node", 1+3, admin.getSlobroks().size());
    assertEquals("default01", admin.getSlobroks().get(0).getHostName());
    assertEquals("default02", admin.getSlobroks().get(1).getHostName());
    assertEquals("default10", admin.getSlobroks().get(2).getHostName());
    assertEquals("Included in addition because it is retired", "default09", admin.getSlobroks().get(3).getHostName());
}
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() throws ParseException {
    // Same as above, but with two retired nodes that sort after the active ones:
    // the active trio comes first, retired hosts are appended.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='4.0'/>" +
            "  <container version='1.0' id='foo'>" +
            "     <nodes count='10'/>" +
            "  </container>" +
            "</services>";
    int numberOfHosts = 10;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, true, "default09", "default08");
    assertEquals(numberOfHosts, model.getRoot().getHostSystem().getHosts().size());
    Admin admin = model.getAdmin();
    assertEquals("Includes retired node", 3+2, admin.getSlobroks().size());
    assertEquals("default01", admin.getSlobroks().get(0).getHostName());
    assertEquals("default02", admin.getSlobroks().get(1).getHostName());
    assertEquals("default10", admin.getSlobroks().get(2).getHostName());
    assertEquals("Included in addition because it is retired", "default08", admin.getSlobroks().get(3).getHostName());
    assertEquals("Included in addition because it is retired", "default09", admin.getSlobroks().get(4).getHostName());
}
@Test
// With two container clusters, slobroks are distributed over both; retired
// hosts in either cluster are retained in addition to the active ones.
public void testSlobroksAreSpreadOverAllContainerClusters() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <container version='1.0' id='bar'>" +
"     <nodes count='3'/>" +
"  </container>" +
"</services>";
int numberOfHosts = 13;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// Retire one host from 'foo' (default12) and two from 'bar' (default03, default02).
VespaModel model = tester.createModel(services, true, "default12", "default03", "default02");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals("Includes retired node", 3+3, model.getAdmin().getSlobroks().size());
assertEquals("default04", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("default13", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("Included in addition because it is retired", "default12", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("default01", model.getAdmin().getSlobroks().get(3).getHostName());
assertEquals("Included in addition because it is retired", "default02", model.getAdmin().getSlobroks().get(4).getHostName());
assertEquals("Included in addition because it is retired", "default03", model.getAdmin().getSlobroks().get(5).getHostName());
}
@Test
public void testSlobroksAreSpreadOverAllContainerClustersExceptNodeAdmin() {
    // In the hosted-vespa routing application, the node-admin cluster must
    // not receive any slobroks; all three land on routing nodes.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='4.0'/>" +
            "  <container version='1.0' id='routing'>" +
            "     <nodes count='10'/>" +
            "  </container>" +
            "  <container version='1.0' id='node-admin'>" +
            "     <nodes count='3'/>" +
            "  </container>" +
            "</services>";
    int numberOfHosts = 13;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    tester.setApplicationId("hosted-vespa", "routing", "default");
    VespaModel model = tester.createModel(services, true);
    assertEquals(numberOfHosts, model.getRoot().getHostSystem().getHosts().size());
    Set<String> routingHosts = getClusterHostnames(model, "routing");
    Set<String> nodeAdminHosts = getClusterHostnames(model, "node-admin");
    assertEquals(10, routingHosts.size());
    assertEquals(3, nodeAdminHosts.size());
    Set<String> slobrokHosts = model.getAdmin().getSlobroks().stream()
            .map(AbstractService::getHostName)
            .collect(Collectors.toSet());
    assertEquals(3, slobrokHosts.size());
    // Every slobrok host is a routing host and no slobrok host is a node-admin host.
    assertTrue(routingHosts.containsAll(slobrokHosts));
    assertTrue(slobrokHosts.stream().noneMatch(nodeAdminHosts::contains));
}
/** Returns the hostnames of all hosts running at least one service belonging to the given cluster. */
private Set<String> getClusterHostnames(VespaModel model, String clusterId) {
    Optional<String> wantedCluster = Optional.of(clusterId);
    return model.getHosts().stream()
                .filter(host -> host.getServices().stream()
                                    .map(service -> service.getProperty("clustername"))
                                    .anyMatch(clusterName -> Objects.equals(clusterName, wantedCluster)))
                .map(HostInfo::getHostname)
                .collect(Collectors.toSet());
}
@Test
public void test2ContentNodesProduces1ClusterController() {
    // A bare two-node content cluster gets exactly one cluster controller.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <content version='1.0' id='bar'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    int hostCount = 2;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(hostCount);
    VespaModel model = tester.createModel(services, true);
    assertEquals(hostCount, model.getRoot().getHostSystem().getHosts().size());
    ContainerCluster controllers = model.getContentClusters().get("bar").getClusterControllers();
    assertEquals(1, controllers.getContainers().size());
}
@Test
public void test2ContentNodesWithContainerClusterProducesMixedClusterControllerCluster() throws ParseException {
    // A two-node content cluster next to a container cluster still yields a
    // single-container controller cluster.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <container version='1.0' id='foo'>" +
            "     <nodes count='3'/>" +
            "  </container>" +
            "  <content version='1.0' id='bar'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(5);
    VespaModel model = tester.createModel(services, true);
    ContainerCluster controllers = model.getContentClusters().get("bar").getClusterControllers();
    assertEquals(1, controllers.getContainers().size());
}
@Ignore
@Test
// Two content clusters plus a container cluster: each content cluster's
// controller cluster is supplemented with a container node, and the container
// used for one content cluster is not reused for the other.
// NOTE(review): the original asserted size 1 for clusterControllers1 but then
// dereferenced indices 1 and 2, which would throw IndexOutOfBoundsException.
// Fixed to 3 for consistency with the clusterControllers2 assertion; confirm
// the intended count before un-ignoring this test.
public void test2ContentNodesOn2ClustersWithContainerClusterProducesMixedClusterControllerCluster() throws ParseException {
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <container version='1.0' id='container'>" +
            "     <nodes count='3' flavor='container-node'/>" +
            "  </container>" +
            "  <content version='1.0' id='content1'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='2' flavor='content1-node'/>" +
            "  </content>" +
            "  <content version='1.0' id='content2'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='2' flavor='content2-node'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts("container-node", 3);
    tester.addHosts("content1-node", 2);
    tester.addHosts("content2-node", 2);
    VespaModel model = tester.createModel(services, true);
    ContentCluster cluster1 = model.getContentClusters().get("content1");
    ClusterControllerContainerCluster clusterControllers1 = cluster1.getClusterControllers();
    assertEquals(3, clusterControllers1.getContainers().size());
    assertEquals("content1-node0", clusterControllers1.getContainers().get(0).getHostName());
    assertEquals("content1-node1", clusterControllers1.getContainers().get(1).getHostName());
    assertEquals("container-node0", clusterControllers1.getContainers().get(2).getHostName());
    ContentCluster cluster2 = model.getContentClusters().get("content2");
    ClusterControllerContainerCluster clusterControllers2 = cluster2.getClusterControllers();
    assertEquals(3, clusterControllers2.getContainers().size());
    assertEquals("content2-node0", clusterControllers2.getContainers().get(0).getHostName());
    assertEquals("content2-node1", clusterControllers2.getContainers().get(1).getHostName());
    assertEquals("We do not pick the container used to supplement another cluster",
                 "container-node1", clusterControllers2.getContainers().get(2).getHostName());
}
@Test
// An explicit <controllers><nodes dedicated='true' count='4'/></controllers>
// element allocates exactly 4 dedicated controller hosts (no odd-number rounding
// here, unlike the non-dedicated case).
public void testExplicitDedicatedClusterControllers() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes dedicated='true' count='4'/></controllers>" +
"     <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 23;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(4, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
// Dedicated controller hosts are allocated from the remaining free hosts.
assertEquals("default04", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default03", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default02", clusterControllers.getContainers().get(2).getHostName());
assertEquals("default01", clusterControllers.getContainers().get(3).getHostName());
}
// Deploys with an explicitly declared dedicated logserver node in the admin section.
// NOTE(review): the helper flag is false even though the XML declares dedicated='true' — presumably
// the flag controls the implicit/feature-driven default rather than the explicit declaration;
// confirm against testContainerOnLogserverHost and testImplicitLogserverContainer below.
@Test
public void testLogserverContainerWhenDedicatedLogserver() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'>" +
"    <logservers>" +
"      <nodes count='1' dedicated='true'/>" +
"    </logservers>" +
"  </admin>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='1'/>" +
"  </container>" +
"</services>";
boolean useDedicatedNodeForLogserver = false;
testContainerOnLogserverHost(services, useDedicatedNodeForLogserver);
}
// No <admin> element at all: the logserver is implicit, and the helper flag requests that it be
// placed on its own dedicated node. Shares the verification logic in testContainerOnLogserverHost.
@Test
public void testImplicitLogserverContainer() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='1'/>" +
"  </container>" +
"</services>";
boolean useDedicatedNodeForLogserver = true;
testContainerOnLogserverHost(services, useDedicatedNodeForLogserver);
}
/**
 * When fewer hosts are available than requested (6 instead of 24, non-failing deploy), the grouped
 * cluster is scaled down proportionally: 3 groups of 2 nodes each, with redundancy and ready copies
 * capped at 2 per group (2*3 across groups) and a "2|2|*" partition spec.
 *
 * Uses assertEquals throughout for consistency with the rest of this file
 * (org.junit.Assert.assertThat is deprecated since JUnit 4.13).
 */
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" + // in a non-hosted admin this asks for 3 config servers
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy reply-after='3'>4</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='24' groups='3'/>" +
"     <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
"  </content>" +
"</services>";
int numberOfHosts = 6;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertEquals(numberOfHosts, model.getRoot().getHostSystem().getHosts().size());
ContentCluster cluster = model.getContentClusters().get("bar");
// Redundancy values are per-group (2) times the number of groups (3)
assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(2*3, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(2*3, cluster.redundancy().effectiveReadyCopies());
assertEquals("2|2|*", cluster.getRootGroup().getPartitions().get());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(3, cluster.getRootGroup().getSubgroups().size());
// Each subgroup gets 2 nodes with consecutive distribution keys and config ids
assertEquals("0", cluster.getRootGroup().getSubgroups().get(0).getIndex());
assertEquals(2, cluster.getRootGroup().getSubgroups().get(0).getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId());
assertEquals(1, cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/1", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId());
assertEquals("1", cluster.getRootGroup().getSubgroups().get(1).getIndex());
assertEquals(2, cluster.getRootGroup().getSubgroups().get(1).getNodes().size());
assertEquals(2, cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/2", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId());
assertEquals(3, cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/3", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId());
assertEquals("2", cluster.getRootGroup().getSubgroups().get(2).getIndex());
assertEquals(2, cluster.getRootGroup().getSubgroups().get(2).getNodes().size());
assertEquals(4, cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/4", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getConfigId());
assertEquals(5, cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/5", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getConfigId());
}
/**
 * Ungrouped cluster scaled down from 24 requested nodes to the 4 available hosts: redundancy,
 * ready copies and dispatch groups are all capped at the actual node count (4), and all nodes
 * end up directly in the root group.
 *
 * Uses assertEquals throughout (Assert.assertThat is deprecated since JUnit 4.13), and drops the
 * assertion that duplicated the root-group size check.
 */
@Test
public void testUsingNodesCountAttributesAndGettingTooFewNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy reply-after='8'>12</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='24'/>" +
"     <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
"     <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
"  </content>" +
"</services>";
int numberOfHosts = 4;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertEquals(numberOfHosts, model.getRoot().getHostSystem().getHosts().size());
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(4, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(4, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(4, cluster.redundancy().effectiveReadyCopies());
assertEquals(4, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(4, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertEquals(0, cluster.getRootGroup().getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
assertEquals(1, cluster.getRootGroup().getNodes().get(1).getDistributionKey());
assertEquals("bar/storage/1", cluster.getRootGroup().getNodes().get(1).getConfigId());
assertEquals(2, cluster.getRootGroup().getNodes().get(2).getDistributionKey());
assertEquals("bar/storage/2", cluster.getRootGroup().getNodes().get(2).getConfigId());
assertEquals(3, cluster.getRootGroup().getNodes().get(3).getDistributionKey());
assertEquals("bar/storage/3", cluster.getRootGroup().getNodes().get(3).getConfigId());
}
/**
 * Extreme scale-down: 24 nodes in 3 groups requested, only 1 host available. The cluster collapses
 * to a single ungrouped node with redundancy 1, and a single cluster controller is placed on the
 * lone host.
 *
 * Uses assertEquals throughout (Assert.assertThat is deprecated since JUnit 4.13), and drops the
 * assertion that duplicated the root-group size check.
 */
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy reply-after='3'>4</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='24' groups='3'/>" +
"     <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
"  </content>" +
"</services>";
int numberOfHosts = 1;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertEquals(numberOfHosts, model.getRoot().getHostSystem().getHosts().size());
ContentCluster cluster = model.getContentClusters().get("bar");
ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(1, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(1, cluster.redundancy().effectiveReadyCopies());
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(1, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertEquals(0, cluster.getRootGroup().getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
}
// With required='true' the deploy must fail instead of silently scaling down when fewer hosts (2)
// exist than requested (3). The expected IllegalArgumentException is declared on the annotation.
@Test(expected = IllegalArgumentException.class)
public void testRequiringMoreNodesThanAreAvailable() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>1</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='3' required='true'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 2;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
tester.createModel(services, false); // expected to throw
}
/**
 * Ungrouped 24-node request collapsed onto a single available host: redundancy, ready copies and
 * dispatch groups are all reduced to 1, with the single node living directly in the root group.
 *
 * Uses assertEquals throughout (Assert.assertThat is deprecated since JUnit 4.13), and drops the
 * assertion that duplicated the root-group size check.
 */
@Test
public void testUsingNodesCountAttributesAndGettingJustOneNode() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy reply-after='8'>12</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='24'/>" +
"     <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
"     <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
"  </content>" +
"</services>";
int numberOfHosts = 1;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertEquals(numberOfHosts, model.getRoot().getHostSystem().getHosts().size());
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(1, cluster.redundancy().effectiveReadyCopies());
assertEquals(1, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(1, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertEquals(0, cluster.getRootGroup().getNodes().get(0).getDistributionKey());
assertEquals("bar/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
}
/**
 * Every service type (logserver, slobroks, container, controllers, content) can request its own
 * host flavor; the tester registers exactly the requested counts per flavor, so the deploy succeeds
 * only if each request is matched against the right flavor pool (23 hosts in total).
 *
 * Uses assertEquals for the final check (Assert.assertThat is deprecated since JUnit 4.13).
 */
@Test
public void testRequestingSpecificFlavors() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'>" +
"    <logservers><nodes count='1' dedicated='true' flavor='logserver-flavor'/></logservers>" +
"    <slobroks><nodes count='2' dedicated='true' flavor='slobrok-flavor'/></slobroks>" +
"  </admin>" +
"  <container version='1.0' id='container'>" +
"     <nodes count='4' flavor='container-flavor'/>" +
"  </container>" +
"  <content version='1.0' id='foo'>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes count='2' dedicated='true' flavor='controller-foo-flavor'/></controllers>" +
"     <nodes count='5' flavor='content-foo-flavor'/>" +
"  </content>" +
"  <content version='1.0' id='bar'>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes count='3' dedicated='true' flavor='controller-bar-flavor'/></controllers>" +
"     <nodes count='6' flavor='content-bar-flavor'/>" +
"  </content>" +
"</services>";
int totalHosts = 23; // 1 + 2 + 4 + 2 + 5 + 3 + 6
VespaModelTester tester = new VespaModelTester();
tester.addHosts("logserver-flavor", 1);
tester.addHosts("slobrok-flavor", 2);
tester.addHosts("container-flavor", 4);
tester.addHosts("controller-foo-flavor", 2);
tester.addHosts("content-foo-flavor", 5);
tester.addHosts("controller-bar-flavor", 3);
tester.addHosts("content-bar-flavor", 6);
VespaModel model = tester.createModel(services, true, 0); // fails deployment with 0 retired hosts
assertEquals(totalHosts, model.getRoot().getHostSystem().getHosts().size());
}
// A services file consisting of a single top-level <jdisc> element (no <services> wrapper) is
// valid: it yields one container cluster of 3 nodes plus an implicit admin (logserver + slobroks).
@Test
public void testJDiscOnly() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<jdisc version='1.0'>" +
"  <search/>" +
"  <nodes count='3'/>" +
"</jdisc>";
int numberOfHosts = 3;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().getHostSystem().getHosts().size());
assertEquals(3, model.getContainerClusters().get("jdisc").getContainers().size());
assertNotNull(model.getAdmin().getLogserver());
assertEquals(3, model.getAdmin().getSlobroks().size());
}
// The deprecated jvmargs attribute is still honored and ends up as the containers' JVM options.
@Test
public void testJvmArgs() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<jdisc version='1.0'>" +
"  <search/>" +
"  <nodes jvmargs='xyz' count='3'/>" +
"</jdisc>";
int numberOfHosts = 3;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().getHostSystem().getHosts().size());
assertEquals("xyz", model.getContainerClusters().get("jdisc").getContainers().get(0).getAssignedJvmOptions());
}
// The current jvm-options attribute is propagated to the containers' assigned JVM options.
@Test
public void testJvmOptions() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<jdisc version='1.0'>" +
"  <search/>" +
"  <nodes jvm-options='xyz' count='3'/>" +
"</jdisc>";
int numberOfHosts = 3;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().getHostSystem().getHosts().size());
assertEquals("xyz", model.getContainerClusters().get("jdisc").getContainers().get(0).getAssignedJvmOptions());
}
// Specifying both jvm-options and the deprecated jvmargs is a hard error: deployment must fail
// with a message telling the user to merge jvmargs into jvm-options.
@Test
public void testJvmOptionsOverridesJvmArgs() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<jdisc version='1.0'>" +
"  <search/>" +
"  <nodes jvm-options='xyz' jvmargs='abc' count='3'/>" +
"</jdisc>";
int numberOfHosts = 3;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
try {
tester.createModel(services, true);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
// Exact message is part of the user-facing contract
assertEquals("You have specified both jvm-options='xyz' and deprecated jvmargs='abc'. Merge jvmargs into jvm-options.", e.getMessage());
}
}
// hostalias-based node references (the non-hosted syntax) still work when a provisioner is in
// play: the single aliased node is resolved to the one provisioned host.
@Test
public void testUsingHostaliasWithProvisioner() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"<admin version='2.0'>" +
"   <adminserver hostalias='node1'/>\n"+
"</admin>\n" +
"<jdisc id='mydisc' version='1.0'>" +
"  <handler id='myHandler'>" +
"    <component id='injected' />" +
"  </handler>" +
"  <nodes>" +
"    <node hostalias='node1'/>" +
"  </nodes>" +
"</jdisc>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
}
/**
 * The standalone-container syntax (a bare <jdisc> with an explicit http server on the default
 * Vespa web service port) is accepted on hosted Vespa and yields one host and one container
 * cluster. Uses assertEquals for consistency (Assert.assertThat is deprecated since JUnit 4.13).
 */
@Test
public void testThatStandaloneSyntaxWorksOnHostedVespa() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<jdisc id='foo' version='1.0'>" +
"  <http>" +
"    <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" +
"  </http>" +
"</jdisc>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getHosts().size());
assertEquals(1, model.getContainerClusters().size());
}
// Clusters declared without any <nodes> element default to a single node each (hosted case):
// one container, one content node, one slobrok, all on one host.
@Test
public void testNoNodeTagMeans1Node() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <jdisc id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"  </jdisc>" +
"  <content version='1.0' id='bar'>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes());
}
// Same default-to-one-node behavior as above, but with only a container cluster and no content.
@Test
public void testNoNodeTagMeans1NodeNoContent() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <jdisc id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"  </jdisc>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
}
// The no-<nodes> default also applies when the model is built in non-hosted mode.
@Test
public void testNoNodeTagMeans1NodeNonHosted() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <jdisc id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"  </jdisc>" +
"  <content version='1.0' id='bar'>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.setHosted(false);
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size());
}
// Non-hosted deployment with explicit single-node <nodes> elements referring to the same host
// alias in both the container and content cluster: everything collapses onto one host.
@Test
public void testSingleNodeNonHosted() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <jdisc id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"    <nodes><node hostalias='foo'/></nodes>"+
"  </jdisc>" +
"  <content version='1.0' id='bar'>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes><node hostalias='foo' distribution-key='0'/></nodes>"+
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.setHosted(false);
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes());
}
/**
 * Recreate the combination used in some factory tests: multitenant config server but not hosted
 * Vespa, with two content nodes on the same host and an implicitly created single cluster
 * controller. Uses assertEquals for consistency (Assert.assertThat is deprecated since JUnit 4.13).
 */
@Test
public void testMultitenantButNotHosted() {
String services =
"<?xml version='1.0' encoding='UTF-8' ?>" +
"<services version='1.0'>" +
"  <admin version='2.0'>" +
"    <adminserver hostalias='node1'/>" +
"  </admin>"  +
"  <jdisc id='default' version='1.0'>" +
"    <search/>" +
"    <nodes>" +
"      <node hostalias='node1'/>" +
"    </nodes>" +
"  </jdisc>" +
"  <content id='storage' version='1.0'>" +
"    <redundancy>2</redundancy>" +
"    <group>" +
"      <node distribution-key='0' hostalias='node1'/>" +
"      <node distribution-key='1' hostalias='node1'/>" +
"    </group>" +
"    <tuning>" +
"      <cluster-controller>" +
"        <transition-time>0</transition-time>" +
"      </cluster-controller>" +
"    </tuning>" +
"    <documents>" +
"      <document mode='store-only' type='type1'/>" +
"    </documents>" +
"    <engine>" +
"      <proton/>" +
"    </engine>" +
"  </content>" +
" </services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(2, content.getRootGroup().getNodes().size());
ContainerCluster controller = content.getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
/**
 * A content cluster referring to an existing container cluster for document processing
 * (<document-processing cluster="container"/>) builds correctly in the multitenant, non-hosted
 * case. Uses assertEquals for consistency (Assert.assertThat is deprecated since JUnit 4.13).
 */
@Test
public void testModelWithReferencedIndexingCluster() {
String services =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<services version=\"1.0\">\n" +
"\n" +
"  <admin version=\"2.0\">\n" +
"    <adminserver hostalias=\"vespa-1\"/>\n" +
"    <configservers>\n" +
"      <configserver hostalias=\"vespa-1\"/>\n" +
"    </configservers>\n" +
"  </admin>\n" +
"\n" +
"  <container id=\"container\" version=\"1.0\">\n" +
"    <document-processing/>\n" +
"    <document-api/>\n" +
"    <search/>\n" +
"    <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" +
"      <node hostalias=\"vespa-1\"/>\n" +
"    </nodes>\n" +
"  </container>\n" +
"\n" +
"  <content id=\"storage\" version=\"1.0\">\n" +
"    <search>\n" +
"      <visibility-delay>1.0</visibility-delay>\n" +
"    </search>\n" +
"    <redundancy>2</redundancy>\n" +
"    <documents>\n" +
"      <document type=\"type1\" mode=\"index\"/>\n" +
"      <document-processing cluster=\"container\"/>\n" +
"    </documents>\n" +
"    <nodes>\n" +
"      <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
"    </nodes>\n" +
"  </content>\n" +
"\n" +
"</services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(1, content.getRootGroup().getNodes().size());
ContainerCluster controller = content.getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
// Non-hosted deployment from an explicit hosts.xml: three hosts shared between the container and
// content clusters via host aliases. Verifies all three hosts are used and the content cluster
// gets all three nodes.
@Test
public void testSharedNodesNotHosted() {
String hosts =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<hosts>\n" +
"  <host name=\"vespa-1\">\n" +
"    <alias>vespa-1</alias>\n" +
"  </host>\n" +
"  <host name=\"vespa-2\">\n" +
"    <alias>vespa-2</alias>\n" +
"  </host>\n" +
"  <host name=\"vespa-3\">\n" +
"    <alias>vespa-3</alias>\n" +
"  </host>\n" +
"</hosts>";
String services =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<services version=\"1.0\">\n" +
"\n" +
"  <admin version=\"2.0\">\n" +
"    <adminserver hostalias=\"vespa-1\"/>\n" +
"    <configservers>\n" +
"      <configserver hostalias=\"vespa-1\"/>\n" +
"    </configservers>\n" +
"  </admin>\n" +
"\n" +
"  <container id=\"container\" version=\"1.0\">\n" +
"    <document-processing/>\n" +
"    <document-api/>\n" +
"    <search/>\n" +
"    <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" +
"      <node hostalias=\"vespa-1\"/>\n" +
"      <node hostalias=\"vespa-2\"/>\n" +
"      <node hostalias=\"vespa-3\"/>\n" +
"    </nodes>\n" +
"  </container>\n" +
"\n" +
"  <content id=\"storage\" version=\"1.0\">\n" +
"    <search>\n" +
"      <visibility-delay>1.0</visibility-delay>\n" +
"    </search>\n" +
"    <redundancy>2</redundancy>\n" +
"    <documents>\n" +
"      <document type=\"type1\" mode=\"index\"/>\n" +
"      <document-processing cluster=\"container\"/>\n" +
"    </documents>\n" +
"    <nodes>\n" +
"      <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
"      <node hostalias=\"vespa-2\" distribution-key=\"1\"/>\n" +
"      <node hostalias=\"vespa-3\" distribution-key=\"2\"/>\n" +
"    </nodes>\n" +
"  </content>\n" +
"\n" +
"</services>";
VespaModel model = createNonProvisionedModel(false, hosts, services);
assertEquals(3, model.getRoot().getHostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(3, content.getRootGroup().getNodes().size());
}
/**
 * Multitenant, non-hosted deployment where two content clusters share the same host via aliases.
 * Verifies the 'storage' cluster still gets both its nodes and one implicit cluster controller.
 * Uses assertEquals for consistency (Assert.assertThat is deprecated since JUnit 4.13).
 */
@Test
public void testMultitenantButNotHostedSharedContentNode() {
String services =
"<?xml version='1.0' encoding='UTF-8' ?>" +
"<services version='1.0'>" +
"  <admin version='2.0'>" +
"    <adminserver hostalias='node1'/>" +
"  </admin>"  +
"  <jdisc id='default' version='1.0'>" +
"    <search/>" +
"    <nodes>" +
"      <node hostalias='node1'/>" +
"    </nodes>" +
"  </jdisc>" +
"  <content id='storage' version='1.0'>" +
"    <redundancy>2</redundancy>" +
"    <group>" +
"      <node distribution-key='0' hostalias='node1'/>" +
"      <node distribution-key='1' hostalias='node1'/>" +
"    </group>" +
"    <tuning>" +
"      <cluster-controller>" +
"        <transition-time>0</transition-time>" +
"      </cluster-controller>" +
"    </tuning>" +
"    <documents>" +
"      <document mode='store-only' type='type1'/>" +
"    </documents>" +
"    <engine>" +
"      <proton/>" +
"    </engine>" +
"  </content>" +
"  <content id='search' version='1.0'>" +
"    <redundancy>2</redundancy>" +
"    <group>" +
"      <node distribution-key='0' hostalias='node1'/>" +
"    </group>" +
"    <documents>" +
"      <document type='type1'/>" +
"    </documents>" +
"  </content>" +
" </services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(2, content.getRootGroup().getNodes().size());
ContainerCluster controller = content.getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
// Convenience overload: multitenant model without an explicit hosts.xml.
private VespaModel createNonProvisionedMultitenantModel(String services) {
return createNonProvisionedModel(true, null, services);
}
/**
 * Builds a VespaModel without a hosted provisioner, using a mock application package containing
 * the given hosts.xml (may be null), services.xml and a generated 'type1' search definition.
 */
private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) {
VespaModelCreatorWithMockPkg creator =
        new VespaModelCreatorWithMockPkg(hosts, services, ApplicationPackageUtils.generateSearchDefinition("type1"));
DeployState.Builder stateBuilder = new DeployState.Builder();
stateBuilder.applicationPackage(creator.appPkg);
stateBuilder.properties(new TestProperties().setMultitenant(multitenant));
return creator.create(false, stateBuilder.build());
}
/**
 * TLD (top-level dispatcher) config ids must be stable: a TLD running on the same host as a
 * container must carry the container's index in its config id, both for a fresh allocation and
 * when one host ("default0") is retired and container indexes start at 1.
 * Uses assertEquals for consistency (Assert.assertThat is deprecated since JUnit 4.13).
 */
@Test
public void testThatTldConfigIdsAreDeterministic() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <jdisc version='1.0' id='jdisc0'>" +
"    <search/>" +
"    <nodes count='2'/>" +
"  </jdisc>" +
"  <jdisc version='1.0' id='jdisc1'>" +
"    <search/>" +
"    <nodes count='2'/>" +
"  </jdisc>" +
"  <content version='1.0' id='content0'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2'/>" +
"  </content>" +
"  <content version='1.0' id='content1'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 8;
{ // Fresh allocation: container indexes start at 0
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().getHostSystem().getHosts().size());
Map<String, ContentCluster> contentClusters = model.getContentClusters();
assertEquals(2, contentClusters.size());
checkThatTldAndContainerRunningOnSameHostHaveSameId(
        model.getContainerClusters().values(),
        model.getContentClusters().values(),
        0);
}
{ // One retired host ("default0"): container indexes start at 1 but ids stay aligned
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts + 1);
VespaModel model = tester.createModel(services, true, 1, "default0");
assertEquals(numberOfHosts, model.getRoot().getHostSystem().getHosts().size());
Map<String, ContentCluster> contentClusters = model.getContentClusters();
assertEquals(2, contentClusters.size());
checkThatTldAndContainerRunningOnSameHostHaveSameId(
        model.getContainerClusters().values(),
        model.getContentClusters().values(),
        1);
}
}
// Asserts, for every (content cluster, container cluster) pair, that each TLD is colocated with a
// container and that its config id embeds the same index as that container's config id.
// startIndexForContainerIds shifts the expected container index (e.g. 1 when one host is retired).
// Index arithmetic (2 * i + j) assumes exactly 2 containers per container cluster, matching the
// fixtures in testThatTldConfigIdsAreDeterministic; kept byte-identical on purpose.
private void checkThatTldAndContainerRunningOnSameHostHaveSameId(Collection<ApplicationContainerCluster> containerClusters,
Collection<ContentCluster> contentClusters,
int startIndexForContainerIds) {
for (ContentCluster contentCluster : contentClusters) {
String contentClusterName = contentCluster.getName();
int i = 0;
for (ApplicationContainerCluster containerCluster : containerClusters) {
String containerClusterName = containerCluster.getName();
for (int j = 0; j < 2; j++) {
// TLDs are laid out per container cluster, two per cluster (see fixture)
Dispatch tld = contentCluster.getSearch().getIndexed().getTLDs().get(2 * i + j);
ApplicationContainer container = containerCluster.getContainers().get(j);
int containerConfigIdIndex = j + startIndexForContainerIds;
assertEquals(container.getHostName(), tld.getHostname());
assertEquals(contentClusterName + "/search/cluster." + contentClusterName + "/tlds/" +
containerClusterName + "." + containerConfigIdIndex + ".tld." + containerConfigIdIndex,
tld.getConfigId());
assertEquals(containerClusterName + "/" + "container." + containerConfigIdIndex,
container.getConfigId());
}
i++;
}
}
}
/** Returns the JVM heap size, as a percentage of physical memory, configured for the given cluster. */
private int physicalMemoryPercentage(ContainerCluster cluster) {
QrStartConfig.Builder builder = new QrStartConfig.Builder();
cluster.getConfig(builder);
QrStartConfig config = new QrStartConfig(builder);
return config.jvm().heapSizeAsPercentageOfPhysicalMemory();
}
// Proton config is derived from the node flavor: a flavor without fast disk should yield a
// reduced disk write speed (40) on every search node.
@Test
public void require_that_proton_config_is_tuned_based_on_node_flavor() {
String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
"<services>",
"  <content version='1.0' id='test'>",
"    <documents>",
"      <document type='type1' mode='index'/>",
"    </documents>",
"    <nodes count='2' flavor='content-test-flavor'/>",
"  </content>",
"</services>");
VespaModelTester tester = new VespaModelTester();
tester.addHosts(createFlavorFromDiskSetting("content-test-flavor", false), 2);
VespaModel model = tester.createModel(services, true, 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
assertEquals(2, cluster.getSearchNodes().size());
assertEquals(40, getProtonConfig(cluster, 0).hwinfo().disk().writespeed(), 0.001);
assertEquals(40, getProtonConfig(cluster, 1).hwinfo().disk().writespeed(), 0.001);
}
/** Builds a Flavor with the given name and fast-disk setting; all other attributes use defaults. */
private static Flavor createFlavorFromDiskSetting(String name, boolean fastDisk) {
FlavorsConfig.Flavor.Builder builder = new FlavorsConfig.Flavor.Builder();
builder.name(name);
builder.fastDisk(fastDisk);
return new Flavor(new FlavorsConfig.Flavor(builder));
}
// Resolves the ProtonConfig produced by the search node at the given index in the cluster.
// Fails the test (rather than throwing IndexOutOfBounds) if the index is out of range.
private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
List<SearchNode> searchNodes = cluster.getSearchNodes();
assertTrue(searchNodeIdx < searchNodes.size());
searchNodes.get(searchNodeIdx).getConfig(builder);
return new ProtonConfig(builder);
}
// Precedence check for proton settings: an explicit <config> override (maxtlssize=2000) and
// explicit <tuning> (maxmemorygain=1000) must both win over the flavor-derived defaults, while an
// untouched setting (each().maxmemory) still comes from the 128 GB flavor (16 GB = memory/8,
// presumably — confirm against the flavor-tuning code).
@Test
public void require_that_config_override_and_explicit_proton_tuning_have_precedence_over_default_node_flavor_tuning() {
String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
"<services>",
"  <content version='1.0' id='test'>",
"    <config name='vespa.config.search.core.proton'>",
"      <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>",
"    </config>",
"    <documents>",
"      <document type='type1' mode='index'/>",
"    </documents>",
"    <nodes count='1' flavor='content-test-flavor'/>",
"    <engine>",
"      <proton>",
"        <tuning>",
"          <searchnode>",
"            <flushstrategy>",
"              <native>",
"                <total>",
"                  <maxmemorygain>1000</maxmemorygain>",
"                </total>",
"              </native>",
"            </flushstrategy>",
"          </searchnode>",
"        </tuning>",
"      </proton>",
"    </engine>",
"  </content>",
"</services>");
VespaModelTester tester = new VespaModelTester();
tester.addHosts("default", 1);
tester.addHosts(createFlavorFromMemoryAndDisk("content-test-flavor", 128, 100), 1);
VespaModel model = tester.createModel(services, true, 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId());
assertEquals(2000, cfg.flush().memory().maxtlssize()); // from config override
assertEquals(1000, cfg.flush().memory().maxmemory()); // from explicit tuning
assertEquals((long) 16 * GB, cfg.flush().memory().each().maxmemory()); // from node flavor tuning
}
private static long GB = 1024 * 1024 * 1024;
// Builds a Flavor with the given name and minimum memory/disk sizes (in GB); other attributes use defaults.
private static Flavor createFlavorFromMemoryAndDisk(String name, int memoryGb, int diskGb) {
return new Flavor(new FlavorsConfig.Flavor(new FlavorsConfig.Flavor.Builder().
name(name).minMainMemoryAvailableGb(memoryGb).minDiskAvailableGb(diskGb)));
}
// Resolves the ProtonConfig served by the model for the given config id (i.e. as a node would see it).
private static ProtonConfig getProtonConfig(VespaModel model, String configId) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
model.getConfig(builder, configId);
return new ProtonConfig(builder);
}
} |
Will this always throw if something with the deployment is not allowed? Should we also check that the returned list of config change actions is empty? | public void testNoOverrideNeededinDev() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
tester.deploy(previous, getServices("book"), Environment.dev, null);
} | tester.deploy(previous, getServices("book"), Environment.dev, null); | public void testNoOverrideNeededinDev() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
tester.deploy(previous, getServices("book"), Environment.dev, null);
} | class ContentTypeRemovalValidatorTest {
// Removing a document type between deployments must be rejected with an actionable message that
// includes the validation-override instructions, since it would delete all data of that type.
@Test
public void testContentTypeRemovalValidation() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
try {
tester.deploy(previous, getServices("book"), Environment.prod, null);
fail("Expected exception due to removal of context type 'music");
}
catch (IllegalArgumentException expected) {
assertEquals("content-type-removal: Type 'music' is removed in content cluster 'test'. " +
"This will cause loss of all data of this type. " +
ValidationOverrides.toAllowMessage(ValidationId.contentTypeRemoval),
Exceptions.toMessageString(expected));
}
}
@Test
public void testOverridingContentTypeRemovalValidation() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
tester.deploy(previous, getServices("book"), Environment.prod, removalOverride);
}
@Test
private static String getServices(String documentType) {
return "<services version='1.0'>" +
" <content id='test' version='1.0'>" +
" <redundancy>1</redundancy>" +
" <engine>" +
" <proton/>" +
" </engine>" +
" <documents>" +
" <document type='" + documentType + "' mode='index'/>" +
" </documents>" +
" <nodes count='1'/>" +
" </content>" +
"</services>";
}
private static final String removalOverride =
"<validation-overrides>\n" +
" <allow until='2000-01-03'>content-type-removal</allow>\n" +
"</validation-overrides>\n";
} | class ContentTypeRemovalValidatorTest {
@Test
public void testContentTypeRemovalValidation() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
try {
tester.deploy(previous, getServices("book"), Environment.prod, null);
fail("Expected exception due to removal of context type 'music");
}
catch (IllegalArgumentException expected) {
assertEquals("content-type-removal: Type 'music' is removed in content cluster 'test'. " +
"This will cause loss of all data of this type. " +
ValidationOverrides.toAllowMessage(ValidationId.contentTypeRemoval),
Exceptions.toMessageString(expected));
}
}
@Test
public void testOverridingContentTypeRemovalValidation() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
tester.deploy(previous, getServices("book"), Environment.prod, removalOverride);
}
@Test
private static String getServices(String documentType) {
return "<services version='1.0'>" +
" <content id='test' version='1.0'>" +
" <redundancy>1</redundancy>" +
" <engine>" +
" <proton/>" +
" </engine>" +
" <documents>" +
" <document type='" + documentType + "' mode='index'/>" +
" </documents>" +
" <nodes count='1'/>" +
" </content>" +
"</services>";
}
private static final String removalOverride =
"<validation-overrides>\n" +
" <allow until='2000-01-03'>content-type-removal</allow>\n" +
"</validation-overrides>\n";
} |
Yes, it would throw if not allowed (see the first test in the class, which does the same but for prod). I don't see why we should check config change actions. This has no impact on them one way or the other. | public void testNoOverrideNeededinDev() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
tester.deploy(previous, getServices("book"), Environment.dev, null);
} | tester.deploy(previous, getServices("book"), Environment.dev, null); | public void testNoOverrideNeededinDev() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
tester.deploy(previous, getServices("book"), Environment.dev, null);
} | class ContentTypeRemovalValidatorTest {
@Test
public void testContentTypeRemovalValidation() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
try {
tester.deploy(previous, getServices("book"), Environment.prod, null);
fail("Expected exception due to removal of context type 'music");
}
catch (IllegalArgumentException expected) {
assertEquals("content-type-removal: Type 'music' is removed in content cluster 'test'. " +
"This will cause loss of all data of this type. " +
ValidationOverrides.toAllowMessage(ValidationId.contentTypeRemoval),
Exceptions.toMessageString(expected));
}
}
@Test
public void testOverridingContentTypeRemovalValidation() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
tester.deploy(previous, getServices("book"), Environment.prod, removalOverride);
}
@Test
private static String getServices(String documentType) {
return "<services version='1.0'>" +
" <content id='test' version='1.0'>" +
" <redundancy>1</redundancy>" +
" <engine>" +
" <proton/>" +
" </engine>" +
" <documents>" +
" <document type='" + documentType + "' mode='index'/>" +
" </documents>" +
" <nodes count='1'/>" +
" </content>" +
"</services>";
}
private static final String removalOverride =
"<validation-overrides>\n" +
" <allow until='2000-01-03'>content-type-removal</allow>\n" +
"</validation-overrides>\n";
} | class ContentTypeRemovalValidatorTest {
@Test
public void testContentTypeRemovalValidation() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
try {
tester.deploy(previous, getServices("book"), Environment.prod, null);
fail("Expected exception due to removal of context type 'music");
}
catch (IllegalArgumentException expected) {
assertEquals("content-type-removal: Type 'music' is removed in content cluster 'test'. " +
"This will cause loss of all data of this type. " +
ValidationOverrides.toAllowMessage(ValidationId.contentTypeRemoval),
Exceptions.toMessageString(expected));
}
}
@Test
public void testOverridingContentTypeRemovalValidation() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
tester.deploy(previous, getServices("book"), Environment.prod, removalOverride);
}
@Test
private static String getServices(String documentType) {
return "<services version='1.0'>" +
" <content id='test' version='1.0'>" +
" <redundancy>1</redundancy>" +
" <engine>" +
" <proton/>" +
" </engine>" +
" <documents>" +
" <document type='" + documentType + "' mode='index'/>" +
" </documents>" +
" <nodes count='1'/>" +
" </content>" +
"</services>";
}
private static final String removalOverride =
"<validation-overrides>\n" +
" <allow until='2000-01-03'>content-type-removal</allow>\n" +
"</validation-overrides>\n";
} |
Ok 👍 | public void testNoOverrideNeededinDev() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
tester.deploy(previous, getServices("book"), Environment.dev, null);
} | tester.deploy(previous, getServices("book"), Environment.dev, null); | public void testNoOverrideNeededinDev() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
tester.deploy(previous, getServices("book"), Environment.dev, null);
} | class ContentTypeRemovalValidatorTest {
@Test
public void testContentTypeRemovalValidation() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
try {
tester.deploy(previous, getServices("book"), Environment.prod, null);
fail("Expected exception due to removal of context type 'music");
}
catch (IllegalArgumentException expected) {
assertEquals("content-type-removal: Type 'music' is removed in content cluster 'test'. " +
"This will cause loss of all data of this type. " +
ValidationOverrides.toAllowMessage(ValidationId.contentTypeRemoval),
Exceptions.toMessageString(expected));
}
}
@Test
public void testOverridingContentTypeRemovalValidation() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
tester.deploy(previous, getServices("book"), Environment.prod, removalOverride);
}
@Test
private static String getServices(String documentType) {
return "<services version='1.0'>" +
" <content id='test' version='1.0'>" +
" <redundancy>1</redundancy>" +
" <engine>" +
" <proton/>" +
" </engine>" +
" <documents>" +
" <document type='" + documentType + "' mode='index'/>" +
" </documents>" +
" <nodes count='1'/>" +
" </content>" +
"</services>";
}
private static final String removalOverride =
"<validation-overrides>\n" +
" <allow until='2000-01-03'>content-type-removal</allow>\n" +
"</validation-overrides>\n";
} | class ContentTypeRemovalValidatorTest {
@Test
public void testContentTypeRemovalValidation() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
try {
tester.deploy(previous, getServices("book"), Environment.prod, null);
fail("Expected exception due to removal of context type 'music");
}
catch (IllegalArgumentException expected) {
assertEquals("content-type-removal: Type 'music' is removed in content cluster 'test'. " +
"This will cause loss of all data of this type. " +
ValidationOverrides.toAllowMessage(ValidationId.contentTypeRemoval),
Exceptions.toMessageString(expected));
}
}
@Test
public void testOverridingContentTypeRemovalValidation() {
ValidationTester tester = new ValidationTester();
VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
tester.deploy(previous, getServices("book"), Environment.prod, removalOverride);
}
@Test
private static String getServices(String documentType) {
return "<services version='1.0'>" +
" <content id='test' version='1.0'>" +
" <redundancy>1</redundancy>" +
" <engine>" +
" <proton/>" +
" </engine>" +
" <documents>" +
" <document type='" + documentType + "' mode='index'/>" +
" </documents>" +
" <nodes count='1'/>" +
" </content>" +
"</services>";
}
private static final String removalOverride =
"<validation-overrides>\n" +
" <allow until='2000-01-03'>content-type-removal</allow>\n" +
"</validation-overrides>\n";
} |
Use `new URIBuilder(originalUri).setScheme("https").build()` ? | private static URI rewriteUri(URI originalUri) {
if (!originalUri.getScheme().equals("http")) {
return originalUri;
}
int port = originalUri.getPort();
int rewrittenPort = port != -1 ? port : 80;
try {
URI rewrittenUri = new URI("https", originalUri.getUserInfo(), originalUri.getHost(), rewrittenPort, originalUri.getPath(), originalUri.getQuery(), originalUri.getFragment());
log.log(Level.FINE, () -> String.format("Uri rewritten from '%s' to '%s'", originalUri, rewrittenUri));
return rewrittenUri;
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
} | URI rewrittenUri = new URI("https", originalUri.getUserInfo(), originalUri.getHost(), rewrittenPort, originalUri.getPath(), originalUri.getQuery(), originalUri.getFragment()); | private static URI rewriteUri(URI originalUri) {
if (!originalUri.getScheme().equals("http")) {
return originalUri;
}
int port = originalUri.getPort();
int rewrittenPort = port != -1 ? port : 80;
try {
URI rewrittenUri = new URIBuilder(originalUri).setScheme("https").setPort(rewrittenPort).build();
log.log(Level.FINE, () -> String.format("Uri rewritten from '%s' to '%s'", originalUri, rewrittenUri));
return rewrittenUri;
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
} | class HttpToHttpsRewritingRequestInterceptor implements HttpRequestInterceptor {
@Override
public void process(HttpRequest request, HttpContext context) {
if (request instanceof HttpRequestBase) {
HttpRequestBase httpUriRequest = (HttpRequestBase) request;
httpUriRequest.setURI(rewriteUri(httpUriRequest.getURI()));
} else {
log.log(Level.FINE, () -> "Not a HttpRequestBase - skipping URI rewriting: " + request.getClass().getName());
}
}
} | class HttpToHttpsRewritingRequestInterceptor implements HttpRequestInterceptor {
@Override
public void process(HttpRequest request, HttpContext context) {
if (request instanceof HttpRequestBase) {
HttpRequestBase httpUriRequest = (HttpRequestBase) request;
httpUriRequest.setURI(rewriteUri(httpUriRequest.getURI()));
} else {
log.log(Level.FINE, () -> "Not a HttpRequestBase - skipping URI rewriting: " + request.getClass().getName());
}
}
} |
Done :) | private static URI rewriteUri(URI originalUri) {
if (!originalUri.getScheme().equals("http")) {
return originalUri;
}
int port = originalUri.getPort();
int rewrittenPort = port != -1 ? port : 80;
try {
URI rewrittenUri = new URI("https", originalUri.getUserInfo(), originalUri.getHost(), rewrittenPort, originalUri.getPath(), originalUri.getQuery(), originalUri.getFragment());
log.log(Level.FINE, () -> String.format("Uri rewritten from '%s' to '%s'", originalUri, rewrittenUri));
return rewrittenUri;
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
} | URI rewrittenUri = new URI("https", originalUri.getUserInfo(), originalUri.getHost(), rewrittenPort, originalUri.getPath(), originalUri.getQuery(), originalUri.getFragment()); | private static URI rewriteUri(URI originalUri) {
if (!originalUri.getScheme().equals("http")) {
return originalUri;
}
int port = originalUri.getPort();
int rewrittenPort = port != -1 ? port : 80;
try {
URI rewrittenUri = new URIBuilder(originalUri).setScheme("https").setPort(rewrittenPort).build();
log.log(Level.FINE, () -> String.format("Uri rewritten from '%s' to '%s'", originalUri, rewrittenUri));
return rewrittenUri;
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
} | class HttpToHttpsRewritingRequestInterceptor implements HttpRequestInterceptor {
@Override
public void process(HttpRequest request, HttpContext context) {
if (request instanceof HttpRequestBase) {
HttpRequestBase httpUriRequest = (HttpRequestBase) request;
httpUriRequest.setURI(rewriteUri(httpUriRequest.getURI()));
} else {
log.log(Level.FINE, () -> "Not a HttpRequestBase - skipping URI rewriting: " + request.getClass().getName());
}
}
} | class HttpToHttpsRewritingRequestInterceptor implements HttpRequestInterceptor {
@Override
public void process(HttpRequest request, HttpContext context) {
if (request instanceof HttpRequestBase) {
HttpRequestBase httpUriRequest = (HttpRequestBase) request;
httpUriRequest.setURI(rewriteUri(httpUriRequest.getURI()));
} else {
log.log(Level.FINE, () -> "Not a HttpRequestBase - skipping URI rewriting: " + request.getClass().getName());
}
}
} |
Not really feedback on this PR, but shouldn't it really have been `useRpc`? | public void getConfig(LogdConfig.Builder builder) {
if (logserver == null) {
builder.logserver(new LogdConfig.Logserver.Builder().use(false));
}
else {
builder.
logserver(new LogdConfig.Logserver.Builder().
userpc(true).
use(logServerContainerCluster.isPresent() || !isHostedVespa).
host(logserver.getHostName()).
rpcport(logserver.getRelativePort(0)).
port(logserver.getRelativePort(1)));
}
} | userpc(true). | public void getConfig(LogdConfig.Builder builder) {
if (logserver == null) {
builder.logserver(new LogdConfig.Logserver.Builder().use(false));
}
else {
builder.
logserver(new LogdConfig.Logserver.Builder().
userpc(true).
use(logServerContainerCluster.isPresent() || !isHostedVespa).
host(logserver.getHostName()).
rpcport(logserver.getRelativePort(0)).
port(logserver.getRelativePort(1)));
}
} | class Admin extends AbstractConfigProducer implements Serializable {
private static final long serialVersionUID = 1L;
private final boolean isHostedVespa;
private final Monitoring monitoring;
private final Metrics metrics;
private final List<Configserver> configservers = new ArrayList<>();
private final List<Slobrok> slobroks = new ArrayList<>();
private Configserver defaultConfigserver;
/** The log server, or null if none */
private Logserver logserver;
private LogForwarder.Config logForwarderConfig = null;
private ApplicationType applicationType = ApplicationType.DEFAULT;
public void setLogForwarderConfig(LogForwarder.Config cfg) {
this.logForwarderConfig = cfg;
}
/**
* The single cluster controller cluster shared by all content clusters by default when not multitenant.
* If multitenant, this is null.
*/
private ClusterControllerContainerCluster clusterControllers;
private Optional<LogserverContainerCluster> logServerContainerCluster = Optional.empty();
private MetricsProxyContainerCluster metricsProxyContainerCluster;
private ZooKeepersConfigProvider zooKeepersConfigProvider;
private FileDistributionConfigProducer fileDistribution;
private final boolean multitenant;
public Admin(AbstractConfigProducer parent,
Monitoring monitoring,
Metrics metrics,
boolean multitenant,
FileDistributionConfigProducer fileDistributionConfigProducer,
boolean isHostedVespa) {
super(parent, "admin");
this.isHostedVespa = isHostedVespa;
this.monitoring = monitoring;
this.metrics = metrics;
this.multitenant = multitenant;
this.fileDistribution = fileDistributionConfigProducer;
}
public Configserver getConfigserver() { return defaultConfigserver; }
/** Returns the configured monitoring endpoint, or null if not configured */
public Monitoring getMonitoring() {
return monitoring;
}
public Metrics getUserMetrics() { return metrics; }
/** Returns a list of all config servers */
public List<Configserver> getConfigservers() {
return configservers;
}
public void removeSlobroks() { slobroks.clear(); }
/** Returns an immutable list of the slobroks in this */
public List<Slobrok> getSlobroks() { return Collections.unmodifiableList(slobroks); }
public void setLogserver(Logserver logserver) { this.logserver = logserver; }
/** Returns the log server for this, or null if none */
public Logserver getLogserver() { return logserver; }
public void addConfigservers(List<Configserver> configservers) {
this.configservers.addAll(configservers);
if (this.configservers.size() > 0) {
this.defaultConfigserver = configservers.get(0);
}
this.zooKeepersConfigProvider = new ZooKeepersConfigProvider(configservers);
}
public void addSlobroks(List<Slobrok> slobroks) {
this.slobroks.addAll(slobroks);
}
public ClusterControllerContainerCluster getClusterControllers() { return clusterControllers; }
public void setClusterControllers(ClusterControllerContainerCluster clusterControllers) {
if (multitenant) throw new RuntimeException("Should not use admin cluster controller in a multitenant environment");
this.clusterControllers = clusterControllers;
}
public Optional<LogserverContainerCluster> getLogServerContainerCluster() { return logServerContainerCluster; }
public void setLogserverContainerCluster(LogserverContainerCluster logServerContainerCluster) {
this.logServerContainerCluster = Optional.of(logServerContainerCluster);
}
public ZooKeepersConfigProvider getZooKeepersConfigProvider() {
return zooKeepersConfigProvider;
}
public void getConfig(SlobroksConfig.Builder builder) {
for (Slobrok slob : slobroks) {
builder.
slobrok(new SlobroksConfig.Slobrok.Builder().
connectionspec(slob.getConnectionSpec()));
}
}
public void getConfig(ZookeepersConfig.Builder builder) {
zooKeepersConfigProvider.getConfig(builder);
}
public FileDistributionConfigProducer getFileDistributionConfigProducer() {
return fileDistribution;
}
public List<HostResource> getClusterControllerHosts() {
List<HostResource> hosts = new ArrayList<>();
if (multitenant) {
if (logserver != null)
hosts.add(logserver.getHostResource());
} else {
for (Configserver configserver : getConfigservers()) {
hosts.add(configserver.getHostResource());
}
}
return hosts;
}
/**
* Adds services to all hosts in the system.
*/
public void addPerHostServices(List<HostResource> hosts, DeployState deployState) {
if (slobroks.isEmpty())
slobroks.addAll(createDefaultSlobrokSetup(deployState.getDeployLogger()));
if (deployState.getProperties().enableMetricsProxyContainer())
addMetricsProxyCluster(hosts, deployState);
for (HostResource host : hosts) {
if (!host.getHost().runsConfigServer()) {
addCommonServices(host, deployState);
}
}
}
private void addMetricsProxyCluster(List<HostResource> hosts, DeployState deployState) {
var metricsProxyCluster = new MetricsProxyContainerCluster(this, "metrics", deployState);
int index = 0;
for (var host : hosts) {
var container = new MetricsProxyContainer(metricsProxyCluster, index++);
addAndInitializeService(deployState.getDeployLogger(), host, container);
metricsProxyCluster.addContainer(container);
}
}
private void addCommonServices(HostResource host, DeployState deployState) {
addConfigSentinel(deployState.getDeployLogger(), host, deployState.getProperties().applicationId(), deployState.zone());
addLogd(deployState.getDeployLogger(), host);
addConfigProxy(deployState.getDeployLogger(), host);
addFileDistribution(host);
if (logForwarderConfig != null) {
addLogForwarder(deployState.getDeployLogger(), host);
}
}
private void addConfigSentinel(DeployLogger deployLogger, HostResource host, ApplicationId applicationId, Zone zone) {
ConfigSentinel configSentinel = new ConfigSentinel(host.getHost(), applicationId, zone);
addAndInitializeService(deployLogger, host, configSentinel);
host.getHost().setConfigSentinel(configSentinel);
}
private void addLogForwarder(DeployLogger deployLogger, HostResource host) {
addAndInitializeService(deployLogger, host, new LogForwarder(host.getHost(), logForwarderConfig));
}
private void addLogd(DeployLogger deployLogger, HostResource host) {
addAndInitializeService(deployLogger, host, new Logd(host.getHost()));
}
private void addConfigProxy(DeployLogger deployLogger, HostResource host) {
addAndInitializeService(deployLogger, host, new ConfigProxy(host.getHost()));
}
public void addAndInitializeService(DeployLogger deployLogger, HostResource host, AbstractService service) {
service.setHostResource(host);
service.initService(deployLogger);
}
private void addFileDistribution(HostResource host) {
FileDistributor fileDistributor = fileDistribution.getFileDistributor();
HostResource deployHost = getHostSystem().getHostByHostname(fileDistributor.fileSourceHost());
if (deployHostIsMissing(deployHost)) {
throw new RuntimeException("Could not find host in the application's host system: '" +
fileDistributor.fileSourceHost() + "'. Hostsystem=" + getHostSystem());
}
FileDistributionConfigProvider configProvider =
new FileDistributionConfigProvider(fileDistribution,
fileDistributor,
host == deployHost,
host.getHost());
fileDistribution.addFileDistributionConfigProducer(host.getHost(), configProvider);
}
private boolean deployHostIsMissing(HostResource deployHost) {
return !multitenant && deployHost == null;
}
private List<Slobrok> createDefaultSlobrokSetup(DeployLogger deployLogger) {
List<HostResource> hosts = getHostSystem().getHosts();
List<Slobrok> slobs = new ArrayList<>();
if (logserver != null) {
Slobrok slobrok = new Slobrok(this, 0);
addAndInitializeService(deployLogger, logserver.getHostResource(), slobrok);
slobs.add(slobrok);
}
int n = 0;
while ((n < hosts.size()) && (slobs.size() < 3)) {
HostResource host = hosts.get(n);
if ((logserver== null || host != logserver.getHostResource()) && ! host.getHost().runsConfigServer()) {
Slobrok newSlobrok = new Slobrok(this, slobs.size());
addAndInitializeService(deployLogger, host, newSlobrok);
slobs.add(newSlobrok);
}
n++;
}
int j = 0;
for (Slobrok s : slobs) {
s.setProp("index", j);
j++;
}
return slobs;
}
public boolean multitenant() {
return multitenant;
}
public void setApplicationType(ApplicationType applicationType) {
this.applicationType = applicationType;
}
public ApplicationType getApplicationType() { return applicationType; }
} | class Admin extends AbstractConfigProducer implements Serializable {
private static final long serialVersionUID = 1L;
private final boolean isHostedVespa;
private final Monitoring monitoring;
private final Metrics metrics;
private final List<Configserver> configservers = new ArrayList<>();
private final List<Slobrok> slobroks = new ArrayList<>();
private Configserver defaultConfigserver;
/** The log server, or null if none */
private Logserver logserver;
private LogForwarder.Config logForwarderConfig = null;
private ApplicationType applicationType = ApplicationType.DEFAULT;
public void setLogForwarderConfig(LogForwarder.Config cfg) {
this.logForwarderConfig = cfg;
}
/**
* The single cluster controller cluster shared by all content clusters by default when not multitenant.
* If multitenant, this is null.
*/
private ClusterControllerContainerCluster clusterControllers;
private Optional<LogserverContainerCluster> logServerContainerCluster = Optional.empty();
private MetricsProxyContainerCluster metricsProxyContainerCluster;
private ZooKeepersConfigProvider zooKeepersConfigProvider;
private FileDistributionConfigProducer fileDistribution;
private final boolean multitenant;
public Admin(AbstractConfigProducer parent,
Monitoring monitoring,
Metrics metrics,
boolean multitenant,
FileDistributionConfigProducer fileDistributionConfigProducer,
boolean isHostedVespa) {
super(parent, "admin");
this.isHostedVespa = isHostedVespa;
this.monitoring = monitoring;
this.metrics = metrics;
this.multitenant = multitenant;
this.fileDistribution = fileDistributionConfigProducer;
}
public Configserver getConfigserver() { return defaultConfigserver; }
/** Returns the configured monitoring endpoint, or null if not configured */
public Monitoring getMonitoring() {
return monitoring;
}
public Metrics getUserMetrics() { return metrics; }
/** Returns a list of all config servers */
public List<Configserver> getConfigservers() {
return configservers;
}
public void removeSlobroks() { slobroks.clear(); }
/** Returns an immutable list of the slobroks in this */
public List<Slobrok> getSlobroks() { return Collections.unmodifiableList(slobroks); }
public void setLogserver(Logserver logserver) { this.logserver = logserver; }
/** Returns the log server for this, or null if none */
public Logserver getLogserver() { return logserver; }
public void addConfigservers(List<Configserver> configservers) {
this.configservers.addAll(configservers);
if (this.configservers.size() > 0) {
this.defaultConfigserver = configservers.get(0);
}
this.zooKeepersConfigProvider = new ZooKeepersConfigProvider(configservers);
}
public void addSlobroks(List<Slobrok> slobroks) {
this.slobroks.addAll(slobroks);
}
public ClusterControllerContainerCluster getClusterControllers() { return clusterControllers; }
public void setClusterControllers(ClusterControllerContainerCluster clusterControllers) {
if (multitenant) throw new RuntimeException("Should not use admin cluster controller in a multitenant environment");
this.clusterControllers = clusterControllers;
}
public Optional<LogserverContainerCluster> getLogServerContainerCluster() { return logServerContainerCluster; }
public void setLogserverContainerCluster(LogserverContainerCluster logServerContainerCluster) {
this.logServerContainerCluster = Optional.of(logServerContainerCluster);
}
public ZooKeepersConfigProvider getZooKeepersConfigProvider() {
return zooKeepersConfigProvider;
}
public void getConfig(SlobroksConfig.Builder builder) {
for (Slobrok slob : slobroks) {
builder.
slobrok(new SlobroksConfig.Slobrok.Builder().
connectionspec(slob.getConnectionSpec()));
}
}
public void getConfig(ZookeepersConfig.Builder builder) {
zooKeepersConfigProvider.getConfig(builder);
}
public FileDistributionConfigProducer getFileDistributionConfigProducer() {
return fileDistribution;
}
public List<HostResource> getClusterControllerHosts() {
List<HostResource> hosts = new ArrayList<>();
if (multitenant) {
if (logserver != null)
hosts.add(logserver.getHostResource());
} else {
for (Configserver configserver : getConfigservers()) {
hosts.add(configserver.getHostResource());
}
}
return hosts;
}
/**
* Adds services to all hosts in the system.
*/
public void addPerHostServices(List<HostResource> hosts, DeployState deployState) {
if (slobroks.isEmpty())
slobroks.addAll(createDefaultSlobrokSetup(deployState.getDeployLogger()));
if (deployState.getProperties().enableMetricsProxyContainer())
addMetricsProxyCluster(hosts, deployState);
for (HostResource host : hosts) {
if (!host.getHost().runsConfigServer()) {
addCommonServices(host, deployState);
}
}
}
private void addMetricsProxyCluster(List<HostResource> hosts, DeployState deployState) {
var metricsProxyCluster = new MetricsProxyContainerCluster(this, "metrics", deployState);
int index = 0;
for (var host : hosts) {
var container = new MetricsProxyContainer(metricsProxyCluster, index++);
addAndInitializeService(deployState.getDeployLogger(), host, container);
metricsProxyCluster.addContainer(container);
}
}
private void addCommonServices(HostResource host, DeployState deployState) {
addConfigSentinel(deployState.getDeployLogger(), host, deployState.getProperties().applicationId(), deployState.zone());
addLogd(deployState.getDeployLogger(), host);
addConfigProxy(deployState.getDeployLogger(), host);
addFileDistribution(host);
if (logForwarderConfig != null) {
addLogForwarder(deployState.getDeployLogger(), host);
}
}
private void addConfigSentinel(DeployLogger deployLogger, HostResource host, ApplicationId applicationId, Zone zone) {
ConfigSentinel configSentinel = new ConfigSentinel(host.getHost(), applicationId, zone);
addAndInitializeService(deployLogger, host, configSentinel);
host.getHost().setConfigSentinel(configSentinel);
}
private void addLogForwarder(DeployLogger deployLogger, HostResource host) {
addAndInitializeService(deployLogger, host, new LogForwarder(host.getHost(), logForwarderConfig));
}
private void addLogd(DeployLogger deployLogger, HostResource host) {
addAndInitializeService(deployLogger, host, new Logd(host.getHost()));
}
private void addConfigProxy(DeployLogger deployLogger, HostResource host) {
addAndInitializeService(deployLogger, host, new ConfigProxy(host.getHost()));
}
public void addAndInitializeService(DeployLogger deployLogger, HostResource host, AbstractService service) {
service.setHostResource(host);
service.initService(deployLogger);
}
private void addFileDistribution(HostResource host) {
FileDistributor fileDistributor = fileDistribution.getFileDistributor();
HostResource deployHost = getHostSystem().getHostByHostname(fileDistributor.fileSourceHost());
if (deployHostIsMissing(deployHost)) {
throw new RuntimeException("Could not find host in the application's host system: '" +
fileDistributor.fileSourceHost() + "'. Hostsystem=" + getHostSystem());
}
FileDistributionConfigProvider configProvider =
new FileDistributionConfigProvider(fileDistribution,
fileDistributor,
host == deployHost,
host.getHost());
fileDistribution.addFileDistributionConfigProducer(host.getHost(), configProvider);
}
private boolean deployHostIsMissing(HostResource deployHost) {
return !multitenant && deployHost == null;
}
private List<Slobrok> createDefaultSlobrokSetup(DeployLogger deployLogger) {
List<HostResource> hosts = getHostSystem().getHosts();
List<Slobrok> slobs = new ArrayList<>();
if (logserver != null) {
Slobrok slobrok = new Slobrok(this, 0);
addAndInitializeService(deployLogger, logserver.getHostResource(), slobrok);
slobs.add(slobrok);
}
int n = 0;
while ((n < hosts.size()) && (slobs.size() < 3)) {
HostResource host = hosts.get(n);
if ((logserver== null || host != logserver.getHostResource()) && ! host.getHost().runsConfigServer()) {
Slobrok newSlobrok = new Slobrok(this, slobs.size());
addAndInitializeService(deployLogger, host, newSlobrok);
slobs.add(newSlobrok);
}
n++;
}
int j = 0;
for (Slobrok s : slobs) {
s.setProp("index", j);
j++;
}
return slobs;
}
public boolean multitenant() {
return multitenant;
}
public void setApplicationType(ApplicationType applicationType) {
this.applicationType = applicationType;
}
/** Returns the application type of this model; defaults to ApplicationType.DEFAULT. */
public ApplicationType getApplicationType() { return applicationType; }
} |
We decided to follow the existing naming convention in the logd definition. | public void getConfig(LogdConfig.Builder builder) {
if (logserver == null) {
builder.logserver(new LogdConfig.Logserver.Builder().use(false));
}
else {
builder.
logserver(new LogdConfig.Logserver.Builder().
userpc(true).
use(logServerContainerCluster.isPresent() || !isHostedVespa).
host(logserver.getHostName()).
rpcport(logserver.getRelativePort(0)).
port(logserver.getRelativePort(1)));
}
} | userpc(true). | public void getConfig(LogdConfig.Builder builder) {
if (logserver == null) {
builder.logserver(new LogdConfig.Logserver.Builder().use(false));
}
else {
builder.
logserver(new LogdConfig.Logserver.Builder().
userpc(true).
use(logServerContainerCluster.isPresent() || !isHostedVespa).
host(logserver.getHostName()).
rpcport(logserver.getRelativePort(0)).
port(logserver.getRelativePort(1)));
}
} | class Admin extends AbstractConfigProducer implements Serializable {
private static final long serialVersionUID = 1L;
private final boolean isHostedVespa;
private final Monitoring monitoring;
private final Metrics metrics;
private final List<Configserver> configservers = new ArrayList<>();
private final List<Slobrok> slobroks = new ArrayList<>();
private Configserver defaultConfigserver;
/** The log server, or null if none */
private Logserver logserver;
private LogForwarder.Config logForwarderConfig = null;
private ApplicationType applicationType = ApplicationType.DEFAULT;
public void setLogForwarderConfig(LogForwarder.Config cfg) {
this.logForwarderConfig = cfg;
}
/**
* The single cluster controller cluster shared by all content clusters by default when not multitenant.
* If multitenant, this is null.
*/
private ClusterControllerContainerCluster clusterControllers;
private Optional<LogserverContainerCluster> logServerContainerCluster = Optional.empty();
private MetricsProxyContainerCluster metricsProxyContainerCluster;
private ZooKeepersConfigProvider zooKeepersConfigProvider;
private FileDistributionConfigProducer fileDistribution;
private final boolean multitenant;
public Admin(AbstractConfigProducer parent,
Monitoring monitoring,
Metrics metrics,
boolean multitenant,
FileDistributionConfigProducer fileDistributionConfigProducer,
boolean isHostedVespa) {
super(parent, "admin");
this.isHostedVespa = isHostedVespa;
this.monitoring = monitoring;
this.metrics = metrics;
this.multitenant = multitenant;
this.fileDistribution = fileDistributionConfigProducer;
}
public Configserver getConfigserver() { return defaultConfigserver; }
/** Returns the configured monitoring endpoint, or null if not configured */
public Monitoring getMonitoring() {
return monitoring;
}
public Metrics getUserMetrics() { return metrics; }
/** Returns a list of all config servers */
public List<Configserver> getConfigservers() {
return configservers;
}
public void removeSlobroks() { slobroks.clear(); }
/** Returns an immutable list of the slobroks in this */
public List<Slobrok> getSlobroks() { return Collections.unmodifiableList(slobroks); }
public void setLogserver(Logserver logserver) { this.logserver = logserver; }
/** Returns the log server for this, or null if none */
public Logserver getLogserver() { return logserver; }
public void addConfigservers(List<Configserver> configservers) {
this.configservers.addAll(configservers);
if (this.configservers.size() > 0) {
this.defaultConfigserver = configservers.get(0);
}
this.zooKeepersConfigProvider = new ZooKeepersConfigProvider(configservers);
}
public void addSlobroks(List<Slobrok> slobroks) {
this.slobroks.addAll(slobroks);
}
public ClusterControllerContainerCluster getClusterControllers() { return clusterControllers; }
public void setClusterControllers(ClusterControllerContainerCluster clusterControllers) {
if (multitenant) throw new RuntimeException("Should not use admin cluster controller in a multitenant environment");
this.clusterControllers = clusterControllers;
}
public Optional<LogserverContainerCluster> getLogServerContainerCluster() { return logServerContainerCluster; }
public void setLogserverContainerCluster(LogserverContainerCluster logServerContainerCluster) {
this.logServerContainerCluster = Optional.of(logServerContainerCluster);
}
public ZooKeepersConfigProvider getZooKeepersConfigProvider() {
return zooKeepersConfigProvider;
}
public void getConfig(SlobroksConfig.Builder builder) {
for (Slobrok slob : slobroks) {
builder.
slobrok(new SlobroksConfig.Slobrok.Builder().
connectionspec(slob.getConnectionSpec()));
}
}
public void getConfig(ZookeepersConfig.Builder builder) {
zooKeepersConfigProvider.getConfig(builder);
}
public FileDistributionConfigProducer getFileDistributionConfigProducer() {
return fileDistribution;
}
public List<HostResource> getClusterControllerHosts() {
List<HostResource> hosts = new ArrayList<>();
if (multitenant) {
if (logserver != null)
hosts.add(logserver.getHostResource());
} else {
for (Configserver configserver : getConfigservers()) {
hosts.add(configserver.getHostResource());
}
}
return hosts;
}
/**
* Adds services to all hosts in the system.
*/
public void addPerHostServices(List<HostResource> hosts, DeployState deployState) {
if (slobroks.isEmpty())
slobroks.addAll(createDefaultSlobrokSetup(deployState.getDeployLogger()));
if (deployState.getProperties().enableMetricsProxyContainer())
addMetricsProxyCluster(hosts, deployState);
for (HostResource host : hosts) {
if (!host.getHost().runsConfigServer()) {
addCommonServices(host, deployState);
}
}
}
private void addMetricsProxyCluster(List<HostResource> hosts, DeployState deployState) {
var metricsProxyCluster = new MetricsProxyContainerCluster(this, "metrics", deployState);
int index = 0;
for (var host : hosts) {
var container = new MetricsProxyContainer(metricsProxyCluster, index++);
addAndInitializeService(deployState.getDeployLogger(), host, container);
metricsProxyCluster.addContainer(container);
}
}
private void addCommonServices(HostResource host, DeployState deployState) {
addConfigSentinel(deployState.getDeployLogger(), host, deployState.getProperties().applicationId(), deployState.zone());
addLogd(deployState.getDeployLogger(), host);
addConfigProxy(deployState.getDeployLogger(), host);
addFileDistribution(host);
if (logForwarderConfig != null) {
addLogForwarder(deployState.getDeployLogger(), host);
}
}
private void addConfigSentinel(DeployLogger deployLogger, HostResource host, ApplicationId applicationId, Zone zone) {
ConfigSentinel configSentinel = new ConfigSentinel(host.getHost(), applicationId, zone);
addAndInitializeService(deployLogger, host, configSentinel);
host.getHost().setConfigSentinel(configSentinel);
}
private void addLogForwarder(DeployLogger deployLogger, HostResource host) {
addAndInitializeService(deployLogger, host, new LogForwarder(host.getHost(), logForwarderConfig));
}
private void addLogd(DeployLogger deployLogger, HostResource host) {
addAndInitializeService(deployLogger, host, new Logd(host.getHost()));
}
private void addConfigProxy(DeployLogger deployLogger, HostResource host) {
addAndInitializeService(deployLogger, host, new ConfigProxy(host.getHost()));
}
public void addAndInitializeService(DeployLogger deployLogger, HostResource host, AbstractService service) {
service.setHostResource(host);
service.initService(deployLogger);
}
private void addFileDistribution(HostResource host) {
FileDistributor fileDistributor = fileDistribution.getFileDistributor();
HostResource deployHost = getHostSystem().getHostByHostname(fileDistributor.fileSourceHost());
if (deployHostIsMissing(deployHost)) {
throw new RuntimeException("Could not find host in the application's host system: '" +
fileDistributor.fileSourceHost() + "'. Hostsystem=" + getHostSystem());
}
FileDistributionConfigProvider configProvider =
new FileDistributionConfigProvider(fileDistribution,
fileDistributor,
host == deployHost,
host.getHost());
fileDistribution.addFileDistributionConfigProducer(host.getHost(), configProvider);
}
private boolean deployHostIsMissing(HostResource deployHost) {
return !multitenant && deployHost == null;
}
private List<Slobrok> createDefaultSlobrokSetup(DeployLogger deployLogger) {
List<HostResource> hosts = getHostSystem().getHosts();
List<Slobrok> slobs = new ArrayList<>();
if (logserver != null) {
Slobrok slobrok = new Slobrok(this, 0);
addAndInitializeService(deployLogger, logserver.getHostResource(), slobrok);
slobs.add(slobrok);
}
int n = 0;
while ((n < hosts.size()) && (slobs.size() < 3)) {
HostResource host = hosts.get(n);
if ((logserver== null || host != logserver.getHostResource()) && ! host.getHost().runsConfigServer()) {
Slobrok newSlobrok = new Slobrok(this, slobs.size());
addAndInitializeService(deployLogger, host, newSlobrok);
slobs.add(newSlobrok);
}
n++;
}
int j = 0;
for (Slobrok s : slobs) {
s.setProp("index", j);
j++;
}
return slobs;
}
public boolean multitenant() {
return multitenant;
}
public void setApplicationType(ApplicationType applicationType) {
this.applicationType = applicationType;
}
public ApplicationType getApplicationType() { return applicationType; }
} | class Admin extends AbstractConfigProducer implements Serializable {
private static final long serialVersionUID = 1L;
private final boolean isHostedVespa;
private final Monitoring monitoring;
private final Metrics metrics;
private final List<Configserver> configservers = new ArrayList<>();
private final List<Slobrok> slobroks = new ArrayList<>();
private Configserver defaultConfigserver;
/** The log server, or null if none */
private Logserver logserver;
private LogForwarder.Config logForwarderConfig = null;
private ApplicationType applicationType = ApplicationType.DEFAULT;
public void setLogForwarderConfig(LogForwarder.Config cfg) {
this.logForwarderConfig = cfg;
}
/**
* The single cluster controller cluster shared by all content clusters by default when not multitenant.
* If multitenant, this is null.
*/
private ClusterControllerContainerCluster clusterControllers;
private Optional<LogserverContainerCluster> logServerContainerCluster = Optional.empty();
private MetricsProxyContainerCluster metricsProxyContainerCluster;
private ZooKeepersConfigProvider zooKeepersConfigProvider;
private FileDistributionConfigProducer fileDistribution;
private final boolean multitenant;
public Admin(AbstractConfigProducer parent,
Monitoring monitoring,
Metrics metrics,
boolean multitenant,
FileDistributionConfigProducer fileDistributionConfigProducer,
boolean isHostedVespa) {
super(parent, "admin");
this.isHostedVespa = isHostedVespa;
this.monitoring = monitoring;
this.metrics = metrics;
this.multitenant = multitenant;
this.fileDistribution = fileDistributionConfigProducer;
}
public Configserver getConfigserver() { return defaultConfigserver; }
/** Returns the configured monitoring endpoint, or null if not configured */
public Monitoring getMonitoring() {
return monitoring;
}
public Metrics getUserMetrics() { return metrics; }
/** Returns a list of all config servers */
public List<Configserver> getConfigservers() {
return configservers;
}
public void removeSlobroks() { slobroks.clear(); }
/** Returns an immutable list of the slobroks in this */
public List<Slobrok> getSlobroks() { return Collections.unmodifiableList(slobroks); }
public void setLogserver(Logserver logserver) { this.logserver = logserver; }
/** Returns the log server for this, or null if none */
public Logserver getLogserver() { return logserver; }
public void addConfigservers(List<Configserver> configservers) {
this.configservers.addAll(configservers);
if (this.configservers.size() > 0) {
this.defaultConfigserver = configservers.get(0);
}
this.zooKeepersConfigProvider = new ZooKeepersConfigProvider(configservers);
}
public void addSlobroks(List<Slobrok> slobroks) {
this.slobroks.addAll(slobroks);
}
public ClusterControllerContainerCluster getClusterControllers() { return clusterControllers; }
public void setClusterControllers(ClusterControllerContainerCluster clusterControllers) {
if (multitenant) throw new RuntimeException("Should not use admin cluster controller in a multitenant environment");
this.clusterControllers = clusterControllers;
}
public Optional<LogserverContainerCluster> getLogServerContainerCluster() { return logServerContainerCluster; }
public void setLogserverContainerCluster(LogserverContainerCluster logServerContainerCluster) {
this.logServerContainerCluster = Optional.of(logServerContainerCluster);
}
public ZooKeepersConfigProvider getZooKeepersConfigProvider() {
return zooKeepersConfigProvider;
}
public void getConfig(SlobroksConfig.Builder builder) {
for (Slobrok slob : slobroks) {
builder.
slobrok(new SlobroksConfig.Slobrok.Builder().
connectionspec(slob.getConnectionSpec()));
}
}
public void getConfig(ZookeepersConfig.Builder builder) {
zooKeepersConfigProvider.getConfig(builder);
}
public FileDistributionConfigProducer getFileDistributionConfigProducer() {
return fileDistribution;
}
public List<HostResource> getClusterControllerHosts() {
List<HostResource> hosts = new ArrayList<>();
if (multitenant) {
if (logserver != null)
hosts.add(logserver.getHostResource());
} else {
for (Configserver configserver : getConfigservers()) {
hosts.add(configserver.getHostResource());
}
}
return hosts;
}
/**
* Adds services to all hosts in the system.
*/
public void addPerHostServices(List<HostResource> hosts, DeployState deployState) {
if (slobroks.isEmpty())
slobroks.addAll(createDefaultSlobrokSetup(deployState.getDeployLogger()));
if (deployState.getProperties().enableMetricsProxyContainer())
addMetricsProxyCluster(hosts, deployState);
for (HostResource host : hosts) {
if (!host.getHost().runsConfigServer()) {
addCommonServices(host, deployState);
}
}
}
private void addMetricsProxyCluster(List<HostResource> hosts, DeployState deployState) {
var metricsProxyCluster = new MetricsProxyContainerCluster(this, "metrics", deployState);
int index = 0;
for (var host : hosts) {
var container = new MetricsProxyContainer(metricsProxyCluster, index++);
addAndInitializeService(deployState.getDeployLogger(), host, container);
metricsProxyCluster.addContainer(container);
}
}
private void addCommonServices(HostResource host, DeployState deployState) {
addConfigSentinel(deployState.getDeployLogger(), host, deployState.getProperties().applicationId(), deployState.zone());
addLogd(deployState.getDeployLogger(), host);
addConfigProxy(deployState.getDeployLogger(), host);
addFileDistribution(host);
if (logForwarderConfig != null) {
addLogForwarder(deployState.getDeployLogger(), host);
}
}
private void addConfigSentinel(DeployLogger deployLogger, HostResource host, ApplicationId applicationId, Zone zone) {
ConfigSentinel configSentinel = new ConfigSentinel(host.getHost(), applicationId, zone);
addAndInitializeService(deployLogger, host, configSentinel);
host.getHost().setConfigSentinel(configSentinel);
}
private void addLogForwarder(DeployLogger deployLogger, HostResource host) {
addAndInitializeService(deployLogger, host, new LogForwarder(host.getHost(), logForwarderConfig));
}
private void addLogd(DeployLogger deployLogger, HostResource host) {
addAndInitializeService(deployLogger, host, new Logd(host.getHost()));
}
private void addConfigProxy(DeployLogger deployLogger, HostResource host) {
addAndInitializeService(deployLogger, host, new ConfigProxy(host.getHost()));
}
public void addAndInitializeService(DeployLogger deployLogger, HostResource host, AbstractService service) {
service.setHostResource(host);
service.initService(deployLogger);
}
private void addFileDistribution(HostResource host) {
FileDistributor fileDistributor = fileDistribution.getFileDistributor();
HostResource deployHost = getHostSystem().getHostByHostname(fileDistributor.fileSourceHost());
if (deployHostIsMissing(deployHost)) {
throw new RuntimeException("Could not find host in the application's host system: '" +
fileDistributor.fileSourceHost() + "'. Hostsystem=" + getHostSystem());
}
FileDistributionConfigProvider configProvider =
new FileDistributionConfigProvider(fileDistribution,
fileDistributor,
host == deployHost,
host.getHost());
fileDistribution.addFileDistributionConfigProducer(host.getHost(), configProvider);
}
private boolean deployHostIsMissing(HostResource deployHost) {
return !multitenant && deployHost == null;
}
private List<Slobrok> createDefaultSlobrokSetup(DeployLogger deployLogger) {
List<HostResource> hosts = getHostSystem().getHosts();
List<Slobrok> slobs = new ArrayList<>();
if (logserver != null) {
Slobrok slobrok = new Slobrok(this, 0);
addAndInitializeService(deployLogger, logserver.getHostResource(), slobrok);
slobs.add(slobrok);
}
int n = 0;
while ((n < hosts.size()) && (slobs.size() < 3)) {
HostResource host = hosts.get(n);
if ((logserver== null || host != logserver.getHostResource()) && ! host.getHost().runsConfigServer()) {
Slobrok newSlobrok = new Slobrok(this, slobs.size());
addAndInitializeService(deployLogger, host, newSlobrok);
slobs.add(newSlobrok);
}
n++;
}
int j = 0;
for (Slobrok s : slobs) {
s.setProp("index", j);
j++;
}
return slobs;
}
public boolean multitenant() {
return multitenant;
}
public void setApplicationType(ApplicationType applicationType) {
this.applicationType = applicationType;
}
public ApplicationType getApplicationType() { return applicationType; }
} |
Never mind, I see these are enums in the client API. | private static String valueOf(Node.ClusterType type) {
switch (type) {
case admin: return "admin";
case content: return "content";
case container: return "container";
default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
}
} | default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'."); | private static String valueOf(Node.ClusterType type) {
switch (type) {
case admin: return "admin";
case content: return "content";
case container: return "container";
default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
}
} | class ApplicationApiHandler extends LoggingRequestHandler {
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
private final AccessControlRequests accessControlRequests;
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx);
this.controller = controller;
this.accessControlRequests = accessControlRequests;
}
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case PUT: return handlePUT(request);
case POST: return handlePOST(request);
case PATCH: return handlePATCH(request);
case DELETE: return handleDELETE(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
return ErrorResponse.from(e);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/user")) return authenticatedUser(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePUT(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/user")) return createUser(request);
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePOST(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePATCH(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}"))
return setMajorVersion(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleDELETE(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleOPTIONS() {
EmptyJsonResponse response = new EmptyJsonResponse();
response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
return response;
}
private HttpResponse recursiveRoot(HttpRequest request) {
Slime slime = new Slime();
Cursor tenantArray = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
toSlime(tenantArray.addObject(), tenant, request);
return new SlimeJsonResponse(slime);
}
private HttpResponse root(HttpRequest request) {
return recurseOverTenants(request)
? recursiveRoot(request)
: new ResourceResponse(request, "user", "tenant");
}
private HttpResponse authenticatedUser(HttpRequest request) {
Principal user = requireUserPrincipal(request);
if (user == null)
throw new NotAuthorizedException("You must be authenticated.");
String userName = user instanceof AthenzPrincipal ? ((AthenzPrincipal) user).getIdentity().getName() : user.getName();
TenantName tenantName = TenantName.from(UserTenant.normalizeUser(userName));
List<Tenant> tenants = controller.tenants().asList(new Credentials(user));
Slime slime = new Slime();
Cursor response = slime.setObject();
response.setString("user", userName);
Cursor tenantsArray = response.setArray("tenants");
for (Tenant tenant : tenants)
tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant.name().equals(tenantName)));
return new SlimeJsonResponse(slime);
}
private HttpResponse tenants(HttpRequest request) {
Slime slime = new Slime();
Cursor response = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
return new SlimeJsonResponse(slime);
}
private HttpResponse tenant(String tenantName, HttpRequest request) {
return controller.tenants().get(TenantName.from(tenantName))
.map(tenant -> tenant(tenant, request))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
}
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), tenant, request);
return new SlimeJsonResponse(slime);
}
/** Lists all applications belonging to a tenant. */
private HttpResponse applications(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Slime slime = new Slime();
    Cursor applicationsArray = slime.setArray();
    controller.applications().asList(tenant)
              .forEach(application -> toSlime(application, applicationsArray.addObject(), request));
    return new SlimeJsonResponse(slime);
}
/** Returns full detail about the default instance of the named application. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Application application = getApplication(tenantName, applicationName);
    Slime slime = new Slime();
    toSlime(slime.setObject(), application, request);
    return new SlimeJsonResponse(slime);
}
/**
 * Pins (or clears) the major version of an application.
 * The request body must contain a "majorVersion" field; the value 0 clears the pin.
 *
 * @throws IllegalArgumentException if the request body has no majorVersion field
 */
private HttpResponse setMajorVersion(String tenantName, String applicationName, HttpRequest request) {
    Application application = getApplication(tenantName, applicationName);
    Inspector majorVersionField = toSlime(request.getData()).get().field("majorVersion");
    if ( ! majorVersionField.valid())
        throw new IllegalArgumentException("Request body must contain a majorVersion field");

    long rawVersion = majorVersionField.asLong(); // read the field once instead of twice
    Integer majorVersion = rawVersion == 0 ? null : (int) rawVersion;
    controller.applications().lockIfPresent(application.id(),
                                            a -> controller.applications().store(a.withMajorVersion(majorVersion)));
    return new MessageResponse("Set major version to " + ( majorVersion == null ? "empty" : majorVersion));
}
/** Looks up the default instance of the named application, failing with NotExistsException if absent. */
private Application getApplication(String tenantName, String applicationName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
    Optional<Application> application = controller.applications().get(applicationId);
    return application.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
/** Lists the nodes allocated to a deployment, with their state, orchestration and version details. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);

    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : controller.configServer().nodeRepository().list(zone, id)) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        nodeObject.setString("flavor", node.canonicalFlavor());
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the wire name of a node state; every known state serializes as its enum name. Fails loudly on unknown values. */
private static String valueOf(Node.State state) {
    switch (state) {
        case failed: case parked: case dirty: case ready:
        case active: case inactive: case reserved: case provisioned:
            return state.name();
        default:
            throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/** Returns the wire name of a node's orchestration state; every known state serializes as its enum name. */
private static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp: case allowedDown: case unorchestrated:
            return state.name();
        default:
            throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/**
 * Returns logs for a deployment: streamed straight from the config server when the
 * "streaming" query parameter is present, otherwise as a JSON object of log entries.
 * The streamed variant now closes the upstream stream after the transfer (it was
 * previously left open, leaking the connection/stream resource).
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    if (queryParameters.containsKey("streaming")) {
        InputStream logStream = controller.configServer().getLogStream(deployment, queryParameters);
        return new HttpResponse(200) {
            @Override
            public void render(OutputStream outputStream) throws IOException {
                try (InputStream in = logStream) { // ensure the upstream stream is released
                    in.transferTo(outputStream);
                }
            }
        };
    }
    Optional<Logs> response = controller.configServer().getLogs(deployment, queryParameters);
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    // Copy each log entry into the response; an empty object is returned when there are no logs.
    response.ifPresent(logs -> logs.logs().forEach(object::setString));
    return new SlimeJsonResponse(slime);
}
/** Forcibly triggers the given job for an application, on behalf of the requesting user. */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    String user = request.getJDiscRequest().getUserPrincipal().getName();
    String triggered = controller.applications().deploymentTrigger()
                                 .forceTrigger(id, type, user)
                                 .stream().map(JobType::jobName).collect(joining(", "));
    if (triggered.isEmpty())
        return new MessageResponse("Job " + type.jobName() + " for " + id + " not triggered");
    return new MessageResponse("Triggered " + triggered + " for " + id);
}
/** Pauses the given job for an application for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant resumeAt = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, resumeAt);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/**
 * Serializes full detail about an application into the given cursor: identity, build source,
 * in-flight and outstanding changes, job status, change blockers, rotations, deployments,
 * metrics, activity and ownership. Field order follows what existing clients expect.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
object.setString("tenant", application.id().tenant().value());
object.setString("application", application.id().application().value());
object.setString("instance", application.id().instance().value());
// Link to the job overview for this instance.
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + application.id().instance().value() + "/job/",
request.getUri()).toString());
// Source revision of the last successful component (build) job, if any.
application.deploymentJobs().statusOf(JobType.component)
.flatMap(JobStatus::lastSuccess)
.map(run -> run.application().source())
.ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source")));
application.deploymentJobs().projectId()
.ifPresent(id -> object.setLong("projectId", id));
// Change currently rolling out, and change waiting behind it, if any.
if ( ! application.change().isEmpty()) {
toSlime(object.setObject("deploying"), application.change());
}
if ( ! application.outstandingChange().isEmpty()) {
toSlime(object.setObject("outstandingChange"), application.outstandingChange());
}
// Job status, ordered by the application's deployment spec.
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedJobs(application.deploymentJobs().jobStatus().values());
object.setBool("deployedInternally", application.deploymentJobs().deployedInternally());
Cursor deploymentsArray = object.setArray("deploymentJobs");
for (JobStatus job : jobStatus) {
Cursor jobObject = deploymentsArray.addObject();
jobObject.setString("type", job.type().jobName());
jobObject.setBool("success", job.isSuccess());
job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
}
// Time windows in which version upgrades and/or revision rollouts are blocked.
Cursor changeBlockers = object.setArray("changeBlockers");
application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
});
object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString());
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
// Global rotation endpoints: legacy global DNS names first, then routing-policy based names.
Cursor globalRotationsArray = object.setArray("globalRotations");
application.globalDnsName(controller.system()).ifPresent(rotation -> {
globalRotationsArray.addString(rotation.url().toString());
globalRotationsArray.addString(rotation.secureUrl().toString());
globalRotationsArray.addString(rotation.oathUrl().toString());
// NOTE(review): unchecked get() assumes rotation() is always present whenever globalDnsName() is — confirm that invariant.
object.setString("rotationId", application.rotation().get().asString());
});
Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies(application.id());
for (RoutingPolicy policy : routingPolicies) {
for (RotationName rotation : policy.rotations()) {
GlobalDnsName dnsName = new GlobalDnsName(application.id(), controller.system(), rotation);
globalRotationsArray.addString(dnsName.oathUrl().toString());
}
}
// Deployments ("instances"), ordered by the deployment spec; recursive requests inline full detail.
List<Deployment> deployments = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedDeployments(application.deployments().values());
Cursor instancesArray = object.setArray("instances");
for (Deployment deployment : deployments) {
Cursor deploymentObject = instancesArray.addObject();
// Rotation status only applies to production deployments of applications that have a rotation.
if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) {
toSlime(application.rotationStatus(deployment), deploymentObject);
}
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
else {
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("instance", application.id().instance().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value() +
"/instance/" + application.id().instance().value(),
request.getUri()).toString());
}
}
// Application-level service quality metrics and recent activity.
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
// Ownership and deployment issue tracking references, if any.
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns full detail about a single deployment of an application instance. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().get(id)
                                        .orElseThrow(() -> new NotExistsException(id + " not found"));

    DeploymentId deploymentId = new DeploymentId(application.id(), ZoneId.from(environment, region));
    Deployment deployment = application.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId());

    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes a change: its platform version, and its application version when known. */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(version -> object.setString("version", version.toString()));
    change.application().ifPresent(version -> {
        if ( ! version.isUnknown())
            toSlime(version, object.setObject("revision"));
    });
}
/**
 * Serializes full detail about a single deployment: identity, endpoints, versions,
 * timestamps, source revision, activity, cost and metrics. Field order follows what
 * existing clients expect.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
response.setString("tenant", deploymentId.applicationId().tenant().value());
response.setString("application", deploymentId.applicationId().application().value());
response.setString("instance", deploymentId.applicationId().instance().value());
response.setString("environment", deploymentId.zoneId().environment().value());
response.setString("region", deploymentId.zoneId().region().value());
// Service endpoints, when the config server knows them.
Cursor serviceUrlArray = response.setArray("serviceUrls");
controller.applications().getDeploymentEndpoints(deploymentId)
.ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())));
// Link to the node repository listing for this deployment.
response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
response.setString("version", deployment.version().toFullString());
response.setString("revision", deployment.applicationVersion().id());
response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
// Expiry only applies in zones with a configured deployment time-to-live.
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId()
.ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
sourceRevisionToSlime(deployment.applicationVersion().source(), response);
// Recent read/write activity for this deployment.
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
// Cost and serving metrics.
DeploymentCost appCost = deployment.calculateCost();
Cursor costObject = response.setObject("cost");
toSlime(appCost, costObject);
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes an application version — its hash and source revision — when it is known. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return; // nothing useful to serialize
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
}
/** Serializes a source revision (repository, branch, commit), if present. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Serializes global rotation status as an upper-cased "rotationStatus" inside a "bcpStatus" object. */
private void toSlime(RotationStatus status, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", status.name().toUpperCase());
}
/** Returns the monitoring system URI for the given deployment, as provided by the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/** Sets a deployment in or out of its global rotation, recording reason, agent and timestamp. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = ZoneId.from(environment, region);
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(application + " has no deployment in " + zone);
    }

    // A reason is mandatory; the agent is the authenticated caller.
    String reason = mandatory("reason", toSlime(request.getData()).get()).asString();
    String agent = requireUserPrincipal(request).getName();
    EndpointStatus endpointStatus = new EndpointStatus(inService ? EndpointStatus.Status.in : EndpointStatus.Status.out,
                                                       reason,
                                                       agent,
                                                       controller.clock().instant().getEpochSecond());
    controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()),
                                                      endpointStatus);
    return new MessageResponse(String.format("Successfully set %s in %s.%s %s service",
                                             application.id().toShortString(),
                                             deployment.zone().environment().value(),
                                             deployment.zone().region().value(),
                                             inService ? "in" : "out of"));
}
/**
 * Returns the global rotation override status of each endpoint of a deployment.
 * The response array alternates between an endpoint's upstream name and its status object.
 */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    Map<RoutingEndpoint, EndpointStatus> status = controller.applications().globalRotationStatus(deploymentId);
    // Iterate entries directly instead of keySet() plus a get() lookup per key.
    for (Map.Entry<RoutingEndpoint, EndpointStatus> entry : status.entrySet()) {
        EndpointStatus currentStatus = entry.getValue();
        array.addString(entry.getKey().upstreamName());
        Cursor statusObject = array.addObject();
        statusObject.setString("status", currentStatus.getStatus().name());
        statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
        statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
        statusObject.setLong("timestamp", currentStatus.getEpoch());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the global rotation status of a deployment; fails if the application has no rotation or no such deployment. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().require(applicationId);
    ZoneId zone = ZoneId.from(environment, region);
    if ( ! application.rotation().isPresent())
        throw new NotExistsException("global rotation does not exist for " + application);
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(application + " has no deployment in " + zone);

    Slime slime = new Slime();
    toSlime(application.rotationStatus(deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the change, if any, currently rolling out for the default instance of an application. */
private HttpResponse deploying(String tenant, String application, HttpRequest request) {
    Application app = controller.applications().require(ApplicationId.from(tenant, application, "default"));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = app.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether a deployment is currently suspended by the orchestrator. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/**
 * Returns the services of a deployment, as reported by the config server.
 * The zone id is constructed once and reused rather than re-parsed for each use.
 */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = ZoneId.from(environment, region);
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/**
 * Proxies a request for a single service's API under a deployment.
 * The zone id is constructed once and reused rather than re-parsed for each use.
 */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath);
    ZoneId zone = ZoneId.from(environment, region);
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Creates a user tenant for the authenticated Athenz user, succeeding quietly if it already exists. */
private HttpResponse createUser(HttpRequest request) {
    // Only Athenz user principals may create a user tenant.
    String user = Optional.of(requireUserPrincipal(request))
                          .filter(AthenzPrincipal.class::isInstance)
                          .map(AthenzPrincipal.class::cast)
                          .map(AthenzPrincipal::getIdentity)
                          .filter(AthenzUser.class::isInstance)
                          .map(AthenzIdentity::getName)
                          .map(UserTenant::normalizeUser)
                          .orElseThrow(() -> new ForbiddenException("Not authenticated or not a user."));
    try {
        controller.tenants().createUser(UserTenant.create(user));
        return new MessageResponse("Created user '" + user + "'");
    }
    catch (AlreadyExistsException e) {
        return new MessageResponse("User '" + user + "' already exists");
    }
}
/** Updates an existing tenant from the request body, returning its new state. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName);
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new tenant from the request body, returning its state. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates the default instance of an application under a tenant. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    try {
        // User tenants need no credentials; all others must supply them in the request.
        boolean userTenant = controller.tenants().require(id.tenant()).type() == Tenant.Type.user;
        Optional<Credentials> credentials =
                userTenant ? Optional.empty()
                           : Optional.of(accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()));
        Application application = controller.applications().createApplication(id, credentials);

        Slime slime = new Slime();
        toSlime(application, slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }
    catch (ZmsClientException e) {
        if (e.getErrorCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN)
            throw new ForbiddenException("Not authorized to create application", e);
        throw e;
    }
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, boolean pin, HttpRequest request) {
request = controller.auditLogger().log(request);
// The request body is the bare version string; an empty version means the current system version.
String versionString = readToString(request.getData());
ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
StringBuilder response = new StringBuilder();
controller.applications().lockOrThrow(id, application -> {
Version version = Version.fromString(versionString);
if (version.equals(Version.emptyVersion))
version = controller.systemVersion();
// Refuse versions that are not active anywhere in this system.
if ( ! systemHasVersion(version))
throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
"Version is not active in this system. " +
"Active versions: " + controller.versionStatus().versions()
.stream()
.map(VespaVersion::versionNumber)
.map(Version::toString)
.collect(joining(", ")));
Change change = Change.of(version);
if (pin)
change = change.withPin(); // pinning prevents automatic upgrades away from this version
controller.applications().deploymentTrigger().forceChange(id, change);
response.append("Triggered " + change + " for " + id);
});
return new MessageResponse(response.toString());
}
/**
 * Trigger deployment to the last known application package for the given application.
 *
 * @throws IllegalArgumentException if no component job has ever succeeded for the application,
 *         instead of the opaque NoSuchElementException the previous unchecked Optional.get() chain produced
 */
private HttpResponse deployApplication(String tenantName, String applicationName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Change change = application.get().deploymentJobs().statusOf(JobType.component)
                                   .flatMap(JobStatus::lastSuccess)
                                   .map(run -> Change.of(run.application()))
                                   .orElseThrow(() -> new IllegalArgumentException("No successful build of " + id + " to deploy"));
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Change ongoing = application.get().change();
        if (ongoing.isEmpty()) {
            response.append("No deployment in progress for " + application + " at this time");
            return;
        }
        controller.applications().deploymentTrigger()
                  .cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
        Change remaining = controller.applications().require(id).change();
        response.append("Changed deployment from '" + ongoing + "' to '" + remaining + "' for " + application);
    });
    return new MessageResponse(response.toString());
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    // Restart a single host when the "hostname" query parameter is given, otherwise the whole deployment.
    Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
    controller.applications().restart(deploymentId, hostname);

    String restarted = path(TenantResource.API_PATH, tenantName,
                            ApplicationResource.API_PATH, applicationName,
                            EnvironmentResource.API_PATH, environment,
                            "region", region,
                            "instance", instanceName);
    return new StringResponse("Requested restart of " + restarted);
}
/**
 * Deploys an application package to a zone. Handles three cases in order:
 * the system zone application, direct redeployment of the currently deployed version,
 * and normal deployments with an uploaded package or a referenced build.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
// The request is multipart: "deployOptions" (JSON) is required, "applicationZip" is optional.
Map<String, byte[]> dataParts = new MultipartParser().parse(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
/*
* Special handling of the zone application (the only system application with an application package)
* Setting any other deployOptions here is not supported for now (e.g. specifying version), but
* this might be handy later to handle emergency downgrades.
*/
boolean isZoneApplication = SystemApplication.zone.id().equals(applicationId);
if (isZoneApplication) {
String versionStr = deployOptions.field("vespaVersion").asString();
boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
if (versionPresent) {
// NOTE(review): a raw RuntimeException likely surfaces as a 500; an IllegalArgumentException (400) may be more apt — confirm intent.
throw new RuntimeException("Version not supported for system applications");
}
// Refuse system-application deploys while an upgrade is in progress, or before the system version is known.
if (controller.versionStatus().isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
ActivateResult result = controller.applications()
.deploySystemApplicationPackage(SystemApplication.zone, zone, systemVersion.get().versionNumber());
return new SlimeJsonResponse(toSlime(result));
}
/*
* Normal applications from here
*/
Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
.map(ApplicationPackage::new);
// A build may be referenced by source revision + build number instead of uploading a package.
Inspector sourceRevision = deployOptions.field("sourceRevision");
Inspector buildNumber = deployOptions.field("buildNumber");
if (sourceRevision.valid() != buildNumber.valid())
throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
Optional<ApplicationVersion> applicationVersion = Optional.empty();
if (sourceRevision.valid()) {
if (applicationPackage.isPresent())
throw new IllegalArgumentException("Application version and application package can't both be provided.");
applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
buildNumber.asLong()));
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
/*
* Deploy direct is when we want to redeploy the current application - retrieve version
* info from the application package before deploying
*/
if(deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) {
// Resolve the package and versions from what is currently deployed in this zone.
Optional<Deployment> deployment = controller.applications().get(applicationId)
.map(Application::deployments)
.flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
if(!deployment.isPresent())
throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
ApplicationVersion version = deployment.get().applicationVersion();
if(version.isUnknown())
throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
applicationVersion = Optional.of(version);
vespaVersion = Optional.of(deployment.get().version());
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
// Hand the resolved package, versions and options to the deployment machinery.
DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
vespaVersion,
deployOptions.field("ignoreValidationErrors").asBool(),
deployOptions.field("deployCurrentVersion").asBool());
ActivateResult result = controller.applications().deploy(applicationId,
zone,
applicationPackage,
applicationVersion,
deployOptionsJsonClass,
Optional.of(requireUserPrincipal(request)));
return new SlimeJsonResponse(toSlime(result));
}
/**
 * Deletes the given tenant. User tenants are removed directly, while all other
 * tenant types require credentials from the request for access control.
 *
 * @return the (now deleted) tenant's data, or 404 if the tenant does not exist
 */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> existing = controller.tenants().get(tenantName);
    if ( ! existing.isPresent())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

    Tenant tenant = existing.get();
    if (tenant.type() == Tenant.Type.user) {
        controller.tenants().deleteUser((UserTenant) tenant);
    }
    else {
        Credentials credentials = accessControlRequests.credentials(tenant.name(),
                                                                    toSlime(request.getData()).get(),
                                                                    request.getJDiscRequest());
        controller.tenants().delete(tenant.name(), credentials);
    }
    // Return the last known state of the deleted tenant as the response body
    return tenant(tenant, request);
}
/** Deletes the default instance of the named application, with credentials unless its tenant is a user tenant. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    boolean userTenant = controller.tenants().require(id.tenant()).type() == Tenant.Type.user;
    Optional<Credentials> credentials;
    if (userTenant)
        credentials = Optional.empty();  // user tenants are not access controlled
    else
        credentials = Optional.of(accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()));
    controller.applications().deleteApplication(id, credentials);
    return new EmptyJsonResponse();
}
/** Deactivates (removes) the deployment of the given instance in the given zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().require(id);
    controller.applications().deactivate(application.id(), ZoneId.from(environment, region));
    String deploymentPath = path(TenantResource.API_PATH, tenantName,
                                 ApplicationResource.API_PATH, applicationName,
                                 EnvironmentResource.API_PATH, environment,
                                 "region", region,
                                 "instance", instanceName);
    return new StringResponse("Deactivated " + deploymentPath);
}
/**
 * Promote application Chef environments. To be used by component jobs only:
 * copies the system Chef environment into the application's source environment.
 */
private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String source = chefEnvironment.systemChefEnvironment();
        String target = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        controller.chefClient().copyChefEnvironment(source, target);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", source, target));
    }
    catch (Exception e) {
        // Chef is an external system; any failure maps to a 500 while keeping the cause in the log
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}
/**
 * Promote application Chef environments for jobs that deploy applications:
 * copies the application's source environment into the target environment for the given zone.
 */
private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String source = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        String target = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
        controller.chefClient().copyChefEnvironment(source, target);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", source, target));
    }
    catch (Exception e) {
        // Chef is an external system; any failure maps to a 500 while keeping the cause in the log
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}
/**
 * Handles completion reports from deployment jobs.
 * Component reports are rejected for applications which deploy through the internal pipeline.
 *
 * @return "ok" on success, or 400 when the report is inconsistent with current state
 */
private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) {
try {
DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get());
// Internally deployed applications no longer accept external component submissions
if ( report.jobType() == JobType.component
&& controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally())
throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " +
"longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
"to the old pipeline, please file a ticket at yo/vespa-support and request this.");
controller.applications().deploymentTrigger().notifyOfCompletion(report);
return new MessageResponse("ok");
} catch (IllegalStateException e) {
// A report which does not match the current deployment state is treated as a client error
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
}
/**
 * Creates a job report from the given json. Component jobs additionally carry a
 * project id and a source revision; all other job types need only the build number.
 */
private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) {
// A missing "jobError" field means the job succeeded
Optional<DeploymentJobs.JobError> jobError = Optional.empty();
if (report.field("jobError").valid()) {
jobError = Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString()));
}
ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString());
JobType type = JobType.fromJobName(report.field("jobName").asString());
long buildNumber = report.field("buildNumber").asLong();
if (type == JobType.component)
return DeploymentJobs.JobReport.ofComponent(id,
report.field("projectId").asLong(),
buildNumber,
jobError,
toSourceRevision(report.field("sourceRevision")));
else
return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError);
}
/**
 * Reads a source revision from the given json object.
 *
 * @throws IllegalArgumentException if any of the required fields are missing
 */
private static SourceRevision toSourceRevision(Inspector object) {
    boolean complete =    object.field("repository").valid()
                       && object.field("branch").valid()
                       && object.field("commit").valid();
    if ( ! complete)
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");

    return new SourceRevision(object.field("repository").asString(),
                              object.field("branch").asString(),
                              object.field("commit").asString());
}
/** Returns the tenant with the given name, or throws a NotExistsException. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    return tenant.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/**
 * Writes the given tenant, including its applications, to the given object.
 * Athenz tenants additionally expose domain, property and, when present, contact information.
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tentantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
// Contact info, when present: urls plus a nested array-of-arrays of contact persons
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
Cursor contactsArray = object.setArray("contacts");
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
case user: break;   // no extra metadata
case cloud: break;  // no extra metadata
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
// Only default instances are listed; recursion inlines the full application data instead of a reference
Cursor applicationArray = object.setArray("applications");
for (Application application : controller.applications().asList(tenant.name())) {
if (application.id().instance().isDefault()) {
if (recurseOverApplications(request))
toSlime(applicationArray.addObject(), application, request);
else
toSlime(application, applicationArray.addObject(), request);
}
}
}
/** Writes a tenant list entry: name, type metadata and the url of the tenant resource. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tentantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
break;
case user: break;   // no extra metadata
case cloud: break;  // no extra metadata
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    try {
        // Query and fragment are intentionally dropped
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
    }
    catch (URISyntaxException e) {
        throw new RuntimeException("Will not happen", e);
    }
}
/**
 * Parses the given string as a long.
 *
 * @param valueOrNull the string to parse, or null to use the default
 * @throws IllegalArgumentException if the string is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null)
        return defaultWhenNull;

    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}
/** Writes the given job run: id, platform version, application revision (when known), reason and timestamp. */
private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
object.setLong("id", jobRun.id());
object.setString("version", jobRun.platform().toFullString());
// An unknown application version has no meaningful revision to render
if (!jobRun.application().isUnknown())
toSlime(jobRun.application(), object.setObject("revision"));
object.setString("reason", jobRun.reason());
object.setLong("at", jobRun.at().toEpochMilli());
}
/**
 * Reads the given stream (at most 1 MB) and parses the content as json.
 *
 * @throws RuntimeException wrapping the IOException if reading the stream fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Previously the cause was dropped (bare RuntimeException), hiding why reading failed
        throw new RuntimeException(e);
    }
}
/** Returns the user principal of the given request; throws if the request has none. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal == null)
        throw new InternalServerErrorException("Expected a user principal");

    return principal;
}
/** Returns the given field of the given object, which must be present. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the string value of the given field, if it is present. */
private Optional<String> optional(String key, Inspector object) {
    Inspector field = object.field(key);
    return SlimeUtils.optionalString(field);
}
/** Joins the string forms of the given elements with '/'. */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}
/** Writes the identifying fields and the url of the given application to the given object. */
private void toSlime(Application application, Cursor object, HttpRequest request) {
    ApplicationId id = application.id();
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String applicationPath = "/application/v4/tenant/" + id.tenant().value() +
                             "/application/" + id.application().value();
    object.setString("url", withPath(applicationPath, request.getUri()).toString());
}
/**
 * Writes the result of a deployment activation: revision id, package size, prepare log
 * messages, and any config change actions (service restarts and document refeeds).
 */
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
// Config changes may require restarting services ...
Cursor changeObject = object.setObject("configChangeActions");
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
// ... or refeeding document types
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setBool("allowed", refeedAction.allowed);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
/** Writes each of the given service infos as an object in the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    for (ServiceInfo serviceInfo : serviceInfoList) {
        Cursor object = array.addObject();
        object.setString("serviceName", serviceInfo.serviceName);
        object.setString("serviceType", serviceInfo.serviceType);
        object.setString("configId", serviceInfo.configId);
        object.setString("hostName", serviceInfo.hostName);
    }
}
/** Adds each of the given strings to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/**
 * Reads the entire stream into a single string, or returns null if the stream is empty.
 * The caller retains ownership of the stream; it is not closed here.
 */
private String readToString(InputStream stream) {
// "\\A" makes the scanner consume the whole stream as one token
// NOTE(review): this uses the platform default charset — presumably UTF-8 is intended; confirm
Scanner scanner = new Scanner(stream).useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
/** Returns whether the given version number is known in this system's version status. */
private boolean systemHasVersion(Version version) {
    return controller.versionStatus().versions().stream()
                     .anyMatch(candidate -> candidate.versionNumber().equals(version));
}
/** Writes the given deployment cost, including an entry for each of its clusters, to the given object. */
public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
    object.setLong("tco", (long) deploymentCost.getTco());
    object.setLong("waste", (long) deploymentCost.getWaste());
    object.setDouble("utilization", deploymentCost.getUtilization());

    Cursor clustersObject = object.setObject("cluster");
    for (Map.Entry<String, ClusterCost> entry : deploymentCost.getCluster().entrySet())
        toSlime(entry.getValue(), clustersObject.setObject(entry.getKey()));
}
/**
 * Writes the given cluster cost: node count, dominating resource, utilization, cost,
 * flavor info, per-resource utilization and usage, and the cluster's host names.
 */
private static void toSlime(ClusterCost clusterCost, Cursor object) {
    object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
    object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
    object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
    // Cast to long (was int) to avoid truncating large values — consistent with toSlime(DeploymentCost, Cursor)
    object.setLong("tco", (long)clusterCost.getTco());
    object.setLong("waste", (long)clusterCost.getWaste());
    object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
    object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
    object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
    object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
    object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
    object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
    Cursor utilObject = object.setObject("util");
    utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
    utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
    utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
    utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
    Cursor usageObject = object.setObject("usage");
    usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
    usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
    usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
    usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
    Cursor hostnamesArray = object.setArray("hostnames");
    for (String hostname : clusterCost.getClusterInfo().getHostnames())
        hostnamesArray.addString(hostname);
}
/**
 * Returns the name of the resource with the highest utilization, checked in the
 * order mem, disk, diskbusy, with "cpu" as the fallback.
 */
private static String getResourceName(ClusterUtilization utilization) {
    double max = utilization.getMaxUtilization();
    if (utilization.getMemory() == max) return "mem";
    if (utilization.getDisk() == max) return "disk";
    if (utilization.getDiskBusy() == max) return "diskbusy";
    return "cpu";
}
/** Returns whether the request asks for recursion at tenant level or deeper. */
private static boolean recurseOverTenants(HttpRequest request) {
    if (recurseOverApplications(request)) return true;
    return "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion at application level or deeper. */
private static boolean recurseOverApplications(HttpRequest request) {
    if (recurseOverDeployments(request)) return true;
    return "application".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion down to deployment level. */
private static boolean recurseOverDeployments(HttpRequest request) {
    String recursive = request.getProperty("recursive");
    return ImmutableSet.of("all", "true", "deployment").contains(recursive);
}
/**
 * Returns the API string for the given tenant's type.
 * (The method name keeps its historical misspelling, and "ATHENS" is the legacy API value.)
 */
private static String tentantType(Tenant tenant) {
switch (tenant.type()) {
case user: return "USER";
case athenz: return "ATHENS";
case cloud: return "CLOUD";
default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
}
/** Reads the tenant, application and instance path segments into an ApplicationId. */
private static ApplicationId appIdFromPath(Path path) {
    String tenant = path.get("tenant");
    String application = path.get("application");
    String instance = path.get("instance");
    return ApplicationId.from(tenant, application, instance);
}
/** Reads the jobtype path segment into a JobType. */
private static JobType jobTypeFromPath(Path path) {
    String jobName = path.get("jobtype");
    return JobType.fromJobName(jobName);
}
/** Combines the application id, job type and run number path segments into a RunId. */
private static RunId runIdFromPath(Path path) {
    long runNumber = Long.parseLong(path.get("number"));
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), runNumber);
}
/**
 * Registers a new application revision from a multipart submission containing submit
 * options, the application package, and the application test package.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = new MultipartParser().parse(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
SourceRevision sourceRevision = toSourceRevision(submitOptions);
String authorEmail = submitOptions.field("authorEmail").asString();
// Project id must be positive; 1 is used when the field is absent or non-positive
long projectId = Math.max(1, submitOptions.field("projectId").asLong());
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
tenant,
application,
sourceRevision,
authorEmail,
projectId,
applicationPackage,
dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
} | class ApplicationApiHandler extends LoggingRequestHandler {
// Request paths may optionally carry this prefix; it is stripped before route matching
private static final String OPTIONAL_PREFIX = "/api";

private final Controller controller;
private final AccessControlRequests accessControlRequests;

/** Creates this handler with its collaborators, injected by the container. */
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx);
this.controller = controller;
this.accessControlRequests = accessControlRequests;
}
@Override
public Duration getTimeout() {
// Requests handled here (e.g. deployments) can be slow, so allow ample time
return Duration.ofMinutes(20);
}
/** Dispatches on the HTTP method, translating known exception types to the matching error responses. */
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case PUT: return handlePUT(request);
case POST: return handlePOST(request);
case PATCH: return handlePATCH(request);
case DELETE: return handleDELETE(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
return ErrorResponse.from(e);
}
catch (RuntimeException e) {
// Only unexpected errors are logged with a stack trace; the types above are expected client/config errors
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
/** Routes GET requests to the handler for the matching resource path, or returns 404. */
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/user")) return authenticatedUser(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PUT requests to the handler for the matching resource path, or returns 404. */
private HttpResponse handlePUT(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/user")) return createUser(request);
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes POST requests to the handler for the matching resource path, or returns 404. */
private HttpResponse handlePOST(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests to the handler for the matching resource path, or returns 404. */
private HttpResponse handlePATCH(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}"))
return setMajorVersion(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes DELETE requests to the handler for the matching resource path, or returns 404. */
private HttpResponse handleDELETE(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS requests with the methods this handler allows. */
private HttpResponse handleOPTIONS() {
    // The Allow header mirrors the switch in handle()
    EmptyJsonResponse response = new EmptyJsonResponse();
    response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return response;
}
/** Lists all tenants, recursively. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList().forEach(tenant -> toSlime(tenantArray.addObject(), tenant, request));
    return new SlimeJsonResponse(slime);
}
/** Lists the top-level resources, or all tenants recursively when requested. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);

    return new ResourceResponse(request, "user", "tenant");
}
/**
 * Returns information about the authenticated user: the user name, the tenants the
 * user may access, and whether the user's personal tenant exists.
 */
private HttpResponse authenticatedUser(HttpRequest request) {
    // requireUserPrincipal throws when there is no principal, so no null check is needed here
    // (the previous 'user == null' branch was unreachable)
    Principal user = requireUserPrincipal(request);
    String userName = user instanceof AthenzPrincipal ? ((AthenzPrincipal) user).getIdentity().getName() : user.getName();
    TenantName tenantName = TenantName.from(UserTenant.normalizeUser(userName));
    List<Tenant> tenants = controller.tenants().asList(new Credentials(user));

    Slime slime = new Slime();
    Cursor response = slime.setObject();
    response.setString("user", userName);
    Cursor tenantsArray = response.setArray("tenants");
    for (Tenant tenant : tenants)
        tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
    response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant.name().equals(tenantName)));
    return new SlimeJsonResponse(slime);
}
/** Lists all tenants known to this controller. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor response = slime.setArray();
    controller.tenants().asList().forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Returns the tenant with the given name, or 404 if it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if ( ! tenant.isPresent())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");

    return tenant(tenant.get(), request);
}
/** Renders the given tenant as a json response. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, tenant, request);
    return new SlimeJsonResponse(slime);
}
/** Lists all applications belonging to the given tenant. */
private HttpResponse applications(String tenantName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (Application application : controller.applications().asList(TenantName.from(tenantName)))
        toSlime(application, applicationArray.addObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Returns the full details of a single application (default instance). */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Application application = getApplication(tenantName, applicationName);
    Slime slime = new Slime();
    toSlime(slime.setObject(), application, request);
    return new SlimeJsonResponse(slime);
}
/** Pins the application to a major Vespa version; a value of 0 clears the pin. */
private HttpResponse setMajorVersion(String tenantName, String applicationName, HttpRequest request) {
    Application application = getApplication(tenantName, applicationName);
    Inspector majorVersionField = toSlime(request.getData()).get().field("majorVersion");
    if ( ! majorVersionField.valid())
        throw new IllegalArgumentException("Request body must contain a majorVersion field");

    long requestedVersion = majorVersionField.asLong();
    Integer majorVersion = requestedVersion == 0 ? null : (int) requestedVersion;  // 0 means "unpin"
    controller.applications().lockIfPresent(application.id(),
                                            a -> controller.applications().store(a.withMajorVersion(majorVersion)));
    return new MessageResponse("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
}
/** Looks up the default instance of the given application, throwing when absent. */
private Application getApplication(String tenantName, String applicationName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
    Optional<Application> application = controller.applications().get(applicationId);
    return application.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
/** Lists the nodes allocated to a deployment, with state, version and cluster info. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);

    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : controller.configServer().nodeRepository().list(zone, id)) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        nodeObject.setString("flavor", node.canonicalFlavor());
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
    }
    return new SlimeJsonResponse(slime);
}
/** Maps a node state to its API string; rejects any state unknown to this API. */
private static String valueOf(Node.State state) {
    switch (state) {
        case failed: case parked: case dirty: case ready:
        case active: case inactive: case reserved: case provisioned:
            // Every known state serializes as its lowercase enum name.
            return state.name();
        default:
            throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/** Maps an orchestration (service) state to its API string; rejects unknown states. */
private static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp: case allowedDown: case unorchestrated:
            // Every known state serializes as its enum name.
            return state.name();
        default:
            throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/**
 * Returns log entries for the given deployment.
 * With the "streaming" query parameter the config server's raw log stream is piped
 * straight to the client; otherwise the logs are returned as a JSON object of
 * log name to content.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);

    if (queryParameters.containsKey("streaming")) {
        InputStream logStream = controller.configServer().getLogStream(deployment, queryParameters);
        return new HttpResponse(200) {
            @Override
            public void render(OutputStream outputStream) throws IOException {
                logStream.transferTo(outputStream);
            }
        };
    }

    Slime slime = new Slime();
    Cursor object = slime.setObject();
    // Map.forEach instead of entrySet().stream().forEach(): no intermediate stream needed.
    controller.configServer().getLogs(deployment, queryParameters)
              .ifPresent(logs -> logs.logs().forEach(object::setString));
    return new SlimeJsonResponse(slime);
}
/** Force-triggers the given job for an application, recording who asked for it. */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    String user = request.getJDiscRequest().getUserPrincipal().getName();
    String triggered = controller.applications().deploymentTrigger()
                                 .forceTrigger(id, type, user)
                                 .stream().map(JobType::jobName).collect(joining(", "));
    if (triggered.isEmpty())
        return new MessageResponse("Job " + type.jobName() + " for " + id + " not triggered");
    return new MessageResponse("Triggered " + triggered + " for " + id);
}
/** Pauses the given job for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger()
              .pauseJob(id, type, controller.clock().instant().plus(DeploymentTrigger.maxPause));
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/**
 * Serializes the full details of an application to the given JSON cursor:
 * identity, deployment jobs, pending changes, change blockers, rotations,
 * per-zone deployments, metrics, activity and ownership information.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
object.setString("tenant", application.id().tenant().value());
object.setString("application", application.id().application().value());
object.setString("instance", application.id().instance().value());
// Link to the job listing for this application.
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + application.id().instance().value() + "/job/",
request.getUri()).toString());
// Source revision of the last successful component build, when known.
application.deploymentJobs().statusOf(JobType.component)
.flatMap(JobStatus::lastSuccess)
.map(run -> run.application().source())
.ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source")));
application.deploymentJobs().projectId()
.ifPresent(id -> object.setLong("projectId", id));
// Change currently rolling out, and change queued behind it, if any.
if ( ! application.change().isEmpty()) {
toSlime(object.setObject("deploying"), application.change());
}
if ( ! application.outstandingChange().isEmpty()) {
toSlime(object.setObject("outstandingChange"), application.outstandingChange());
}
// Job status entries, ordered according to the application's deployment spec.
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedJobs(application.deploymentJobs().jobStatus().values());
object.setBool("deployedInternally", application.deploymentJobs().deployedInternally());
Cursor deploymentsArray = object.setArray("deploymentJobs");
for (JobStatus job : jobStatus) {
Cursor jobObject = deploymentsArray.addObject();
jobObject.setString("type", job.type().jobName());
jobObject.setBool("success", job.isSuccess());
job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
}
// Time windows in which version and/or revision changes are blocked.
Cursor changeBlockers = object.setArray("changeBlockers");
application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
});
object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString());
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
// Global rotation URLs for the legacy rotation of this application.
// NOTE(review): assumes rotation() is present whenever globalDnsName() is — the
// unchecked rotation().get() would throw otherwise; confirm with Application.
Cursor globalRotationsArray = object.setArray("globalRotations");
application.globalDnsName(controller.system()).ifPresent(rotation -> {
globalRotationsArray.addString(rotation.url().toString());
globalRotationsArray.addString(rotation.secureUrl().toString());
globalRotationsArray.addString(rotation.oathUrl().toString());
object.setString("rotationId", application.rotation().get().asString());
});
// Additional rotation URLs derived from per-zone routing policies.
Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies(application.id());
for (RoutingPolicy policy : routingPolicies) {
for (RotationName rotation : policy.rotations()) {
GlobalDnsName dnsName = new GlobalDnsName(application.id(), controller.system(), rotation);
globalRotationsArray.addString(dnsName.oathUrl().toString());
}
}
// Deployments, ordered according to the deployment spec; with recursion enabled
// each deployment is expanded in-line, otherwise only a link is emitted.
List<Deployment> deployments = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedDeployments(application.deployments().values());
Cursor instancesArray = object.setArray("instances");
for (Deployment deployment : deployments) {
Cursor deploymentObject = instancesArray.addObject();
// Rotation status is only emitted for rotated production deployments.
if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) {
toSlime(application.rotationStatus(deployment), deploymentObject);
}
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
else {
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("instance", application.id().instance().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value() +
"/instance/" + application.id().instance().value(),
request.getUri()).toString());
}
}
// Application-level quality metrics and usage activity.
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
// Ownership and issue-tracking references.
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns the details of one deployment of an application, throwing when either is missing. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().get(id)
                                        .orElseThrow(() -> new NotExistsException(id + " not found"));

    DeploymentId deploymentId = new DeploymentId(application.id(), ZoneId.from(environment, region));
    Deployment deployment = application.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId());

    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Writes a change: its target platform version and/or known application revision. */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(version -> object.setString("version", version.toString()));
    change.application().ifPresent(version -> {
        if ( ! version.isUnknown())  // unknown revisions carry no useful detail
            toSlime(version, object.setObject("revision"));
    });
}
/**
 * Serializes one deployment of an application: identity, service endpoints,
 * versions, timestamps, activity, cost and serving metrics.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
response.setString("tenant", deploymentId.applicationId().tenant().value());
response.setString("application", deploymentId.applicationId().application().value());
response.setString("instance", deploymentId.applicationId().instance().value());
response.setString("environment", deploymentId.zoneId().environment().value());
response.setString("region", deploymentId.zoneId().region().value());
// Service endpoints, when the config server can resolve them.
Cursor serviceUrlArray = response.setArray("serviceUrls");
controller.applications().getDeploymentEndpoints(deploymentId)
.ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())));
// Link into the node repository listing this deployment's nodes.
response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
response.setString("version", deployment.version().toFullString());
response.setString("revision", deployment.applicationVersion().id());
response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
// Expiry time only applies in zones configured with a deployment time-to-live.
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId()
.ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
sourceRevisionToSlime(deployment.applicationVersion().source(), response);
// Usage activity for this particular deployment.
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
// Cost estimate and current serving metrics.
DeploymentCost appCost = deployment.calculateCost();
Cursor costObject = response.setObject("cost");
toSlime(appCost, costObject);
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Writes an application version's hash and source revision, unless it is unknown. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown())
        return;  // nothing meaningful to report
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
}
/** Writes the git repository, branch and commit of a source revision, when known. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Writes the BCP rotation status of a deployment. */
private void toSlime(RotationStatus status, Cursor object) {
    Cursor bcpStatus = object.setObject("bcpStatus");
    // Fixed locale: default-locale toUpperCase() mangles 'i' under e.g. Turkish locales.
    bcpStatus.setString("rotationStatus", status.name().toUpperCase(java.util.Locale.ROOT));
}
/** Resolves the monitoring dashboard URI for the given deployment. */
private URI monitoringSystemUri(DeploymentId deployment) {
    return controller.zoneRegistry().getMonitoringSystemUri(deployment);
}
/** Sets a deployment in or out of its global rotation, recording reason, agent and time. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = ZoneId.from(environment, region);
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(application + " has no deployment in " + zone);
    }

    // Build the new endpoint status from the request body and the calling user.
    Inspector requestData = toSlime(request.getData()).get();
    String reason = mandatory("reason", requestData).asString();
    String agent = requireUserPrincipal(request).getName();
    long timestamp = controller.clock().instant().getEpochSecond();
    EndpointStatus endpointStatus = new EndpointStatus(inService ? EndpointStatus.Status.in : EndpointStatus.Status.out,
                                                      reason, agent, timestamp);
    controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()),
                                                      endpointStatus);

    return new MessageResponse(String.format("Successfully set %s in %s.%s %s service",
                                             application.id().toShortString(),
                                             deployment.zone().environment().value(),
                                             deployment.zone().region().value(),
                                             inService ? "in" : "out of"));
}
/** Returns the global rotation override status of each routing endpoint of a deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    // Iterate entries directly instead of keySet() + get(): one lookup per endpoint, no null risk.
    for (Map.Entry<RoutingEndpoint, EndpointStatus> entry : controller.applications().globalRotationStatus(deploymentId).entrySet()) {
        EndpointStatus currentStatus = entry.getValue();
        array.addString(entry.getKey().upstreamName());
        Cursor statusObject = array.addObject();
        statusObject.setString("status", currentStatus.getStatus().name());
        // Reason and agent may be unset; serialize them as empty strings.
        statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
        statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
        statusObject.setLong("timestamp", currentStatus.getEpoch());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the BCP rotation status of one deployment of a globally rotated application. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    if ( ! application.rotation().isPresent())
        throw new NotExistsException("global rotation does not exist for " + application);

    ZoneId zone = ZoneId.from(environment, region);
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(application + " has no deployment in " + zone);

    Slime slime = new Slime();
    toSlime(application.rotationStatus(deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the change (platform version and/or application revision) currently rolling out. */
private HttpResponse deploying(String tenant, String application, HttpRequest request) {
    Application app = controller.applications().require(ApplicationId.from(tenant, application, "default"));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = app.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Tells whether the given deployment is currently suspended by the orchestrator. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Lists the services running in a deployment, as seen by the config server. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = ZoneId.from(environment, region);
    ApplicationId id = new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         id,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/** Proxies a service API request for a single service within a deployment. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    Map<?, ?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath);
    ZoneId zone = ZoneId.from(environment, region);
    ApplicationId id = new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         id,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Creates the user tenant for the authenticated Athenz user, or reports it already exists. */
private HttpResponse createUser(HttpRequest request) {
    // Only authenticated Athenz users may create a user tenant.
    String user = Optional.of(requireUserPrincipal(request))
                          .filter(principal -> principal instanceof AthenzPrincipal)
                          .map(principal -> ((AthenzPrincipal) principal).getIdentity())
                          .filter(identity -> identity instanceof AthenzUser)
                          .map(AthenzIdentity::getName)
                          .map(UserTenant::normalizeUser)
                          .orElseThrow(() -> new ForbiddenException("Not authenticated or not a user."));

    try {
        controller.tenants().createUser(UserTenant.create(user));
        return new MessageResponse("Created user '" + user + "'");
    }
    catch (AlreadyExistsException e) {
        return new MessageResponse("User '" + user + "' already exists");
    }
}
/** Updates an existing tenant from the request body and returns its new state. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName);  // fail fast when the tenant does not exist
    TenantName name = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(name, requestObject),
                                accessControlRequests.credentials(name, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}
/** Creates a new tenant from the request body and returns its state. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName name = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(name, requestObject),
                                accessControlRequests.credentials(name, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}
/** Creates a new application under the given tenant. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    try {
        // User tenants need no access-control credentials; all others must prove authorization.
        boolean isUserTenant = controller.tenants().require(id.tenant()).type() == Tenant.Type.user;
        Optional<Credentials> credentials =
                isUserTenant ? Optional.empty()
                             : Optional.of(accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()));
        Application application = controller.applications().createApplication(id, credentials);

        Slime slime = new Slime();
        toSlime(application, slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }
    catch (ZmsClientException e) {
        if (e.getErrorCode() != com.yahoo.jdisc.Response.Status.FORBIDDEN)
            throw e;
        throw new ForbiddenException("Not authorized to create application", e);
    }
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, boolean pin, HttpRequest request) {
request = controller.auditLogger().log(request);
// The request body is the bare version string; the empty version means "current system version".
String versionString = readToString(request.getData());
ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
StringBuilder response = new StringBuilder();
controller.applications().lockOrThrow(id, application -> {
Version version = Version.fromString(versionString);
if (version.equals(Version.emptyVersion))
version = controller.systemVersion();
// Refuse versions which are not active anywhere in this system.
if ( ! systemHasVersion(version))
throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
"Version is not active in this system. " +
"Active versions: " + controller.versionStatus().versions()
.stream()
.map(VespaVersion::versionNumber)
.map(Version::toString)
.collect(joining(", ")));
Change change = Change.of(version);
if (pin)
change = change.withPin();
controller.applications().deploymentTrigger().forceChange(id, change);
response.append("Triggered " + change + " for " + id);
});
return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        // Fail with a descriptive message instead of a bare NoSuchElementException from
        // the previous unchecked Optional.get() chain; the exception type is unchanged.
        ApplicationVersion version = application.get().deploymentJobs().statusOf(JobType.component)
                                                .flatMap(JobStatus::lastSuccess)
                                                .map(run -> run.application())
                                                .orElseThrow(() -> new java.util.NoSuchElementException(
                                                        "No successful component job run found for " + id));
        Change change = Change.of(version);
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Change change = application.get().change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for " + application + " at this time");
            return;
        }
        // Fixed locale: default-locale toUpperCase() would mangle e.g. 'i' in user input
        // under Turkish locales, breaking the Enum.valueOf lookup.
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase(java.util.Locale.ROOT));
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '" + change + "' to '" +
                        controller.applications().require(id).change() + "' for " + application);
    });
    return new MessageResponse(response.toString());
}
/** Schedules a restart of a deployment, or of a single host when a "hostname" property is given. */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
    controller.applications().restart(deploymentId, hostname);

    String restarted = path(TenantResource.API_PATH, tenantName,
                            ApplicationResource.API_PATH, applicationName,
                            EnvironmentResource.API_PATH, environment,
                            "region", region,
                            "instance", instanceName);
    return new StringResponse("Requested restart of " + restarted);
}
/**
 * Deploys an application package to a zone. Handles three cases: the zone system
 * application, an explicit package/source-revision upload, and direct redeployment
 * of the currently deployed package.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
// The request is multipart: "deployOptions" (JSON) is required, "applicationZip" optional.
Map<String, byte[]> dataParts = new MultipartParser().parse(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
/*
* Special handling of the zone application (the only system application with an application package)
* Setting any other deployOptions here is not supported for now (e.g. specifying version), but
* this might be handy later to handle emergency downgrades.
*/
boolean isZoneApplication = SystemApplication.zone.id().equals(applicationId);
if (isZoneApplication) {
// A "null" string also counts as absent here, since the field arrives as JSON text.
String versionStr = deployOptions.field("vespaVersion").asString();
boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
if (versionPresent) {
throw new RuntimeException("Version not supported for system applications");
}
// Deploying during an upgrade, or before the system version is known, is refused.
if (controller.versionStatus().isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
ActivateResult result = controller.applications()
.deploySystemApplicationPackage(SystemApplication.zone, zone, systemVersion.get().versionNumber());
return new SlimeJsonResponse(toSlime(result));
}
/*
* Normal applications from here
*/
Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
.map(ApplicationPackage::new);
// A source revision identifies a previously built package; it excludes a direct upload.
Inspector sourceRevision = deployOptions.field("sourceRevision");
Inspector buildNumber = deployOptions.field("buildNumber");
if (sourceRevision.valid() != buildNumber.valid())
throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
Optional<ApplicationVersion> applicationVersion = Optional.empty();
if (sourceRevision.valid()) {
if (applicationPackage.isPresent())
throw new IllegalArgumentException("Application version and application package can't both be provided.");
applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
buildNumber.asLong()));
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
/*
* Deploy direct is when we want to redeploy the current application - retrieve version
* info from the application package before deploying
*/
if(deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) {
// Redeploy: reuse the package and versions of the existing deployment in this zone.
Optional<Deployment> deployment = controller.applications().get(applicationId)
.map(Application::deployments)
.flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
if(!deployment.isPresent())
throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
ApplicationVersion version = deployment.get().applicationVersion();
if(version.isUnknown())
throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
applicationVersion = Optional.of(version);
vespaVersion = Optional.of(deployment.get().version());
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
// Hand the resolved package, versions and options to the deployment machinery.
DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
vespaVersion,
deployOptions.field("ignoreValidationErrors").asBool(),
deployOptions.field("deployCurrentVersion").asBool());
ActivateResult result = controller.applications().deploy(applicationId,
zone,
applicationPackage,
applicationVersion,
deployOptionsJsonClass,
Optional.of(requireUserPrincipal(request)));
return new SlimeJsonResponse(toSlime(result));
}
/**
 * Deletes the named tenant, if it exists.
 * User tenants are removed directly; all other tenant types go through access control.
 */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> maybeTenant = controller.tenants().get(tenantName);
    if (maybeTenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
    Tenant tenant = maybeTenant.get();
    if (tenant.type() == Tenant.Type.user) {
        controller.tenants().deleteUser((UserTenant) tenant);
    } else {
        // Non-user tenants require credentials extracted from the request body.
        controller.tenants().delete(tenant.name(),
                                    accessControlRequests.credentials(tenant.name(),
                                                                      toSlime(request.getData()).get(),
                                                                      request.getJDiscRequest()));
    }
    return tenant(tenant, request);
}
/** Deletes the "default" instance of the given application; non-user tenants need credentials. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    boolean isUserTenant = controller.tenants().require(id.tenant()).type() == Tenant.Type.user;
    Optional<Credentials> credentials;
    if (isUserTenant)
        credentials = Optional.empty();
    else
        credentials = Optional.of(accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()));
    controller.applications().deleteApplication(id, credentials);
    return new EmptyJsonResponse();
}
/** Deactivates (removes) the deployment of the given instance in the given zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().require(id);
    controller.applications().deactivate(application.id(), ZoneId.from(environment, region));
    String deploymentPath = path(TenantResource.API_PATH, tenantName,
                                 ApplicationResource.API_PATH, applicationName,
                                 EnvironmentResource.API_PATH, environment,
                                 "region", region,
                                 "instance", instanceName);
    return new StringResponse("Deactivated " + deploymentPath);
}
/**
 * Promote application Chef environments. To be used by component jobs only
 */
private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        // Copy the system-wide Chef environment into this application's source environment.
        String source = chefEnvironment.systemChefEnvironment();
        String target = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        controller.chefClient().copyChefEnvironment(source, target);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", source, target));
    } catch (Exception e) {
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}
/**
 * Promote application Chef environments for jobs that deploy applications
 */
private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        // Copy the application's source environment into its zone-specific target environment.
        String source = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        String target = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
        controller.chefClient().copyChefEnvironment(source, target);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", source, target));
    } catch (Exception e) {
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}
/**
 * Accepts a job-completion report from an external (Screwdriver) build job and forwards it
 * to the deployment trigger. Component reports for internally-deployed applications are rejected.
 */
private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) {
try {
DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get());
// Applications built through the internal pipeline no longer accept external component reports.
if ( report.jobType() == JobType.component
&& controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally())
// NOTE(review): this IllegalArgumentException is NOT caught by the catch below
// (which only handles IllegalStateException) — presumably an outer handler maps it
// to a 400 response; confirm before changing.
throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " +
"longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
"to the old pipeline, please file a ticket at yo/vespa-support and request this.");
controller.applications().deploymentTrigger().notifyOfCompletion(report);
return new MessageResponse("ok");
} catch (IllegalStateException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
}
/**
 * Builds a JobReport from the JSON payload of a job-completion notification.
 * Component jobs additionally carry a project id and a source revision.
 */
private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) {
    // Optional error code reported by the job, when present in the payload.
    Optional<DeploymentJobs.JobError> jobError = report.field("jobError").valid()
            ? Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString()))
            : Optional.empty();
    ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString());
    JobType type = JobType.fromJobName(report.field("jobName").asString());
    long buildNumber = report.field("buildNumber").asLong();
    if (type == JobType.component)
        return DeploymentJobs.JobReport.ofComponent(id,
                                                    report.field("projectId").asLong(),
                                                    buildNumber,
                                                    jobError,
                                                    toSourceRevision(report.field("sourceRevision")));
    return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError);
}
/** Parses a source revision from JSON; all of repository, branch and commit are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the named tenant, or throws NotExistsException if it is unknown. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isPresent()) return tenant.get();
    throw new NotExistsException(new TenantId(tenantName));
}
/**
 * Serializes a tenant, including its applications, to the given Slime cursor.
 * Athenz tenants additionally expose domain, property and (when known) contact information.
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tentantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
// Contact info is optional — only serialized when it has been resolved for this tenant.
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
Cursor contactsArray = object.setArray("contacts");
// Each contact level is itself a list of names, serialized as a nested array.
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
case user: break;
case cloud: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
// Only default instances are listed; recursion depth is controlled by the "recursive" request property.
Cursor applicationArray = object.setArray("applications");
for (Application application : controller.applications().asList(tenant.name())) {
if (application.id().instance().isDefault()) {
if (recurseOverApplications(request))
toSlime(applicationArray.addObject(), application, request);
else
toSlime(application, applicationArray.addObject(), request);
}
}
}
/** Serializes a compact tenant entry (name, metadata, URL) for the tenant list response. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tentantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
break;
case user: break;
case cloud: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
// Self-link to the full tenant resource, derived from the incoming request's host/port.
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
try {
// Query and fragment are deliberately dropped (last two null arguments).
return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
}
catch (URISyntaxException e) {
// Components come from an already-valid URI, so this cannot occur in practice.
throw new RuntimeException("Will not happen", e);
}
}
/**
 * Parses the given text as a long.
 *
 * @param valueOrNull the text to parse, or null to fall back to the default
 * @param defaultWhenNull the value to return when the text is null
 * @return the parsed value, or the default
 * @throws IllegalArgumentException if the text is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Preserve the cause so diagnostics retain the original parse failure.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/** Serializes a job run (id, platform version, optional revision, reason, timestamp) to Slime. */
private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
object.setLong("id", jobRun.id());
object.setString("version", jobRun.platform().toFullString());
// The application version may be unknown for e.g. system-triggered runs; omit it then.
if (!jobRun.application().isUnknown())
toSlime(jobRun.application(), object.setObject("revision"));
object.setString("reason", jobRun.reason());
object.setLong("at", jobRun.at().toEpochMilli());
}
/**
 * Reads the given stream as JSON into a Slime structure.
 * Reads at most ~1MB; larger payloads are truncated by IOUtils.readBytes.
 *
 * @throws RuntimeException wrapping the IOException if reading fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Previously threw a bare RuntimeException with no cause, hiding the real failure.
        throw new RuntimeException(e);
    }
}
/** Returns the authenticated user principal of the request, or fails with a 500 if absent. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal != null) return principal;
    throw new InternalServerErrorException("Expected a user principal");
}
/** Returns the named field of the given object, or throws if it is missing. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if (field.valid()) return field;
    throw new IllegalArgumentException("'" + key + "' is missing");
}
/** Returns the named field of the given object as a string, if present. */
private Optional<String> optional(String key, Inspector object) {
    Inspector field = object.field(key);
    return SlimeUtils.optionalString(field);
}
/** Joins the string forms of the given elements with "/"; null elements are not allowed (Joiner throws NPE). */
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
/** Serializes a compact application entry (id components and self-link) to Slime. */
private void toSlime(Application application, Cursor object, HttpRequest request) {
    ApplicationId id = application.id();
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String applicationPath = "/application/v4/tenant/" + id.tenant().value() +
                             "/application/" + id.application().value();
    object.setString("url", withPath(applicationPath, request.getUri()).toString());
}
/**
 * Serializes a deployment activation result: revision id, package size, prepare log
 * messages, and the config change actions (restarts and refeeds) reported by prepare.
 */
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
// Prepare log may be absent (null) — then the array is left empty.
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
// Services which must be restarted for the new config to take effect.
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
// Document types which must be re-fed after the config change.
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setBool("allowed", refeedAction.allowed);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
/** Serializes each service info (name, type, config id, host) as an object in the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(service -> {
        Cursor object = array.addObject();
        object.setString("serviceName", service.serviceName);
        object.setString("serviceType", service.serviceType);
        object.setString("configId", service.configId);
        object.setString("hostName", service.hostName);
    });
}
/** Adds each string to the given Slime array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/**
 * Reads the entire stream as a single string, or returns null if the stream is empty.
 * NOTE(review): the Scanner is intentionally not closed — closing it would close the
 * caller-owned stream. The "\\A" delimiter makes next() consume the whole input.
 * Uses the platform default charset — TODO confirm whether UTF-8 should be explicit here.
 */
private String readToString(InputStream stream) {
Scanner scanner = new Scanner(stream).useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
/** Returns whether the given Vespa version is present in the system's current version status. */
private boolean systemHasVersion(Version version) {
return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
}
/** Serializes deployment cost totals (TCO, waste, utilization) and each cluster's cost breakdown. */
public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
object.setLong("tco", (long)deploymentCost.getTco());
object.setLong("waste", (long)deploymentCost.getWaste());
object.setDouble("utilization", deploymentCost.getUtilization());
// One entry per cluster, keyed by cluster name.
Cursor clustersObject = object.setObject("cluster");
for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet())
toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey()));
}
/**
 * Serializes a single cluster's cost breakdown: host count, dominant resource,
 * utilization, TCO/waste, flavor details, and per-resource utilization/usage.
 */
private static void toSlime(ClusterCost clusterCost, Cursor object) {
    object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
    object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
    object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
    // Cast to long, not int: the deployment-level serializer uses (long), and an int cast
    // would silently truncate/overflow large cost values.
    object.setLong("tco", (long)clusterCost.getTco());
    object.setLong("waste", (long)clusterCost.getWaste());
    object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
    object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
    object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
    object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
    object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
    object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
    // Normalized utilization relative to the cluster's result target.
    Cursor utilObject = object.setObject("util");
    utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
    utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
    utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
    utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
    // Raw measured system usage.
    Cursor usageObject = object.setObject("usage");
    usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
    usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
    usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
    usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
    Cursor hostnamesArray = object.setArray("hostnames");
    for (String hostname : clusterCost.getClusterInfo().getHostnames())
        hostnamesArray.addString(hostname);
}
/** Returns the name of the resource whose utilization equals the maximum; defaults to "cpu". */
private static String getResourceName(ClusterUtilization utilization) {
    double max = utilization.getMaxUtilization();
    if (utilization.getMemory() == max) return "mem";
    if (utilization.getDisk() == max) return "disk";
    if (utilization.getDiskBusy() == max) return "diskbusy";
    return "cpu";
}
/** Returns whether the request asks for recursion at tenant depth or deeper. */
private static boolean recurseOverTenants(HttpRequest request) {
    if (recurseOverApplications(request)) return true;
    return "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion at application depth or deeper. */
private static boolean recurseOverApplications(HttpRequest request) {
    if (recurseOverDeployments(request)) return true;
    return "application".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion all the way down to deployments. */
private static boolean recurseOverDeployments(HttpRequest request) {
    String recursive = request.getProperty("recursive");
    return ImmutableSet.of("all", "true", "deployment").contains(recursive);
}
/**
 * Returns the serialized form of the tenant's type.
 * NOTE(review): method name carries a typo ("tentant") — renaming would touch all call
 * sites in this file, so it is kept for now.
 */
private static String tentantType(Tenant tenant) {
switch (tenant.type()) {
case user: return "USER";
case athenz: return "ATHENS";
case cloud: return "CLOUD";
default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
}
/** Extracts the application id from the tenant/application/instance path segments. */
private static ApplicationId appIdFromPath(Path path) {
    String tenant = path.get("tenant");
    String application = path.get("application");
    String instance = path.get("instance");
    return ApplicationId.from(tenant, application, instance);
}
/** Extracts the job type from the "jobtype" path segment. */
private static JobType jobTypeFromPath(Path path) {
    String jobName = path.get("jobtype");
    return JobType.fromJobName(jobName);
}
/** Extracts a run id (application, job type, run number) from the request path. */
private static RunId runIdFromPath(Path path) {
    long number = Long.parseLong(path.get("number"));
    ApplicationId id = appIdFromPath(path);
    JobType type = jobTypeFromPath(path);
    return new RunId(id, type, number);
}
/**
 * Accepts a multipart application submission (options JSON, application zip, test zip),
 * verifies its identity configuration, and hands it to the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = new MultipartParser().parse(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
SourceRevision sourceRevision = toSourceRevision(submitOptions);
String authorEmail = submitOptions.field("authorEmail").asString();
// Project id is clamped to at least 1; 0/absent means no external project.
long projectId = Math.max(1, submitOptions.field("projectId").asLong());
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
tenant,
application,
sourceRevision,
authorEmail,
projectId,
applicationPackage,
dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
} |
Why can we remove this check? | private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
if (zone.environment() == Environment.prod && application.get().deploymentSpec().globalServiceId().isPresent()) {
try (RotationLock rotationLock = rotationRepository.lock()) {
Rotation rotation = rotationRepository.getOrAssignRotation(application.get(), rotationLock);
application = application.with(rotation.id());
store(application);
boolean redirectLegacyDns = redirectLegacyDnsFlag.with(FetchVector.Dimension.APPLICATION_ID, application.get().id().serializedForm())
.value();
EndpointList globalEndpoints = application.get()
.endpointsIn(controller.system())
.scope(Endpoint.Scope.global);
globalEndpoints.main().ifPresent(mainEndpoint -> {
registerCname(mainEndpoint.dnsName(), rotation.name());
if (redirectLegacyDns) {
globalEndpoints.legacy(true).asList().forEach(endpoint -> registerCname(endpoint.dnsName(), mainEndpoint.dnsName()));
} else {
globalEndpoints.legacy(true).asList().forEach(endpoint -> registerCname(endpoint.dnsName(), rotation.name()));
}
});
}
}
return application;
} | .scope(Endpoint.Scope.global); | private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
if (zone.environment() == Environment.prod && application.get().deploymentSpec().globalServiceId().isPresent()) {
try (RotationLock rotationLock = rotationRepository.lock()) {
Rotation rotation = rotationRepository.getOrAssignRotation(application.get(), rotationLock);
application = application.with(rotation.id());
store(application);
boolean redirectLegacyDns = redirectLegacyDnsFlag.with(FetchVector.Dimension.APPLICATION_ID, application.get().id().serializedForm())
.value();
EndpointList globalEndpoints = application.get()
.endpointsIn(controller.system())
.scope(Endpoint.Scope.global);
globalEndpoints.main().ifPresent(mainEndpoint -> {
registerCname(mainEndpoint.dnsName(), rotation.name());
if (redirectLegacyDns) {
globalEndpoints.legacy(true).asList().forEach(endpoint -> registerCname(endpoint.dnsName(), mainEndpoint.dnsName()));
} else {
globalEndpoints.legacy(true).asList().forEach(endpoint -> registerCname(endpoint.dnsName(), rotation.name()));
}
});
}
}
return application;
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final ApplicationStore applicationStore;
private final RotationRepository rotationRepository;
private final AccessControl accessControl;
private final NameService nameService;
private final ConfigServer configServer;
private final RoutingGenerator routingGenerator;
private final Clock clock;
private final BooleanFlag redirectLegacyDnsFlag;
private final DeploymentTrigger deploymentTrigger;
/**
 * Creates the application controller with its collaborators and schedules a one-time
 * rewrite of all stored applications (a serialization-migration pass) after one minute.
 */
ApplicationController(Controller controller, CuratorDb curator,
AccessControl accessControl, RotationsConfig rotationsConfig,
NameService nameService, ConfigServer configServer,
ArtifactRepository artifactRepository, ApplicationStore applicationStore,
RoutingGenerator routingGenerator, BuildService buildService, Clock clock) {
this.controller = controller;
this.curator = curator;
this.accessControl = accessControl;
this.nameService = nameService;
this.configServer = configServer;
this.routingGenerator = routingGenerator;
this.clock = clock;
this.redirectLegacyDnsFlag = Flags.REDIRECT_LEGACY_DNS_NAMES.bindTo(controller.flagSource());
this.artifactRepository = artifactRepository;
this.applicationStore = applicationStore;
this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
this.deploymentTrigger = new DeploymentTrigger(controller, buildService, clock);
// Re-store every application once, shortly after startup — presumably to migrate
// serialized data to the current format; confirm before removing.
Once.after(Duration.ofMinutes(1), () -> {
Instant start = clock.instant();
int count = 0;
for (Application application : curator.readApplications()) {
lockIfPresent(application.id(), this::store);
count++;
}
log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
Duration.between(start, clock.instant())));
});
}
/** Returns the application with the given id, or null if it is not present */
public Optional<Application> get(ApplicationId id) {
return curator.readApplication(id);
}
/**
* Returns the application with the given id
*
* @throws IllegalArgumentException if it does not exist
*/
public Application require(ApplicationId id) {
return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications */
public List<Application> asList() {
return sort(curator.readApplications());
}
/** Returns all applications of a tenant */
public List<Application> asList(TenantName tenant) {
return sort(curator.readApplications(tenant));
}
/** Returns the artifact repository used to fetch build artifacts. */
public ArtifactRepository artifacts() { return artifactRepository; }
/** Returns the store for application packages. */
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
public Version oldestInstalledPlatform(ApplicationId id) {
return get(id).flatMap(application -> application.productionDeployments().keySet().stream()
.flatMap(zone -> configServer().nodeRepository().list(zone, id, EnumSet.of(active, reserved)).stream())
.map(Node::currentVersion)
// Nodes may report an empty version; those are ignored.
.filter(version -> ! version.isEmpty())
.min(naturalOrder()))
// Unknown application or no versioned nodes: fall back to the system version.
.orElse(controller.systemVersion());
}
/** Change the global endpoint status for given deployment */
public void setGlobalRotationStatus(DeploymentId deployment, EndpointStatus status) {
// map() is used for its side effect here; returning the endpoint only serves to
// distinguish "endpoint found" from "no endpoint" for the orElseThrow below.
findGlobalEndpoint(deployment).map(endpoint -> {
try {
configServer.setGlobalRotationStatus(deployment, endpoint.upstreamName(), status);
return endpoint;
} catch (IOException e) {
throw new UncheckedIOException("Failed to set rotation status of " + deployment, e);
}
}).orElseThrow(() -> new IllegalArgumentException("No global endpoint exists for " + deployment));
}
/** Get global endpoint status for given deployment */
public Map<RoutingEndpoint, EndpointStatus> globalRotationStatus(DeploymentId deployment) {
return findGlobalEndpoint(deployment).map(endpoint -> {
try {
EndpointStatus status = configServer.getGlobalRotationStatus(deployment, endpoint.upstreamName());
// Single-entry map: one global endpoint per deployment at most.
return Collections.singletonMap(endpoint, status);
} catch (IOException e) {
throw new UncheckedIOException("Failed to get rotation status of " + deployment, e);
}
// No global endpoint: report an empty status map rather than failing.
}).orElseGet(Collections::emptyMap);
}
/** Find the global endpoint of given deployment, if any */
public Optional<RoutingEndpoint> findGlobalEndpoint(DeploymentId deployment) {
    for (RoutingEndpoint endpoint : routingGenerator.endpoints(deployment)) {
        if (endpoint.isGlobal())
            return Optional.of(endpoint);
    }
    return Optional.empty();
}
/**
* Creates a new application for an existing tenant.
*
* @throws IllegalArgumentException if the application already exists
*/
public Application createApplication(ApplicationId id, Optional<Credentials> credentials) {
if ( ! (id.instance().isDefault()))
throw new IllegalArgumentException("Only the instance name 'default' is supported at the moment");
if (id.instance().isTester())
throw new IllegalArgumentException("'" + id + "' is a tester application!");
try (Lock lock = lock(id)) {
// Only validate the application name if no application with this name exists yet for
// the tenant — existing names are grandfathered in.
if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
Optional<Tenant> tenant = controller.tenants().get(id.tenant());
if (tenant.isEmpty())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
if (get(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
// Dashes and underscores are conflated in ids; reject a dash-variant of an existing name.
if (get(dashToUnderscore(id)).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
// Non-user tenants require credentials to register the application in access control.
if (tenant.get().type() != Tenant.Type.user) {
if (credentials.isEmpty())
throw new IllegalArgumentException("Could not create '" + id + "': No credentials provided");
if (id.instance().isDefault())
accessControl.createApplication(id, credentials.get());
}
LockedApplication application = new LockedApplication(new Application(id, clock.instant()), lock);
store(application);
log.info("Created " + application);
return application.get();
}
}
/** Deploys an application package, without an explicit application version or deploying identity. */
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
DeployOptions options) {
return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options, Optional.empty());
}
/** Deploys an application. If the application does not exist it is created. */
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
Optional<ApplicationVersion> applicationVersionFromDeployer,
DeployOptions options,
Optional<Principal> deployingIdentity) {
if (applicationId.instance().isTester())
throw new IllegalArgumentException("'" + applicationId + "' is a tester application!");
Tenant tenant = controller.tenants().require(applicationId.tenant());
// User tenants may deploy without creating the application first.
if (tenant.type() == Tenant.Type.user && get(applicationId).isEmpty())
createApplication(applicationId, Optional.empty());
try (Lock deploymentLock = lockForDeployment(applicationId, zone)) {
Version platformVersion;
ApplicationVersion applicationVersion;
ApplicationPackage applicationPackage;
// NOTE(review): rotationNames is never populated in this method — it is always passed
// empty to the config server; rotation assignment happens via withRotation/cnames.
// Confirm whether the parameter can be removed downstream.
Set<String> rotationNames = new HashSet<>();
Set<String> cnames;
try (Lock lock = lock(applicationId)) {
LockedApplication application = new LockedApplication(require(applicationId), lock);
boolean manuallyDeployed = options.deployDirectly || zone.environment().isManuallyDeployed();
boolean preferOldestVersion = options.deployCurrentVersion;
if (manuallyDeployed) {
// Manual deployments: package must come from the deployer; platform version
// falls back to the package's major-version pin, then the system version.
applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown);
applicationPackage = applicationPackageFromDeployer.orElseThrow(
() -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
platformVersion = options.vespaVersion.map(Version::new).orElse(applicationPackage.deploymentSpec().majorVersion()
.flatMap(this::lastCompatibleVersion)
.orElseGet(controller::systemVersion));
}
else {
// Pipeline deployments: versions are taken from the last triggered job run.
JobType jobType = JobType.from(controller.system(), zone)
.orElseThrow(() -> new IllegalArgumentException("No job is known for " + zone + "."));
Optional<JobStatus> job = Optional.ofNullable(application.get().deploymentJobs().jobStatus().get(jobType));
// Reject deployments that don't correspond to a currently triggered job.
if ( job.isEmpty()
|| job.get().lastTriggered().isEmpty()
|| job.get().lastCompleted().isPresent() && job.get().lastCompleted().get().at().isAfter(job.get().lastTriggered().get().at()))
return unexpectedDeployment(applicationId, zone);
JobRun triggered = job.get().lastTriggered().get();
platformVersion = preferOldestVersion ? triggered.sourcePlatform().orElse(triggered.platform())
: triggered.platform();
applicationVersion = preferOldestVersion ? triggered.sourceApplication().orElse(triggered.application())
: triggered.application();
applicationPackage = getApplicationPackage(application.get(), applicationVersion);
validateRun(application.get(), zone, platformVersion, applicationVersion);
}
verifyApplicationIdentityConfiguration(applicationId.tenant(), applicationPackage, deployingIdentity);
// Assign a global rotation if the deployment spec asks for one (prod only).
application = withRotation(application, zone);
Application app = application.get();
cnames = app.endpointsIn(controller.system()).asList().stream().map(Endpoint::dnsName).collect(Collectors.toSet());
// Only update stored config for current-version pipeline deployments to real zones.
if ( ! preferOldestVersion
&& ! application.get().deploymentJobs().deployedInternally()
&& ! zone.environment().isManuallyDeployed())
storeWithUpdatedConfig(application, applicationPackage);
}
options = withVersion(platformVersion, options);
ActivateResult result = deploy(applicationId, applicationPackage, zone, options, rotationNames, cnames);
// Record the new deployment, with any warnings from the prepare step.
lockOrThrow(applicationId, application ->
store(application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant(),
warningsFrom(result))));
return result;
}
}
/** Fetches the requested application package from the artifact store(s). */
public ApplicationPackage getApplicationPackage(Application application, ApplicationVersion version) {
try {
// Internally-deployed applications live in the application store; externally-built
// ones in the artifact repository.
return application.deploymentJobs().deployedInternally()
? new ApplicationPackage(applicationStore.get(application.id(), version))
: new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()));
}
catch (RuntimeException e) {
// Fallback: the application may have switched pipelines since this version was
// stored, so try the *other* repository before giving up.
try {
log.info("Fetching application package for " + application.id() + " from alternate repository; it is now deployed "
+ (application.deploymentJobs().deployedInternally() ? "internally" : "externally") + "\nException was: " + Exceptions.toMessageString(e));
return application.deploymentJobs().deployedInternally()
? new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()))
: new ApplicationPackage(applicationStore.get(application.id(), version));
}
catch (RuntimeException s) {
// Both lookups failed: rethrow the primary failure, keeping the fallback failure attached.
e.addSuppressed(s);
throw e;
}
}
}
/** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
validate(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
// Cleanup: drop deployments and job statuses no longer referenced by the new spec.
application = withoutDeletedDeployments(application);
application = withoutUnreferencedDeploymentJobs(application);
store(application);
return application;
}
/** Deploy a system application to given zone */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
deploySystemApplicationPackage(application, zone, version);
} else {
// No package: upgrade the application's node types directly through the node repository.
application.nodeTypes().forEach(nodeType -> configServer().nodeRepository().upgrade(zone, nodeType, version));
}
}
/** Deploy a system application to given zone */
public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
// System application packages are versioned per zone in the artifact repository.
ApplicationPackage applicationPackage = new ApplicationPackage(
artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
);
DeployOptions options = withVersion(version, DeployOptions.none());
return deploy(application.id(), applicationPackage, zone, options, Collections.emptySet(), Collections.emptySet());
} else {
throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
}
}
/** Deploys the given tester application package to the given zone, with no rotations or cnames. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, DeployOptions options) {
    Set<String> none = Collections.emptySet();
    return deploy(tester.id(), applicationPackage, zone, options, none, none);
}
/** Prepares the given package on the config server for this deployment and reports the result. */
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
                              ZoneId zone, DeployOptions deployOptions,
                              Set<String> rotationNames, Set<String> cnames) {
    DeploymentId deploymentId = new DeploymentId(application, zone);
    ConfigServer.PreparedApplication prepared = configServer.deploy(deploymentId,
                                                                    deployOptions,
                                                                    cnames,
                                                                    rotationNames,
                                                                    applicationPackage.zippedContent());
    return new ActivateResult(new RevisionId(applicationPackage.hash()),
                              prepared.prepareResponse(),
                              applicationPackage.zippedContent().length);
}
/**
 * Returns a no-op ActivateResult carrying a warning log entry which explains that the
 * deployment request was ignored because no deployment is currently expected for this
 * application in this zone. Used instead of deploying when no matching triggered job run exists.
 * (Note: the previous Javadoc here, about global rotations, described a different method.)
 */
private ActivateResult unexpectedDeployment(ApplicationId application, ZoneId zone) {
    Log logEntry = new Log();
    logEntry.level = "WARNING";
    logEntry.time = clock.instant().toEpochMilli();
    logEntry.message = "Ignoring deployment of application '" + application + "' to " + zone +
                       " as a deployment is not currently expected";
    PrepareResponse prepareResponse = new PrepareResponse();
    prepareResponse.log = Collections.singletonList(logEntry);
    prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
    // Revision "0" and size 0: nothing was actually prepared
    return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
}
/**
 * Removes production deployments whose zone is no longer listed in the application's deployment
 * spec, deactivating each of them in the config server.
 *
 * @throws IllegalArgumentException if deployments would be removed but no 'deployment-removal'
 *         validation override currently allows it
 */
private LockedApplication withoutDeletedDeployments(LockedApplication application) {
    // Production deployments present on the application but absent from the spec
    List<Deployment> deploymentsToRemove = application.get().productionDeployments().values().stream()
            .filter(deployment -> ! application.get().deploymentSpec().includes(deployment.zone().environment(),
                                                                                Optional.of(deployment.zone().region())))
            .collect(Collectors.toList());
    if (deploymentsToRemove.isEmpty()) return application;
    // Removing a production deployment is destructive, so it must be explicitly allowed
    if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
        throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get() +
                                           " is deployed in " +
                                           deploymentsToRemove.stream()
                                                              .map(deployment -> deployment.zone().region().value())
                                                              .collect(Collectors.joining(", ")) +
                                           ", but does not include " +
                                           (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                           " in deployment.xml. " +
                                           ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
    LockedApplication applicationWithRemoval = application;
    for (Deployment deployment : deploymentsToRemove)
        applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
    return applicationWithRemoval;
}
/** Removes production job status entries whose zone is no longer part of the deployment spec. */
private LockedApplication withoutUnreferencedDeploymentJobs(LockedApplication application) {
    for (JobType job : JobList.from(application.get()).production().mapToList(JobStatus::type)) {
        ZoneId zone = job.zone(controller.system());
        boolean stillReferenced = application.get().deploymentSpec().includes(zone.environment(),
                                                                              Optional.of(zone.region()));
        if ( ! stillReferenced)
            application = application.withoutDeploymentJob(job);
    }
    return application;
}
/** Returns a copy of the given options with the Vespa version pinned to the given version. */
private DeployOptions withVersion(Version version, DeployOptions options) {
    Optional<Version> pinnedVersion = Optional.of(version);
    return new DeployOptions(options.deployDirectly,
                             pinnedVersion,
                             options.ignoreValidationErrors,
                             options.deployCurrentVersion);
}
/**
 * Registers a CNAME record in DNS mapping the given name to the given target,
 * updating any existing records whose data differs. Best effort: failures are
 * logged and swallowed, not propagated.
 * NOTE(review): this method appears unused in the visible part of this class — confirm callers.
 */
private void registerCname(String name, String targetName) {
    try {
        RecordData data = RecordData.fqdn(targetName);
        // Point any existing CNAME records for this name at the new target
        List<Record> records = nameService.findRecords(Record.Type.CNAME, RecordName.from(name));
        records.forEach(record -> {
            if ( ! record.data().equals(data)) {
                log.info("Updating mapping for record '" + record + "': '" + name
                         + "' -> '" + data.asString() + "'");
                nameService.updateRecord(record, data);
            }
        });
        // No record existed: create one
        if (records.isEmpty()) {
            Record record = nameService.createCname(RecordName.from(name), data);
            log.info("Registered mapping as record '" + record + "'");
        }
    } catch (RuntimeException e) {
        log.log(Level.WARNING, "Failed to register CNAME", e);
    }
}
/**
 * Returns the endpoints of the deployment, or an empty Optional if the routing layer request fails.
 *
 * @throws NotExistsException if the deployment is not known for a non-tester application
 *         (tester instances are exempt from the existence check)
 */
public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
    if ( ! get(deploymentId.applicationId())
            .map(application -> application.deployments().containsKey(deploymentId.zoneId()))
            .orElse(deploymentId.applicationId().instance().isTester()))
        throw new NotExistsException("Deployment", deploymentId.toString());
    try {
        return Optional.of(ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
                                                                .map(RoutingEndpoint::endpoint)
                                                                .map(URI::create)
                                                                .iterator()));
    }
    catch (RuntimeException e) {
        // Routing info is advisory here; log and report absence rather than failing the caller
        log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
                               + Exceptions.toMessageString(e));
        return Optional.empty();
    }
}
/**
 * Deletes the given application. All known instances of the application will be deleted,
 * including PR instances.
 *
 * @param applicationId id of the application; every instance sharing its application name is removed
 * @param credentials credentials of the caller, required unless the tenant is a user tenant
 * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
 * @throws NotExistsException if no instances of the application exist
 */
public void deleteApplication(ApplicationId applicationId, Optional<Credentials> credentials) {
    Tenant tenant = controller.tenants().require(applicationId.tenant());
    if (tenant.type() != Tenant.Type.user && credentials.isEmpty())
        throw new IllegalArgumentException("Could not delete application '" + applicationId + "': No credentials provided");

    // All instances sharing the application name are deleted together
    List<ApplicationId> instances = asList(applicationId.tenant()).stream()
                                                                  .map(Application::id)
                                                                  .filter(id -> id.application().equals(applicationId.application()))
                                                                  .collect(Collectors.toList());
    if (instances.isEmpty()) {
        throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
    }

    // Under lock: refuse deletion while deployments are active, then remove stored state and packages
    instances.forEach(id -> lockOrThrow(id, application -> {
        if ( ! application.get().deployments().isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
        curator.removeApplication(id);
        applicationStore.removeAll(id);
        applicationStore.removeAll(TesterId.of(id));
        log.info("Deleted " + application);
    }));

    // User tenants have no access control entry to clean up
    if (tenant.type() != Tenant.Type.user)
        accessControl.deleteApplication(applicationId, credentials.get());
}
/**
 * Replaces any previously stored version of this application with the given instance.
 * Callers must hold the application's lock (hence the LockedApplication parameter).
 *
 * @param application a locked application to store
 */
public void store(LockedApplication application) {
    curator.writeApplication(application.get());
}
/**
 * Acquires the application's lock and, if an application with the given id exists,
 * passes it — already locked — to the given action. The lock is released when the
 * action returns. No-op if the application does not exist.
 *
 * @param applicationId ID of the application to lock and get.
 * @param action Function which acts on the locked application.
 */
public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
    }
}
/**
 * Acquires the application's lock and passes the application — already locked — to the
 * given action, or throws if no application has the given id. The lock is released when
 * the action returns.
 *
 * @param applicationId ID of the application to lock and require.
 * @param action Function which acts on the locked application.
 * @throws IllegalArgumentException when application does not exist.
 */
public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        action.accept(new LockedApplication(require(applicationId), lock));
    }
}
/**
 * Tells the config server to schedule a restart of all nodes in this deployment.
 *
 * @param hostname If non-empty, restart will only be scheduled for this host
 */
public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
    configServer.restart(deploymentId, hostname);
}
/**
 * Asks the config server whether this deployment is currently <i>suspended</i>:
 * not in a state where it should receive traffic.
 */
public boolean isSuspended(DeploymentId deploymentId) {
    try {
        return configServer.isSuspended(deploymentId);
    }
    catch (ConfigServerException e) {
        // A deployment the config server does not know cannot be suspended; other errors propagate
        if (e.getErrorCode() != ConfigServerException.ErrorCode.NOT_FOUND)
            throw e;
        return false;
    }
}
/**
 * Deactivates the application in the given zone and stores the change, under the application's lock.
 *
 * @throws IllegalArgumentException if the application does not exist
 */
public void deactivate(ApplicationId application, ZoneId zone) {
    lockOrThrow(application, lockedApplication -> store(deactivate(lockedApplication, zone)));
}
/**
 * Deactivates a locked application without storing it.
 * Deliberately tolerates the deployment already being gone on the config server.
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
    try {
        configServer.deactivate(new DeploymentId(application.get().id(), zone));
    }
    catch (NotFoundException ignored) {
        // Already deactivated on the config server; removing our record below is still correct
    }
    return application.withoutDeploymentIn(zone);
}
/** Returns the deployment trigger owned by this controller. */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/**
 * Returns a copy of the given id with every dash in the application name replaced by an
 * underscore, used to detect name collisions between the two separators at creation time.
 */
private ApplicationId dashToUnderscore(ApplicationId id) {
    // replace(char, char) is a plain substitution; replaceAll would needlessly compile a regex
    return ApplicationId.from(id.tenant().value(),
                              id.application().value().replace('-', '_'),
                              id.instance().value());
}
/** Returns the config server client used by this controller. */
public ConfigServer configServer() { return configServer; }
/**
 * Returns a lock which provides exclusive rights to changing this application.
 * Any operation which stores an application needs to first acquire this lock, then read, modify
 * and store the application, and finally release (close) the lock.
 */
Lock lock(ApplicationId application) {
    return curator.lock(application);
}
/**
 * Returns a lock which provides exclusive rights to deploying this application to the given zone.
 * Separate from {@code lock(ApplicationId)} so deployments to different zones can proceed in parallel.
 */
private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
    return curator.lockForDeployment(application, zone);
}
/**
 * Verifies that each of the production zones listed in the deployment spec exists in this system.
 *
 * @throws IllegalArgumentException if a production zone is unknown
 */
private void validate(DeploymentSpec deploymentSpec) {
    // Constructing the deployment steps validates that each declared step maps to a known job
    new DeploymentSteps(deploymentSpec, controller::system).jobs();
    deploymentSpec.zones().stream()
                  .filter(zone -> zone.environment() == Environment.prod)
                  .forEach(zone -> {
                      if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
                                                                          zone.region().orElse(null)))) {
                          throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
                      }
                  });
}
/**
 * Verifies that we don't downgrade an existing production deployment: rejects the run if either
 * the platform version (unless the change is pinned) or the application version is older than
 * what is currently deployed in the zone. Non-production zones, and zones with no existing
 * deployment, are always allowed.
 *
 * @throws IllegalArgumentException if the requested versions would be a downgrade
 */
private void validateRun(Application application, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
    Deployment deployment = application.deployments().get(zone);
    if ( zone.environment().isProduction() && deployment != null
         && ( platformVersion.compareTo(deployment.version()) < 0 && ! application.change().isPinned()
              || applicationVersion.compareTo(deployment.applicationVersion()) < 0))
        throw new IllegalArgumentException(String.format("Rejecting deployment of %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                         " are older than the currently deployed (platform: %s, application: %s).",
                                                         application, zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion()));
}
/** Returns the rotation repository, used for managing global rotation assignments */
public RotationRepository rotationRepository() {
    return rotationRepository;
}
/** Returns all known routing policies for the given application, read from persistent storage. */
public Set<RoutingPolicy> routingPolicies(ApplicationId application) {
    return curator.readRoutingPolicies(application);
}
/** Returns the given applications as a new list, sorted by application id. */
private static List<Application> sort(List<Application> applications) {
    Comparator<Application> byId = Comparator.comparing(Application::id);
    return applications.stream().sorted(byId).collect(Collectors.toList());
}
/**
 * Verifies that the application can be deployed to the tenant, following these rules:
 *
 * 1. If the deploying principal is an Athenz user, verify that the user has tenant admin
 *    access to the Athenz domain declared in deployment.xml.
 * 2. Otherwise, verify that the tenant is an Athenz tenant and that its domain equals the
 *    Athenz domain declared in deployment.xml.
 *
 * No-op when deployment.xml declares no Athenz domain.
 *
 * @param tenantName Tenant where application should be deployed
 * @param applicationPackage Application package
 * @param deployer Principal initiating the deployment, possibly empty
 * @throws IllegalArgumentException if verification fails
 */
public void verifyApplicationIdentityConfiguration(TenantName tenantName, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
    applicationPackage.deploymentSpec().athenzDomain().ifPresent(identityDomain -> {
        Tenant tenant = controller.tenants().require(tenantName);
        deployer.filter(AthenzPrincipal.class::isInstance)
                .map(AthenzPrincipal.class::cast)
                .map(AthenzPrincipal::getIdentity)
                .filter(AthenzUser.class::isInstance)
                .ifPresentOrElse(user -> {
                                     // A user deploys directly: check the user's access to the declared domain
                                     if ( ! ((AthenzFacade) accessControl).hasTenantAdminAccess(user, new AthenzDomain(identityDomain.value())))
                                         throw new IllegalArgumentException("User " + user.getFullName() + " is not allowed to launch " +
                                                                            "services in Athenz domain " + identityDomain.value() + ". " +
                                                                            "Please reach out to the domain admin.");
                                 },
                                 () -> {
                                     // No user principal: the tenant's own domain must match the declared domain
                                     if (tenant.type() != Tenant.Type.athenz)
                                         throw new IllegalArgumentException("Athenz domain defined in deployment.xml, but no " +
                                                                            "Athenz domain for tenant " + tenantName.value());
                                     AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
                                     if ( ! Objects.equals(tenantDomain.getName(), identityDomain.value()))
                                         throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.value() + "] " +
                                                                            "must match tenant domain: [" + tenantDomain.getName() + "]");
                                 });
    });
}
/** Returns the latest known version within the given major, or empty if none exists. */
private Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
    Optional<Version> latest = Optional.empty();
    for (VespaVersion candidate : controller.versionStatus().versions()) {
        Version version = candidate.versionNumber();
        if (version.getMajor() != targetMajorVersion) continue;
        if (latest.isEmpty() || version.compareTo(latest.get()) > 0)
            latest = Optional.of(version);
    }
    return latest;
}
/**
 * Extracts the deployment-warning metric from a deployment result: counts log entries at
 * level "warn"/"warning" (case-insensitive). Note that all warnings are currently folded into
 * the single {@code Warning.all} bucket; no per-category counting happens here.
 *
 * @return an unmodifiable map from warning category to count; empty when there is no log
 */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
    if (result.prepareResponse().log == null) return Map.of();
    Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
    for (Log log : result.prepareResponse().log) {
        if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue;
        warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
    }
    return Collections.unmodifiableMap(warnings);
}
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
/** Source of application packages for externally built applications */
private final ArtifactRepository artifactRepository;
/** Storage for application packages of internally deployed applications */
private final ApplicationStore applicationStore;
/** Manages global rotation assignments */
private final RotationRepository rotationRepository;
private final AccessControl accessControl;
private final NameService nameService;
private final ConfigServer configServer;
private final RoutingGenerator routingGenerator;
private final Clock clock;
// NOTE(review): bound in the constructor but not read in the visible code — confirm usage
private final BooleanFlag redirectLegacyDnsFlag;
private final DeploymentTrigger deploymentTrigger;
/** Creates the application controller and schedules a one-time rewrite of all stored applications. */
ApplicationController(Controller controller, CuratorDb curator,
                      AccessControl accessControl, RotationsConfig rotationsConfig,
                      NameService nameService, ConfigServer configServer,
                      ArtifactRepository artifactRepository, ApplicationStore applicationStore,
                      RoutingGenerator routingGenerator, BuildService buildService, Clock clock) {
    this.controller = controller;
    this.curator = curator;
    this.accessControl = accessControl;
    this.nameService = nameService;
    this.configServer = configServer;
    this.routingGenerator = routingGenerator;
    this.clock = clock;
    this.redirectLegacyDnsFlag = Flags.REDIRECT_LEGACY_DNS_NAMES.bindTo(controller.flagSource());
    this.artifactRepository = artifactRepository;
    this.applicationStore = applicationStore;
    this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
    this.deploymentTrigger = new DeploymentTrigger(controller, buildService, clock);
    // One minute after startup, read and re-store every application under its lock —
    // presumably to migrate stored data to the current serialization format (TODO confirm intent)
    Once.after(Duration.ofMinutes(1), () -> {
        Instant start = clock.instant();
        int count = 0;
        for (Application application : curator.readApplications()) {
            lockIfPresent(application.id(), this::store);
            count++;
        }
        log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
                                          Duration.between(start, clock.instant())));
    });
}
/** Returns the application with the given id, or an empty Optional if it is not present */
public Optional<Application> get(ApplicationId id) {
    return curator.readApplication(id);
}
/**
 * Returns the application with the given id.
 *
 * @throws IllegalArgumentException if it does not exist
 */
public Application require(ApplicationId id) {
    return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications, sorted by id */
public List<Application> asList() {
    return sort(curator.readApplications());
}
/** Returns all applications of a tenant, sorted by id */
public List<Application> asList(TenantName tenant) {
    return sort(curator.readApplications(tenant));
}
/** Returns the artifact repository holding externally built application packages */
public ArtifactRepository artifacts() { return artifactRepository; }
/** Returns the store holding internally deployed application packages */
public ApplicationStore applicationStore() { return applicationStore; }
/**
 * Returns the oldest Vespa version installed on any active or reserved production node of the
 * given application, or the system version if the application is unknown or has no such nodes.
 */
public Version oldestInstalledPlatform(ApplicationId id) {
    return get(id).flatMap(application -> application.productionDeployments().keySet().stream()
                                                     .flatMap(zone -> configServer().nodeRepository().list(zone, id, EnumSet.of(active, reserved)).stream())
                                                     .map(Node::currentVersion)
                                                     .filter(version -> ! version.isEmpty())
                                                     .min(naturalOrder()))
                  .orElse(controller.systemVersion());
}
/**
 * Changes the global endpoint status for the given deployment.
 *
 * @throws IllegalArgumentException if no global endpoint exists for the deployment
 * @throws UncheckedIOException if the config server call fails
 */
public void setGlobalRotationStatus(DeploymentId deployment, EndpointStatus status) {
    findGlobalEndpoint(deployment).map(endpoint -> {
        try {
            configServer.setGlobalRotationStatus(deployment, endpoint.upstreamName(), status);
            return endpoint;
        } catch (IOException e) {
            throw new UncheckedIOException("Failed to set rotation status of " + deployment, e);
        }
    }).orElseThrow(() -> new IllegalArgumentException("No global endpoint exists for " + deployment));
}
/**
 * Returns the global endpoint status for the given deployment as a single-entry map,
 * or an empty map if the deployment has no global endpoint.
 *
 * @throws UncheckedIOException if the config server call fails
 */
public Map<RoutingEndpoint, EndpointStatus> globalRotationStatus(DeploymentId deployment) {
    return findGlobalEndpoint(deployment).map(endpoint -> {
        try {
            EndpointStatus status = configServer.getGlobalRotationStatus(deployment, endpoint.upstreamName());
            return Collections.singletonMap(endpoint, status);
        } catch (IOException e) {
            throw new UncheckedIOException("Failed to get rotation status of " + deployment, e);
        }
    }).orElseGet(Collections::emptyMap);
}
/** Finds the first global endpoint of the given deployment, if any, via the routing generator */
public Optional<RoutingEndpoint> findGlobalEndpoint(DeploymentId deployment) {
    return routingGenerator.endpoints(deployment).stream()
                           .filter(RoutingEndpoint::isGlobal)
                           .findFirst();
}
/**
 * Creates a new application for an existing tenant. Only the 'default' instance name is
 * currently supported; for non-user tenants credentials are required and an access control
 * entry is created.
 *
 * @throws IllegalArgumentException if the application already exists (with either dashes or
 *         underscores in its name), the tenant does not exist, or credentials are missing
 */
public Application createApplication(ApplicationId id, Optional<Credentials> credentials) {
    if ( ! (id.instance().isDefault()))
        throw new IllegalArgumentException("Only the instance name 'default' is supported at the moment");
    if (id.instance().isTester())
        throw new IllegalArgumentException("'" + id + "' is a tester application!");
    try (Lock lock = lock(id)) {
        // Validate the application name only when no instance of it exists yet
        if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        Optional<Tenant> tenant = controller.tenants().get(id.tenant());
        if (tenant.isEmpty())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        // Dashes and underscores map to the same stored name, so reject the twin as well
        if (get(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        if (tenant.get().type() != Tenant.Type.user) {
            if (credentials.isEmpty())
                throw new IllegalArgumentException("Could not create '" + id + "': No credentials provided");
            // NOTE(review): this isDefault() check looks redundant — non-default names are rejected above; confirm
            if (id.instance().isDefault())
                accessControl.createApplication(id, credentials.get());
        }
        LockedApplication application = new LockedApplication(new Application(id, clock.instant()), lock);
        store(application);
        log.info("Created " + application);
        return application.get();
    }
}
/** Convenience overload: deploys with no deployer-supplied application version and no deploying identity. */
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
                             Optional<ApplicationPackage> applicationPackageFromDeployer,
                             DeployOptions options) {
    return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options, Optional.empty());
}
/**
 * Deploys an application to the given zone. If the application does not exist and the tenant is
 * a user tenant, it is created first. Manual deployments use the deployer-supplied package and
 * versions; pipeline deployments use the versions of the last triggered job run, fetching the
 * package from the artifact store(s). Takes the per-zone deployment lock, and the application
 * lock while resolving versions and storing config.
 */
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
                             Optional<ApplicationPackage> applicationPackageFromDeployer,
                             Optional<ApplicationVersion> applicationVersionFromDeployer,
                             DeployOptions options,
                             Optional<Principal> deployingIdentity) {
    if (applicationId.instance().isTester())
        throw new IllegalArgumentException("'" + applicationId + "' is a tester application!");
    Tenant tenant = controller.tenants().require(applicationId.tenant());
    if (tenant.type() == Tenant.Type.user && get(applicationId).isEmpty())
        createApplication(applicationId, Optional.empty());
    try (Lock deploymentLock = lockForDeployment(applicationId, zone)) {
        Version platformVersion;
        ApplicationVersion applicationVersion;
        ApplicationPackage applicationPackage;
        Set<String> rotationNames = new HashSet<>();
        Set<String> cnames;
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = new LockedApplication(require(applicationId), lock);
            boolean manuallyDeployed = options.deployDirectly || zone.environment().isManuallyDeployed();
            boolean preferOldestVersion = options.deployCurrentVersion;
            if (manuallyDeployed) {
                // Manual deployment: package must come from the deployer; platform defaults to
                // the last compatible version for the declared major, or the system version
                applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown);
                applicationPackage = applicationPackageFromDeployer.orElseThrow(
                        () -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
                platformVersion = options.vespaVersion.map(Version::new).orElse(applicationPackage.deploymentSpec().majorVersion()
                                                                                                 .flatMap(this::lastCompatibleVersion)
                                                                                                 .orElseGet(controller::systemVersion));
            }
            else {
                // Pipeline deployment: only proceed when a job run has been triggered and not yet completed
                JobType jobType = JobType.from(controller.system(), zone)
                                         .orElseThrow(() -> new IllegalArgumentException("No job is known for " + zone + "."));
                Optional<JobStatus> job = Optional.ofNullable(application.get().deploymentJobs().jobStatus().get(jobType));
                if ( job.isEmpty()
                     || job.get().lastTriggered().isEmpty()
                     || job.get().lastCompleted().isPresent() && job.get().lastCompleted().get().at().isAfter(job.get().lastTriggered().get().at()))
                    return unexpectedDeployment(applicationId, zone);
                JobRun triggered = job.get().lastTriggered().get();
                // preferOldestVersion selects the source (pre-change) versions of the triggered run
                platformVersion = preferOldestVersion ? triggered.sourcePlatform().orElse(triggered.platform())
                                                      : triggered.platform();
                applicationVersion = preferOldestVersion ? triggered.sourceApplication().orElse(triggered.application())
                                                         : triggered.application();
                applicationPackage = getApplicationPackage(application.get(), applicationVersion);
                validateRun(application.get(), zone, platformVersion, applicationVersion);
            }
            verifyApplicationIdentityConfiguration(applicationId.tenant(), applicationPackage, deployingIdentity);
            application = withRotation(application, zone);
            Application app = application.get();
            cnames = app.endpointsIn(controller.system()).asList().stream().map(Endpoint::dnsName).collect(Collectors.toSet());
            if ( ! preferOldestVersion
                 && ! application.get().deploymentJobs().deployedInternally()
                 && ! zone.environment().isManuallyDeployed())
                storeWithUpdatedConfig(application, applicationPackage);
        }
        // Carry out the deployment outside the application lock, then record it under the lock
        options = withVersion(platformVersion, options);
        ActivateResult result = deploy(applicationId, applicationPackage, zone, options, rotationNames, cnames);
        lockOrThrow(applicationId, application ->
                store(application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant(),
                                                    warningsFrom(result))));
        return result;
    }
}
/**
 * Fetches the requested application package from the artifact store(s): the store matching how
 * the application is deployed is tried first, then the other store as fallback; if both fail
 * the first failure is rethrown with the second attached as suppressed.
 */
public ApplicationPackage getApplicationPackage(Application application, ApplicationVersion version) {
    try {
        return application.deploymentJobs().deployedInternally()
                ? new ApplicationPackage(applicationStore.get(application.id(), version))
                : new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()));
    }
    catch (RuntimeException e) {
        try {
            // Fall back to the other store: the application may recently have switched pipelines
            log.info("Fetching application package for " + application.id() + " from alternate repository; it is now deployed "
                     + (application.deploymentJobs().deployedInternally() ? "internally" : "externally") + "\nException was: " + Exceptions.toMessageString(e));
            return application.deploymentJobs().deployedInternally()
                    ? new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()))
                    : new ApplicationPackage(applicationStore.get(application.id(), version));
        }
        catch (RuntimeException s) {
            e.addSuppressed(s);
            throw e;
        }
    }
}
/**
 * Stores the deployment spec and validation overrides from the application package, and runs
 * cleanup: deployments and job statuses no longer referenced by the spec are removed.
 *
 * @return the updated, stored application
 */
public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
    validate(applicationPackage.deploymentSpec());
    application = application.with(applicationPackage.deploymentSpec());
    application = application.with(applicationPackage.validationOverrides());
    application = withoutDeletedDeployments(application);
    application = withoutUnreferencedDeploymentJobs(application);
    store(application);
    return application;
}
/**
 * Deploys a system application to the given zone: through its application package when it has
 * one, otherwise by asking the node repository to upgrade each of its node types.
 */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
    if (application.hasApplicationPackage()) {
        deploySystemApplicationPackage(application, zone, version);
    } else {
        application.nodeTypes().forEach(nodeType -> configServer().nodeRepository().upgrade(zone, nodeType, version));
    }
}
/**
 * Deploys the application package of a system application to the given zone.
 *
 * @throws RuntimeException if the application has no application package
 */
public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
    if (application.hasApplicationPackage()) {
        ApplicationPackage applicationPackage = new ApplicationPackage(
                artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
        );
        DeployOptions options = withVersion(version, DeployOptions.none());
        return deploy(application.id(), applicationPackage, zone, options, Collections.emptySet(), Collections.emptySet());
    } else {
        throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
    }
}
/** Deploys the given tester application package to the given zone, with no rotations or cnames. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, DeployOptions options) {
    return deploy(tester.id(), applicationPackage, zone, options, Collections.emptySet(), Collections.emptySet());
}
/** Prepares the given package on the config server for this deployment and reports the result. */
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
                              ZoneId zone, DeployOptions deployOptions,
                              Set<String> rotationNames, Set<String> cnames) {
    DeploymentId deploymentId = new DeploymentId(application, zone);
    ConfigServer.PreparedApplication preparedApplication =
            configServer.deploy(deploymentId, deployOptions, cnames, rotationNames,
                                applicationPackage.zippedContent());
    return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                              applicationPackage.zippedContent().length);
}
/**
 * Returns a no-op ActivateResult carrying a warning log entry which explains that the
 * deployment request was ignored because no deployment is currently expected for this
 * application in this zone. Used instead of deploying when no matching triggered job run exists.
 * (Note: the previous Javadoc here, about global rotations, described a different method.)
 */
private ActivateResult unexpectedDeployment(ApplicationId application, ZoneId zone) {
    Log logEntry = new Log();
    logEntry.level = "WARNING";
    logEntry.time = clock.instant().toEpochMilli();
    logEntry.message = "Ignoring deployment of application '" + application + "' to " + zone +
                       " as a deployment is not currently expected";
    PrepareResponse prepareResponse = new PrepareResponse();
    prepareResponse.log = Collections.singletonList(logEntry);
    prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
    return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
}
/**
 * Removes production deployments whose zone is no longer listed in the application's deployment
 * spec, deactivating each of them in the config server.
 *
 * @throws IllegalArgumentException if deployments would be removed but no 'deployment-removal'
 *         validation override currently allows it
 */
private LockedApplication withoutDeletedDeployments(LockedApplication application) {
    List<Deployment> deploymentsToRemove = application.get().productionDeployments().values().stream()
            .filter(deployment -> ! application.get().deploymentSpec().includes(deployment.zone().environment(),
                                                                                Optional.of(deployment.zone().region())))
            .collect(Collectors.toList());
    if (deploymentsToRemove.isEmpty()) return application;
    // Removing a production deployment is destructive, so it must be explicitly allowed
    if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
        throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get() +
                                           " is deployed in " +
                                           deploymentsToRemove.stream()
                                                              .map(deployment -> deployment.zone().region().value())
                                                              .collect(Collectors.joining(", ")) +
                                           ", but does not include " +
                                           (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                           " in deployment.xml. " +
                                           ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
    LockedApplication applicationWithRemoval = application;
    for (Deployment deployment : deploymentsToRemove)
        applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
    return applicationWithRemoval;
}
/** Removes production job status entries whose zone is no longer part of the deployment spec. */
private LockedApplication withoutUnreferencedDeploymentJobs(LockedApplication application) {
    for (JobType job : JobList.from(application.get()).production().mapToList(JobStatus::type)) {
        ZoneId zone = job.zone(controller.system());
        if (application.get().deploymentSpec().includes(zone.environment(), Optional.of(zone.region())))
            continue;
        application = application.withoutDeploymentJob(job);
    }
    return application;
}
/** Returns a copy of the given options with the Vespa version pinned to the given version. */
private DeployOptions withVersion(Version version, DeployOptions options) {
    return new DeployOptions(options.deployDirectly,
                             Optional.of(version),
                             options.ignoreValidationErrors,
                             options.deployCurrentVersion);
}
/**
 * Registers a CNAME record in DNS mapping the given name to the given target,
 * updating any existing records whose data differs. Best effort: failures are
 * logged and swallowed, not propagated.
 */
private void registerCname(String name, String targetName) {
    try {
        RecordData data = RecordData.fqdn(targetName);
        List<Record> records = nameService.findRecords(Record.Type.CNAME, RecordName.from(name));
        records.forEach(record -> {
            if ( ! record.data().equals(data)) {
                log.info("Updating mapping for record '" + record + "': '" + name
                         + "' -> '" + data.asString() + "'");
                nameService.updateRecord(record, data);
            }
        });
        // No record existed: create one
        if (records.isEmpty()) {
            Record record = nameService.createCname(RecordName.from(name), data);
            log.info("Registered mapping as record '" + record + "'");
        }
    } catch (RuntimeException e) {
        log.log(Level.WARNING, "Failed to register CNAME", e);
    }
}
/**
 * Returns the endpoints of the deployment, or an empty Optional if the routing layer request fails.
 *
 * @throws NotExistsException if the deployment is not known for a non-tester application
 *         (tester instances are exempt from the existence check)
 */
public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
    if ( ! get(deploymentId.applicationId())
            .map(application -> application.deployments().containsKey(deploymentId.zoneId()))
            .orElse(deploymentId.applicationId().instance().isTester()))
        throw new NotExistsException("Deployment", deploymentId.toString());
    try {
        return Optional.of(ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
                                                                .map(RoutingEndpoint::endpoint)
                                                                .map(URI::create)
                                                                .iterator()));
    }
    catch (RuntimeException e) {
        // Routing info is advisory here; log and report absence rather than failing the caller
        log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
                               + Exceptions.toMessageString(e));
        return Optional.empty();
    }
}
/**
 * Deletes the given application. All known instances of the application will be deleted,
 * including PR instances.
 *
 * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
 * @throws NotExistsException if no instances of the application exist
 */
public void deleteApplication(ApplicationId applicationId, Optional<Credentials> credentials) {
    Tenant tenant = controller.tenants().require(applicationId.tenant());
    // User tenants may delete without credentials; everyone else must provide them.
    if (tenant.type() != Tenant.Type.user && credentials.isEmpty())
        throw new IllegalArgumentException("Could not delete application '" + applicationId + "': No credentials provided");
    // Collect every instance of this application under the tenant, including PR instances.
    List<ApplicationId> instances = asList(applicationId.tenant()).stream()
                                                                  .map(Application::id)
                                                                  .filter(id -> id.application().equals(applicationId.application()))
                                                                  .collect(Collectors.toList());
    if (instances.isEmpty()) {
        throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
    }
    instances.forEach(id -> lockOrThrow(id, application -> {
        if ( ! application.get().deployments().isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
        curator.removeApplication(id);
        applicationStore.removeAll(id);
        applicationStore.removeAll(TesterId.of(id));
        log.info("Deleted " + application);
    }));
    // Access control entries only exist for non-user tenants; credentials are guaranteed present here.
    if (tenant.type() != Tenant.Type.user)
        accessControl.deleteApplication(applicationId, credentials.get());
}
/**
 * Replace any previous version of this application by this instance
 *
 * @param application a locked application to store; the caller holds the application lock
 *                    (see {@link #lock}), which guards the read-modify-write cycle
 */
public void store(LockedApplication application) {
curator.writeApplication(application.get());
}
/**
 * Acquires the lock for the given application and, if the application exists,
 * runs the given action on the locked application.
 *
 * @param applicationId ID of the application to lock and get.
 * @param action Function which acts on the locked application.
 */
public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        Optional<Application> application = get(applicationId);
        if (application.isPresent())
            action.accept(new LockedApplication(application.get(), lock));
    }
}
/**
 * Acquires the lock for the given application, then runs the given action on the locked
 * application, failing if the application does not exist.
 *
 * @param applicationId ID of the application to lock and require.
 * @param action Function which acts on the locked application.
 * @throws IllegalArgumentException when application does not exist.
 */
public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        Application application = require(applicationId);
        action.accept(new LockedApplication(application, lock));
    }
}
/**
 * Tells config server to schedule a restart of all nodes in this deployment
 *
 * @param hostname If non-empty, restart will only be scheduled for this host
 */
public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
// Pure delegation: no controller-side state is changed.
configServer.restart(deploymentId, hostname);
}
/**
 * Asks the config server whether this deployment is currently <i>suspended</i>:
 * Not in a state where it should receive traffic.
 */
public boolean isSuspended(DeploymentId deploymentId) {
    try {
        return configServer.isSuspended(deploymentId);
    }
    catch (ConfigServerException e) {
        // A deployment the config server does not know about cannot be suspended;
        // every other error is propagated to the caller.
        if (e.getErrorCode() != ConfigServerException.ErrorCode.NOT_FOUND)
            throw e;
        return false;
    }
}
/** Deactivate application in the given zone: removes the deployment and stores the updated application */
public void deactivate(ApplicationId application, ZoneId zone) {
lockOrThrow(application, lockedApplication -> store(deactivate(lockedApplication, zone)));
}
/**
 * Deactivates a locked application without storing it
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
    DeploymentId deploymentId = new DeploymentId(application.get().id(), zone);
    try {
        configServer.deactivate(deploymentId);
    }
    catch (NotFoundException ignored) {
        // Already gone on the config server; all that remains is removing our local state.
    }
    return application.withoutDeploymentIn(zone);
}
/** Returns the deployment trigger of this controller */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/** Returns the given application id with every dash in the application name replaced by an underscore. */
private ApplicationId dashToUnderscore(ApplicationId id) {
    // String.replace performs a literal replacement; replaceAll would needlessly compile "-" as a regex.
    return ApplicationId.from(id.tenant().value(),
                              id.application().value().replace('-', '_'),
                              id.instance().value());
}
/** Returns the config server client of this controller */
public ConfigServer configServer() { return configServer; }
/**
* Returns a lock which provides exclusive rights to changing this application.
* Any operation which stores an application needs to first acquire this lock, then read, modify
* and store the application, and finally release (close) the lock.
*/
Lock lock(ApplicationId application) {
// Delegates to curator; NOTE(review): presumably a cluster-wide lock shared between controllers — confirm.
return curator.lock(application);
}
/**
* Returns a lock which provides exclusive rights to deploying this application to the given zone.
* The lock is scoped to the combination of application and zone, unlike {@link #lock} which
* covers all changes to the application.
*/
private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
return curator.lockForDeployment(application, zone);
}
/** Verify that each of the production zones listed in the deployment spec exist in this system. */
private void validate(DeploymentSpec deploymentSpec) {
// NOTE(review): the result of jobs() is discarded — presumably invoked for its validation side effects; confirm.
new DeploymentSteps(deploymentSpec, controller::system).jobs();
// Only production zones are checked against the zone registry.
deploymentSpec.zones().stream()
.filter(zone -> zone.environment() == Environment.prod)
.forEach(zone -> {
if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
zone.region().orElse(null)))) {
throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
}
});
}
/** Verify that we don't downgrade an existing production deployment. */
private void validateRun(Application application, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
Deployment deployment = application.deployments().get(zone);
// Only guards production zones which already have a deployment.
// Note the operator precedence (&& binds tighter than ||): a platform downgrade is tolerated
// when the change is pinned, but an application version downgrade is never tolerated.
if ( zone.environment().isProduction() && deployment != null
&& ( platformVersion.compareTo(deployment.version()) < 0 && ! application.change().isPinned()
|| applicationVersion.compareTo(deployment.applicationVersion()) < 0))
throw new IllegalArgumentException(String.format("Rejecting deployment of %s to %s, as the requested versions (platform: %s, application: %s)" +
" are older than the currently deployed (platform: %s, application: %s).",
application, zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion()));
}
/** Returns the rotation repository, used for managing global rotation assignments */
public RotationRepository rotationRepository() {
return rotationRepository;
}
/** Returns all known routing policies for given application */
public Set<RoutingPolicy> routingPolicies(ApplicationId application) {
// Read from curator on every call; no caching in this class.
return curator.readRoutingPolicies(application);
}
/** Returns a new list containing the given applications sorted by ascending application id. */
private static List<Application> sort(List<Application> applications) {
    Comparator<Application> byId = Comparator.comparing(Application::id);
    return applications.stream().sorted(byId).collect(Collectors.toList());
}
/**
* Verifies that the application can be deployed to the tenant, following these rules:
*
* 1. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
* 2. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
*
* @param tenantName Tenant where application should be deployed
* @param applicationPackage Application package
* @param deployer Principal initiating the deployment, possibly empty
*/
public void verifyApplicationIdentityConfiguration(TenantName tenantName, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
// Nothing to verify unless the package declares an Athenz identity domain.
applicationPackage.deploymentSpec().athenzDomain().ifPresent(identityDomain -> {
Tenant tenant = controller.tenants().require(tenantName);
// Narrow the deployer down to an Athenz *user* principal, if one is present.
deployer.filter(AthenzPrincipal.class::isInstance)
.map(AthenzPrincipal.class::cast)
.map(AthenzPrincipal::getIdentity)
.filter(AthenzUser.class::isInstance)
.ifPresentOrElse(user -> {
// Rule 1: the deploying user must have tenant admin access to the declared domain.
if ( ! ((AthenzFacade) accessControl).hasTenantAdminAccess(user, new AthenzDomain(identityDomain.value())))
throw new IllegalArgumentException("User " + user.getFullName() + " is not allowed to launch " +
"services in Athenz domain " + identityDomain.value() + ". " +
"Please reach out to the domain admin.");
},
() -> {
// Rule 2: no user deployer — the tenant's own Athenz domain must match the declared one.
if (tenant.type() != Tenant.Type.athenz)
throw new IllegalArgumentException("Athenz domain defined in deployment.xml, but no " +
"Athenz domain for tenant " + tenantName.value());
AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
if ( ! Objects.equals(tenantDomain.getName(), identityDomain.value()))
throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.value() + "] " +
"must match tenant domain: [" + tenantDomain.getName() + "]");
});
});
}
/** Returns the latest known version within the given major. */
private Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
    Optional<Version> latest = Optional.empty();
    for (VespaVersion vespaVersion : controller.versionStatus().versions()) {
        Version version = vespaVersion.versionNumber();
        if (version.getMajor() != targetMajorVersion) continue;
        if (latest.isEmpty() || version.compareTo(latest.get()) > 0)
            latest = Optional.of(version);
    }
    return latest;
}
/** Extract deployment warnings metric from deployment result */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
    List<Log> logEntries = result.prepareResponse().log;
    if (logEntries == null) return Map.of();
    // Count entries logged at warning level; "entry" avoids shadowing the static "log" field.
    int count = 0;
    for (Log entry : logEntries) {
        boolean isWarning = "warn".equalsIgnoreCase(entry.level) || "warning".equalsIgnoreCase(entry.level);
        if (isWarning) count++;
    }
    Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
    if (count > 0) warnings.put(DeploymentMetrics.Warning.all, count);
    return Collections.unmodifiableMap(warnings);
}
} |
Do you mean the optional check? The endpoints are now expressed as a list instead of an optional, and acting on the empty list doesn't need special handling. It will never be empty here though because a rotation has been assigned in the code above. | private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
if (zone.environment() == Environment.prod && application.get().deploymentSpec().globalServiceId().isPresent()) {
try (RotationLock rotationLock = rotationRepository.lock()) {
Rotation rotation = rotationRepository.getOrAssignRotation(application.get(), rotationLock);
application = application.with(rotation.id());
store(application);
boolean redirectLegacyDns = redirectLegacyDnsFlag.with(FetchVector.Dimension.APPLICATION_ID, application.get().id().serializedForm())
.value();
EndpointList globalEndpoints = application.get()
.endpointsIn(controller.system())
.scope(Endpoint.Scope.global);
globalEndpoints.main().ifPresent(mainEndpoint -> {
registerCname(mainEndpoint.dnsName(), rotation.name());
if (redirectLegacyDns) {
globalEndpoints.legacy(true).asList().forEach(endpoint -> registerCname(endpoint.dnsName(), mainEndpoint.dnsName()));
} else {
globalEndpoints.legacy(true).asList().forEach(endpoint -> registerCname(endpoint.dnsName(), rotation.name()));
}
});
}
}
return application;
} | .scope(Endpoint.Scope.global); | private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
if (zone.environment() == Environment.prod && application.get().deploymentSpec().globalServiceId().isPresent()) {
try (RotationLock rotationLock = rotationRepository.lock()) {
Rotation rotation = rotationRepository.getOrAssignRotation(application.get(), rotationLock);
application = application.with(rotation.id());
store(application);
boolean redirectLegacyDns = redirectLegacyDnsFlag.with(FetchVector.Dimension.APPLICATION_ID, application.get().id().serializedForm())
.value();
EndpointList globalEndpoints = application.get()
.endpointsIn(controller.system())
.scope(Endpoint.Scope.global);
globalEndpoints.main().ifPresent(mainEndpoint -> {
registerCname(mainEndpoint.dnsName(), rotation.name());
if (redirectLegacyDns) {
globalEndpoints.legacy(true).asList().forEach(endpoint -> registerCname(endpoint.dnsName(), mainEndpoint.dnsName()));
} else {
globalEndpoints.legacy(true).asList().forEach(endpoint -> registerCname(endpoint.dnsName(), rotation.name()));
}
});
}
}
return application;
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final ApplicationStore applicationStore;
private final RotationRepository rotationRepository;
private final AccessControl accessControl;
private final NameService nameService;
private final ConfigServer configServer;
private final RoutingGenerator routingGenerator;
private final Clock clock;
private final BooleanFlag redirectLegacyDnsFlag;
private final DeploymentTrigger deploymentTrigger;
ApplicationController(Controller controller, CuratorDb curator,
AccessControl accessControl, RotationsConfig rotationsConfig,
NameService nameService, ConfigServer configServer,
ArtifactRepository artifactRepository, ApplicationStore applicationStore,
RoutingGenerator routingGenerator, BuildService buildService, Clock clock) {
this.controller = controller;
this.curator = curator;
this.accessControl = accessControl;
this.nameService = nameService;
this.configServer = configServer;
this.routingGenerator = routingGenerator;
this.clock = clock;
this.redirectLegacyDnsFlag = Flags.REDIRECT_LEGACY_DNS_NAMES.bindTo(controller.flagSource());
this.artifactRepository = artifactRepository;
this.applicationStore = applicationStore;
this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
this.deploymentTrigger = new DeploymentTrigger(controller, buildService, clock);
Once.after(Duration.ofMinutes(1), () -> {
Instant start = clock.instant();
int count = 0;
for (Application application : curator.readApplications()) {
lockIfPresent(application.id(), this::store);
count++;
}
log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
Duration.between(start, clock.instant())));
});
}
/** Returns the application with the given id, or empty if it is not present */
public Optional<Application> get(ApplicationId id) {
return curator.readApplication(id);
}
/**
 * Returns the application with the given id
 *
 * @throws IllegalArgumentException if it does not exist
 */
public Application require(ApplicationId id) {
    Optional<Application> application = get(id);
    if (application.isEmpty())
        throw new IllegalArgumentException(id + " not found");
    return application.get();
}
/** Returns a snapshot of all applications, sorted by ascending application id */
public List<Application> asList() {
return sort(curator.readApplications());
}
/** Returns all applications of a tenant, sorted by ascending application id */
public List<Application> asList(TenantName tenant) {
return sort(curator.readApplications(tenant));
}
public ArtifactRepository artifacts() { return artifactRepository; }
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
public Version oldestInstalledPlatform(ApplicationId id) {
// Falls back to the system version when the application is unknown or no node reports a version.
return get(id).flatMap(application -> application.productionDeployments().keySet().stream()
.flatMap(zone -> configServer().nodeRepository().list(zone, id, EnumSet.of(active, reserved)).stream())
.map(Node::currentVersion)
.filter(version -> ! version.isEmpty())
.min(naturalOrder()))
.orElse(controller.systemVersion());
}
/** Change the global endpoint status for given deployment */
public void setGlobalRotationStatus(DeploymentId deployment, EndpointStatus status) {
findGlobalEndpoint(deployment).map(endpoint -> {
try {
configServer.setGlobalRotationStatus(deployment, endpoint.upstreamName(), status);
return endpoint;
} catch (IOException e) {
throw new UncheckedIOException("Failed to set rotation status of " + deployment, e);
}
}).orElseThrow(() -> new IllegalArgumentException("No global endpoint exists for " + deployment));
}
/** Get global endpoint status for given deployment */
public Map<RoutingEndpoint, EndpointStatus> globalRotationStatus(DeploymentId deployment) {
return findGlobalEndpoint(deployment).map(endpoint -> {
try {
EndpointStatus status = configServer.getGlobalRotationStatus(deployment, endpoint.upstreamName());
return Collections.singletonMap(endpoint, status);
} catch (IOException e) {
throw new UncheckedIOException("Failed to get rotation status of " + deployment, e);
}
}).orElseGet(Collections::emptyMap);
}
/** Find the global endpoint of given deployment, if any */
public Optional<RoutingEndpoint> findGlobalEndpoint(DeploymentId deployment) {
    for (RoutingEndpoint endpoint : routingGenerator.endpoints(deployment))
        if (endpoint.isGlobal())
            return Optional.of(endpoint);
    return Optional.empty();
}
/**
* Creates a new application for an existing tenant.
*
* @throws IllegalArgumentException if the application already exists
*/
public Application createApplication(ApplicationId id, Optional<Credentials> credentials) {
if ( ! (id.instance().isDefault()))
throw new IllegalArgumentException("Only the instance name 'default' is supported at the moment");
if (id.instance().isTester())
throw new IllegalArgumentException("'" + id + "' is a tester application!");
try (Lock lock = lock(id)) {
if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
Optional<Tenant> tenant = controller.tenants().get(id.tenant());
if (tenant.isEmpty())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
if (get(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
if (get(dashToUnderscore(id)).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
if (tenant.get().type() != Tenant.Type.user) {
if (credentials.isEmpty())
throw new IllegalArgumentException("Could not create '" + id + "': No credentials provided");
if (id.instance().isDefault())
accessControl.createApplication(id, credentials.get());
}
LockedApplication application = new LockedApplication(new Application(id, clock.instant()), lock);
store(application);
log.info("Created " + application);
return application.get();
}
}
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
DeployOptions options) {
return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options, Optional.empty());
}
/** Deploys an application. If the application does not exist it is created. */
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
Optional<ApplicationVersion> applicationVersionFromDeployer,
DeployOptions options,
Optional<Principal> deployingIdentity) {
if (applicationId.instance().isTester())
throw new IllegalArgumentException("'" + applicationId + "' is a tester application!");
Tenant tenant = controller.tenants().require(applicationId.tenant());
if (tenant.type() == Tenant.Type.user && get(applicationId).isEmpty())
createApplication(applicationId, Optional.empty());
try (Lock deploymentLock = lockForDeployment(applicationId, zone)) {
Version platformVersion;
ApplicationVersion applicationVersion;
ApplicationPackage applicationPackage;
Set<String> rotationNames = new HashSet<>();
Set<String> cnames;
try (Lock lock = lock(applicationId)) {
LockedApplication application = new LockedApplication(require(applicationId), lock);
boolean manuallyDeployed = options.deployDirectly || zone.environment().isManuallyDeployed();
boolean preferOldestVersion = options.deployCurrentVersion;
if (manuallyDeployed) {
applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown);
applicationPackage = applicationPackageFromDeployer.orElseThrow(
() -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
platformVersion = options.vespaVersion.map(Version::new).orElse(applicationPackage.deploymentSpec().majorVersion()
.flatMap(this::lastCompatibleVersion)
.orElseGet(controller::systemVersion));
}
else {
JobType jobType = JobType.from(controller.system(), zone)
.orElseThrow(() -> new IllegalArgumentException("No job is known for " + zone + "."));
Optional<JobStatus> job = Optional.ofNullable(application.get().deploymentJobs().jobStatus().get(jobType));
if ( job.isEmpty()
|| job.get().lastTriggered().isEmpty()
|| job.get().lastCompleted().isPresent() && job.get().lastCompleted().get().at().isAfter(job.get().lastTriggered().get().at()))
return unexpectedDeployment(applicationId, zone);
JobRun triggered = job.get().lastTriggered().get();
platformVersion = preferOldestVersion ? triggered.sourcePlatform().orElse(triggered.platform())
: triggered.platform();
applicationVersion = preferOldestVersion ? triggered.sourceApplication().orElse(triggered.application())
: triggered.application();
applicationPackage = getApplicationPackage(application.get(), applicationVersion);
validateRun(application.get(), zone, platformVersion, applicationVersion);
}
verifyApplicationIdentityConfiguration(applicationId.tenant(), applicationPackage, deployingIdentity);
application = withRotation(application, zone);
Application app = application.get();
cnames = app.endpointsIn(controller.system()).asList().stream().map(Endpoint::dnsName).collect(Collectors.toSet());
if ( ! preferOldestVersion
&& ! application.get().deploymentJobs().deployedInternally()
&& ! zone.environment().isManuallyDeployed())
storeWithUpdatedConfig(application, applicationPackage);
}
options = withVersion(platformVersion, options);
ActivateResult result = deploy(applicationId, applicationPackage, zone, options, rotationNames, cnames);
lockOrThrow(applicationId, application ->
store(application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant(),
warningsFrom(result))));
return result;
}
}
/** Fetches the requested application package from the artifact store(s). */
public ApplicationPackage getApplicationPackage(Application application, ApplicationVersion version) {
try {
return application.deploymentJobs().deployedInternally()
? new ApplicationPackage(applicationStore.get(application.id(), version))
: new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()));
}
catch (RuntimeException e) {
try {
log.info("Fetching application package for " + application.id() + " from alternate repository; it is now deployed "
+ (application.deploymentJobs().deployedInternally() ? "internally" : "externally") + "\nException was: " + Exceptions.toMessageString(e));
return application.deploymentJobs().deployedInternally()
? new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()))
: new ApplicationPackage(applicationStore.get(application.id(), version));
}
catch (RuntimeException s) {
e.addSuppressed(s);
throw e;
}
}
}
/** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
// Fail fast if the spec references unknown zones, before anything is written.
validate(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
// Cleanup: drop deployments and job statuses no longer covered by the new spec.
application = withoutDeletedDeployments(application);
application = withoutUnreferencedDeploymentJobs(application);
store(application);
return application;
}
/** Deploy a system application to given zone */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
    if (application.hasApplicationPackage()) {
        deploySystemApplicationPackage(application, zone, version);
        return;
    }
    // No application package: upgrade the application's node types through the node repository instead.
    application.nodeTypes().forEach(nodeType -> configServer().nodeRepository().upgrade(zone, nodeType, version));
}
/** Deploy a system application to given zone */
public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
    // Guard clause: only applications with a package can be deployed this way.
    if ( ! application.hasApplicationPackage()) {
        throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
    }
    ApplicationPackage applicationPackage = new ApplicationPackage(
            artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
    );
    DeployOptions options = withVersion(version, DeployOptions.none());
    // System applications have no rotations and no cnames.
    return deploy(application.id(), applicationPackage, zone, options, Collections.emptySet(), Collections.emptySet());
}
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, DeployOptions options) {
// Tester deployments have no rotations and no cnames.
return deploy(tester.id(), applicationPackage, zone, options, Collections.emptySet(), Collections.emptySet());
}
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
ZoneId zone, DeployOptions deployOptions,
Set<String> rotationNames, Set<String> cnames) {
DeploymentId deploymentId = new DeploymentId(application, zone);
ConfigServer.PreparedApplication preparedApplication =
configServer.deploy(deploymentId, deployOptions, cnames, rotationNames,
applicationPackage.zippedContent());
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
applicationPackage.zippedContent().length);
}
/** Returns an activation result which reports that the deployment request was ignored because no deployment was expected. */
private ActivateResult unexpectedDeployment(ApplicationId application, ZoneId zone) {
Log logEntry = new Log();
logEntry.level = "WARNING";
logEntry.time = clock.instant().toEpochMilli();
logEntry.message = "Ignoring deployment of application '" + application + "' to " + zone +
" as a deployment is not currently expected";
PrepareResponse prepareResponse = new PrepareResponse();
prepareResponse.log = Collections.singletonList(logEntry);
prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
// Synthetic response: nothing was prepared or activated for this request.
return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
}
/**
* Removes production deployments from zones which are no longer listed in the deployment spec.
* Removing such deployments must be explicitly allowed by a validation override.
*
* @return the application with the no-longer-declared deployments deactivated and removed
* @throws IllegalArgumentException if deployments would be removed without a validation override
*/
private LockedApplication withoutDeletedDeployments(LockedApplication application) {
// Production deployments whose zone no longer appears in the deployment spec.
List<Deployment> deploymentsToRemove = application.get().productionDeployments().values().stream()
.filter(deployment -> ! application.get().deploymentSpec().includes(deployment.zone().environment(),
Optional.of(deployment.zone().region())))
.collect(Collectors.toList());
if (deploymentsToRemove.isEmpty()) return application;
// Require an explicit, currently valid override before deleting production deployments.
if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get() +
" is deployed in " +
deploymentsToRemove.stream()
.map(deployment -> deployment.zone().region().value())
.collect(Collectors.joining(", ")) +
", but does not include " +
(deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
" in deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
LockedApplication applicationWithRemoval = application;
for (Deployment deployment : deploymentsToRemove)
applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
return applicationWithRemoval;
}
private LockedApplication withoutUnreferencedDeploymentJobs(LockedApplication application) {
for (JobType job : JobList.from(application.get()).production().mapToList(JobStatus::type)) {
ZoneId zone = job.zone(controller.system());
if (application.get().deploymentSpec().includes(zone.environment(), Optional.of(zone.region())))
continue;
application = application.withoutDeploymentJob(job);
}
return application;
}
private DeployOptions withVersion(Version version, DeployOptions options) {
return new DeployOptions(options.deployDirectly,
Optional.of(version),
options.ignoreValidationErrors,
options.deployCurrentVersion);
}
/** Register a CNAME record in DNS */
private void registerCname(String name, String targetName) {
try {
RecordData data = RecordData.fqdn(targetName);
List<Record> records = nameService.findRecords(Record.Type.CNAME, RecordName.from(name));
records.forEach(record -> {
if ( ! record.data().equals(data)) {
log.info("Updating mapping for record '" + record + "': '" + name
+ "' -> '" + data.asString() + "'");
nameService.updateRecord(record, data);
}
});
if (records.isEmpty()) {
Record record = nameService.createCname(RecordName.from(name), data);
log.info("Registered mapping as record '" + record + "'");
}
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to register CNAME", e);
}
}
/** Returns the endpoints of the deployment, or an empty list if the request fails */
public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
if ( ! get(deploymentId.applicationId())
.map(application -> application.deployments().containsKey(deploymentId.zoneId()))
.orElse(deploymentId.applicationId().instance().isTester()))
throw new NotExistsException("Deployment", deploymentId.toString());
try {
return Optional.of(ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
.map(RoutingEndpoint::endpoint)
.map(URI::create)
.iterator()));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
+ Exceptions.toMessageString(e));
return Optional.empty();
}
}
/**
* Deletes the the given application. All known instances of the applications will be deleted,
* including PR instances.
*
* @throws IllegalArgumentException if the application has deployments or the caller is not authorized
* @throws NotExistsException if no instances of the application exist
*/
public void deleteApplication(ApplicationId applicationId, Optional<Credentials> credentials) {
Tenant tenant = controller.tenants().require(applicationId.tenant());
if (tenant.type() != Tenant.Type.user && ! credentials.isPresent())
throw new IllegalArgumentException("Could not delete application '" + applicationId + "': No credentials provided");
List<ApplicationId> instances = asList(applicationId.tenant()).stream()
.map(Application::id)
.filter(id -> id.application().equals(applicationId.application()))
.collect(Collectors.toList());
if (instances.isEmpty()) {
throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
}
instances.forEach(id -> lockOrThrow(id, application -> {
if ( ! application.get().deployments().isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
curator.removeApplication(id);
applicationStore.removeAll(id);
applicationStore.removeAll(TesterId.of(id));
log.info("Deleted " + application);
}));
if (tenant.type() != Tenant.Type.user)
accessControl.deleteApplication(applicationId, credentials.get());
}
/**
* Replace any previous version of this application by this instance
*
* @param application a locked application to store
*/
public void store(LockedApplication application) {
curator.writeApplication(application.get());
}
/**
* Acquire a locked application to modify and store, if there is an application with the given id.
*
* @param applicationId ID of the application to lock and get.
* @param action Function which acts on the locked application.
*/
public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
}
}
/**
 * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
 *
 * @param applicationId ID of the application to lock and require.
 * @param action Function which acts on the locked application.
 * @throws IllegalArgumentException when application does not exist.
 */
public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
// require() throws IllegalArgumentException if the application is missing; the lock is released either way.
action.accept(new LockedApplication(require(applicationId), lock));
}
}
/**
 * Tells config server to schedule a restart of all nodes in this deployment
 *
 * @param hostname If non-empty, restart will only be scheduled for this host
 */
public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
// Pure delegation; the config server performs the actual scheduling.
configServer.restart(deploymentId, hostname);
}
/**
 * Asks the config server whether this deployment is currently <i>suspended</i>:
 * Not in a state where it should receive traffic.
 */
public boolean isSuspended(DeploymentId deploymentId) {
try {
return configServer.isSuspended(deploymentId);
}
catch (ConfigServerException e) {
// A deployment unknown to the config server is treated as not suspended; other errors propagate.
if (e.getErrorCode() == ConfigServerException.ErrorCode.NOT_FOUND)
return false;
throw e;
}
}
/** Deactivate application in the given zone */
public void deactivate(ApplicationId application, ZoneId zone) {
// Lock, deactivate remotely, then persist the application without this deployment.
lockOrThrow(application, lockedApplication -> store(deactivate(lockedApplication, zone)));
}
/**
 * Deactivates a locked application without storing it
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
try {
configServer.deactivate(new DeploymentId(application.get().id(), zone));
}
catch (NotFoundException ignored) {
// Already gone on the config server; still remove it from our own state below.
}
return application.withoutDeploymentIn(zone);
}
/** Returns the deployment trigger owned by this controller. */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/** Returns an id equal to the given one except that every '-' in the application name is replaced by '_'. */
private ApplicationId dashToUnderscore(ApplicationId id) {
    // replace() does a literal substitution; the previous replaceAll() compiled a regex for no benefit.
    return ApplicationId.from(id.tenant().value(),
                              id.application().value().replace("-", "_"),
                              id.instance().value());
}
/** Returns the config server client used by this controller. */
public ConfigServer configServer() { return configServer; }
/**
 * Returns a lock which provides exclusive rights to changing this application.
 * Any operation which stores an application need to first acquire this lock, then read, modify
 * and store the application, and finally release (close) the lock.
 */
Lock lock(ApplicationId application) {
return curator.lock(application);
}
/**
 * Returns a lock which provides exclusive rights to deploying this application to the given zone.
 */
private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
// Separate from lock(application): serializes deployments per zone without blocking other application updates.
return curator.lockForDeployment(application, zone);
}
/** Verify that each of the production zones listed in the deployment spec exist in this system. */
private void validate(DeploymentSpec deploymentSpec) {
// Building the job list validates the declared steps; the result is intentionally discarded.
new DeploymentSteps(deploymentSpec, controller::system).jobs();
deploymentSpec.zones().stream()
.filter(zone -> zone.environment() == Environment.prod)
.forEach(zone -> {
if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
zone.region().orElse(null)))) {
throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
}
});
}
/** Verify that we don't downgrade an existing production deployment. */
private void validateRun(Application application, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
Deployment deployment = application.deployments().get(zone);
// A pinned change is allowed to lower the platform version; application version may never go backwards.
if ( zone.environment().isProduction() && deployment != null
&& ( platformVersion.compareTo(deployment.version()) < 0 && ! application.change().isPinned()
|| applicationVersion.compareTo(deployment.applicationVersion()) < 0))
throw new IllegalArgumentException(String.format("Rejecting deployment of %s to %s, as the requested versions (platform: %s, application: %s)" +
" are older than the currently deployed (platform: %s, application: %s).",
application, zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion()));
}
/** Returns the rotation repository, used for managing global rotation assignments */
public RotationRepository rotationRepository() {
return rotationRepository;
}
/** Returns all known routing policies for given application */
public Set<RoutingPolicy> routingPolicies(ApplicationId application) {
return curator.readRoutingPolicies(application);
}
/** Sort given list of applications by application ID */
private static List<Application> sort(List<Application> applications) {
// Returns a new list; the input list is left untouched.
return applications.stream().sorted(Comparator.comparing(Application::id)).collect(Collectors.toList());
}
/**
 * Verifies that the application can be deployed to the tenant, following these rules:
 *
 * 1. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
 * 2. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
 *
 * @param tenantName Tenant where application should be deployed
 * @param applicationPackage Application package
 * @param deployer Principal initiating the deployment, possibly empty
 * @throws IllegalArgumentException if the deployer is not authorized, or the domains do not match
 */
public void verifyApplicationIdentityConfiguration(TenantName tenantName, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
// No athenz-domain element in deployment.xml means there is nothing to verify.
applicationPackage.deploymentSpec().athenzDomain().ifPresent(identityDomain -> {
Tenant tenant = controller.tenants().require(tenantName);
// Rule 1 applies only when the deployer is an Athenz *user*; otherwise fall through to the domain-match check.
deployer.filter(AthenzPrincipal.class::isInstance)
.map(AthenzPrincipal.class::cast)
.map(AthenzPrincipal::getIdentity)
.filter(AthenzUser.class::isInstance)
.ifPresentOrElse(user -> {
if ( ! ((AthenzFacade) accessControl).hasTenantAdminAccess(user, new AthenzDomain(identityDomain.value())))
throw new IllegalArgumentException("User " + user.getFullName() + " is not allowed to launch " +
"services in Athenz domain " + identityDomain.value() + ". " +
"Please reach out to the domain admin.");
},
() -> {
if (tenant.type() != Tenant.Type.athenz)
throw new IllegalArgumentException("Athenz domain defined in deployment.xml, but no " +
"Athenz domain for tenant " + tenantName.value());
AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
if ( ! Objects.equals(tenantDomain.getName(), identityDomain.value()))
throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.value() + "] " +
"must match tenant domain: [" + tenantDomain.getName() + "]");
});
});
}
/** Returns the latest known version within the given major, or empty if no such version is known. */
private Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
    Version newest = null;
    for (VespaVersion candidate : controller.versionStatus().versions()) {
        Version version = candidate.versionNumber();
        if (version.getMajor() != targetMajorVersion) continue;
        if (newest == null || version.compareTo(newest) > 0)
            newest = version;
    }
    return Optional.ofNullable(newest);
}
/** Extract deployment warnings metric from deployment result */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
    List<Log> entries = result.prepareResponse().log;
    if (entries == null) return Map.of();
    // Count log entries at warning level; all warnings are aggregated under Warning.all.
    int warningCount = 0;
    for (Log entry : entries) {
        if ("warn".equalsIgnoreCase(entry.level) || "warning".equalsIgnoreCase(entry.level))
            warningCount++;
    }
    return warningCount == 0 ? Map.of() : Map.of(DeploymentMetrics.Warning.all, warningCount);
}
}
class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
/** Source of build artifacts (application packages) for externally built applications */
private final ArtifactRepository artifactRepository;
/** Store for application packages of internally built applications */
private final ApplicationStore applicationStore;
/** Manages global rotation assignments */
private final RotationRepository rotationRepository;
/** Authorization and tenant/application life cycle in the identity provider */
private final AccessControl accessControl;
/** DNS record management */
private final NameService nameService;
private final ConfigServer configServer;
private final RoutingGenerator routingGenerator;
private final Clock clock;
/** Feature flag controlling redirection of legacy DNS names */
private final BooleanFlag redirectLegacyDnsFlag;
private final DeploymentTrigger deploymentTrigger;
/**
 * Creates the application controller and schedules a one-time rewrite of all stored
 * applications one minute after startup, to migrate them to the current serialization format.
 */
ApplicationController(Controller controller, CuratorDb curator,
AccessControl accessControl, RotationsConfig rotationsConfig,
NameService nameService, ConfigServer configServer,
ArtifactRepository artifactRepository, ApplicationStore applicationStore,
RoutingGenerator routingGenerator, BuildService buildService, Clock clock) {
this.controller = controller;
this.curator = curator;
this.accessControl = accessControl;
this.nameService = nameService;
this.configServer = configServer;
this.routingGenerator = routingGenerator;
this.clock = clock;
this.redirectLegacyDnsFlag = Flags.REDIRECT_LEGACY_DNS_NAMES.bindTo(controller.flagSource());
this.artifactRepository = artifactRepository;
this.applicationStore = applicationStore;
this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
this.deploymentTrigger = new DeploymentTrigger(controller, buildService, clock);
// Re-store all applications once, shortly after startup, so they are written in the current format.
Once.after(Duration.ofMinutes(1), () -> {
Instant start = clock.instant();
int count = 0;
for (Application application : curator.readApplications()) {
lockIfPresent(application.id(), this::store);
count++;
}
log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
Duration.between(start, clock.instant())));
});
}
/** Returns the application with the given id, or empty if it is not present */
public Optional<Application> get(ApplicationId id) {
return curator.readApplication(id);
}
/**
 * Returns the application with the given id
 *
 * @throws IllegalArgumentException if it does not exist
 */
public Application require(ApplicationId id) {
    Optional<Application> application = get(id);
    if (application.isEmpty())
        throw new IllegalArgumentException(id + " not found");
    return application.get();
}
/** Returns a snapshot of all applications, sorted by application id */
public List<Application> asList() {
return sort(curator.readApplications());
}
/** Returns all applications of a tenant, sorted by application id */
public List<Application> asList(TenantName tenant) {
return sort(curator.readApplications(tenant));
}
/** Returns the artifact repository holding externally built application packages */
public ArtifactRepository artifacts() { return artifactRepository; }
/** Returns the store holding internally built application packages */
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
public Version oldestInstalledPlatform(ApplicationId id) {
// Falls back to the system version when the application is unknown or no node reports a version.
return get(id).flatMap(application -> application.productionDeployments().keySet().stream()
.flatMap(zone -> configServer().nodeRepository().list(zone, id, EnumSet.of(active, reserved)).stream())
.map(Node::currentVersion)
.filter(version -> ! version.isEmpty())
.min(naturalOrder()))
.orElse(controller.systemVersion());
}
/**
 * Change the global endpoint status for given deployment
 *
 * @throws IllegalArgumentException if the deployment has no global endpoint
 * @throws UncheckedIOException if the config server call fails
 */
public void setGlobalRotationStatus(DeploymentId deployment, EndpointStatus status) {
findGlobalEndpoint(deployment).map(endpoint -> {
try {
configServer.setGlobalRotationStatus(deployment, endpoint.upstreamName(), status);
return endpoint;
} catch (IOException e) {
throw new UncheckedIOException("Failed to set rotation status of " + deployment, e);
}
}).orElseThrow(() -> new IllegalArgumentException("No global endpoint exists for " + deployment));
}
/** Get global endpoint status for given deployment; empty map if the deployment has no global endpoint */
public Map<RoutingEndpoint, EndpointStatus> globalRotationStatus(DeploymentId deployment) {
return findGlobalEndpoint(deployment).map(endpoint -> {
try {
EndpointStatus status = configServer.getGlobalRotationStatus(deployment, endpoint.upstreamName());
return Collections.singletonMap(endpoint, status);
} catch (IOException e) {
throw new UncheckedIOException("Failed to get rotation status of " + deployment, e);
}
}).orElseGet(Collections::emptyMap);
}
/** Find the global endpoint of given deployment, if any */
public Optional<RoutingEndpoint> findGlobalEndpoint(DeploymentId deployment) {
    // Endpoints are returned in routing-generator order; the first global one wins, as before.
    for (RoutingEndpoint endpoint : routingGenerator.endpoints(deployment)) {
        if (endpoint.isGlobal())
            return Optional.of(endpoint);
    }
    return Optional.empty();
}
/**
 * Creates a new application for an existing tenant.
 *
 * @throws IllegalArgumentException if the application already exists
 */
public Application createApplication(ApplicationId id, Optional<Credentials> credentials) {
if ( ! (id.instance().isDefault()))
throw new IllegalArgumentException("Only the instance name 'default' is supported at the moment");
if (id.instance().isTester())
throw new IllegalArgumentException("'" + id + "' is a tester application!");
try (Lock lock = lock(id)) {
// Validate the application name only when no instance of it exists yet.
if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
Optional<Tenant> tenant = controller.tenants().get(id.tenant());
if (tenant.isEmpty())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
if (get(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
// Dashes and underscores collide in some downstream systems, so both spellings must be unique.
if (get(dashToUnderscore(id)).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
// User tenants are implicitly managed; all others are registered with the access control system.
if (tenant.get().type() != Tenant.Type.user) {
if (credentials.isEmpty())
throw new IllegalArgumentException("Could not create '" + id + "': No credentials provided");
if (id.instance().isDefault())
accessControl.createApplication(id, credentials.get());
}
LockedApplication application = new LockedApplication(new Application(id, clock.instant()), lock);
store(application);
log.info("Created " + application);
return application.get();
}
}
/** Deploys an application without an explicit application version or deploying identity. */
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
DeployOptions options) {
return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options, Optional.empty());
}
/** Deploys an application. If the application does not exist it is created. */
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
Optional<ApplicationVersion> applicationVersionFromDeployer,
DeployOptions options,
Optional<Principal> deployingIdentity) {
if (applicationId.instance().isTester())
throw new IllegalArgumentException("'" + applicationId + "' is a tester application!");
Tenant tenant = controller.tenants().require(applicationId.tenant());
// Applications under user tenants are auto-created on first deploy.
if (tenant.type() == Tenant.Type.user && get(applicationId).isEmpty())
createApplication(applicationId, Optional.empty());
// Outer lock serializes deployments of this application to this zone; the inner application lock
// is held only while resolving versions and updating stored state.
try (Lock deploymentLock = lockForDeployment(applicationId, zone)) {
Version platformVersion;
ApplicationVersion applicationVersion;
ApplicationPackage applicationPackage;
// NOTE(review): rotationNames is never populated in this view before being passed to deploy();
// presumably the (elsewhere-defined) withRotation step supplies names — confirm.
Set<String> rotationNames = new HashSet<>();
Set<String> cnames;
try (Lock lock = lock(applicationId)) {
LockedApplication application = new LockedApplication(require(applicationId), lock);
boolean manuallyDeployed = options.deployDirectly || zone.environment().isManuallyDeployed();
boolean preferOldestVersion = options.deployCurrentVersion;
if (manuallyDeployed) {
// Manual deployments use the package and version supplied by the deployer.
applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown);
applicationPackage = applicationPackageFromDeployer.orElseThrow(
() -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
platformVersion = options.vespaVersion.map(Version::new).orElse(applicationPackage.deploymentSpec().majorVersion()
.flatMap(this::lastCompatibleVersion)
.orElseGet(controller::systemVersion));
}
else {
// Job-driven deployments take versions from the last triggered run of the corresponding job.
JobType jobType = JobType.from(controller.system(), zone)
.orElseThrow(() -> new IllegalArgumentException("No job is known for " + zone + "."));
Optional<JobStatus> job = Optional.ofNullable(application.get().deploymentJobs().jobStatus().get(jobType));
if ( job.isEmpty()
|| job.get().lastTriggered().isEmpty()
|| job.get().lastCompleted().isPresent() && job.get().lastCompleted().get().at().isAfter(job.get().lastTriggered().get().at()))
return unexpectedDeployment(applicationId, zone);
JobRun triggered = job.get().lastTriggered();
platformVersion = preferOldestVersion ? triggered.sourcePlatform().orElse(triggered.platform())
: triggered.platform();
applicationVersion = preferOldestVersion ? triggered.sourceApplication().orElse(triggered.application())
: triggered.application();
applicationPackage = getApplicationPackage(application.get(), applicationVersion);
validateRun(application.get(), zone, platformVersion, applicationVersion);
}
verifyApplicationIdentityConfiguration(applicationId.tenant(), applicationPackage, deployingIdentity);
application = withRotation(application, zone);
Application app = application.get();
cnames = app.endpointsIn(controller.system()).asList().stream().map(Endpoint::dnsName).collect(Collectors.toSet());
// Do not update stored config for old-version redeployments or manual deployments.
if ( ! preferOldestVersion
&& ! application.get().deploymentJobs().deployedInternally()
&& ! zone.environment().isManuallyDeployed())
storeWithUpdatedConfig(application, applicationPackage);
}
options = withVersion(platformVersion, options);
ActivateResult result = deploy(applicationId, applicationPackage, zone, options, rotationNames, cnames);
lockOrThrow(applicationId, application ->
store(application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant(),
warningsFrom(result))));
return result;
}
}
/** Fetches the requested application package from the artifact store(s). */
public ApplicationPackage getApplicationPackage(Application application, ApplicationVersion version) {
try {
// Internally built packages live in the application store; externally built ones in the artifact repository.
return application.deploymentJobs().deployedInternally()
? new ApplicationPackage(applicationStore.get(application.id(), version))
: new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()));
}
catch (RuntimeException e) {
// The application may have switched build pipeline; fall back to the other repository.
try {
log.info("Fetching application package for " + application.id() + " from alternate repository; it is now deployed "
+ (application.deploymentJobs().deployedInternally() ? "internally" : "externally") + "\nException was: " + Exceptions.toMessageString(e));
return application.deploymentJobs().deployedInternally()
? new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()))
: new ApplicationPackage(applicationStore.get(application.id(), version));
}
catch (RuntimeException s) {
// Report the primary failure, with the fallback failure attached as suppressed.
e.addSuppressed(s);
throw e;
}
}
}
/** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
// Validate before mutating anything, so a bad spec leaves stored state unchanged.
validate(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
// Remove deployments and jobs no longer covered by the (possibly changed) deployment spec.
application = withoutDeletedDeployments(application);
application = withoutUnreferencedDeploymentJobs(application);
store(application);
return application;
}
/** Deploy a system application to given zone */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
deploySystemApplicationPackage(application, zone, version);
} else {
// Package-less system applications are upgraded directly through the node repository.
application.nodeTypes().forEach(nodeType -> configServer().nodeRepository().upgrade(zone, nodeType, version));
}
}
/**
 * Deploy the application package of a system application to the given zone.
 *
 * @throws IllegalArgumentException if the given system application has no application package
 */
public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
    // Guard clause instead of if/else; a specific exception type replaces the raw RuntimeException.
    if ( ! application.hasApplicationPackage())
        throw new IllegalArgumentException("This system application does not have an application package: " + application.id().toShortString());
    ApplicationPackage applicationPackage = new ApplicationPackage(
            artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
    );
    DeployOptions options = withVersion(version, DeployOptions.none());
    return deploy(application.id(), applicationPackage, zone, options, Collections.emptySet(), Collections.emptySet());
}
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, DeployOptions options) {
// Tester deployments have no rotations or cnames.
return deploy(tester.id(), applicationPackage, zone, options, Collections.emptySet(), Collections.emptySet());
}
/** Prepares and activates the given application package on the config server and returns the result. */
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
ZoneId zone, DeployOptions deployOptions,
Set<String> rotationNames, Set<String> cnames) {
DeploymentId deploymentId = new DeploymentId(application, zone);
ConfigServer.PreparedApplication preparedApplication =
configServer.deploy(deploymentId, deployOptions, cnames, rotationNames,
applicationPackage.zippedContent());
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
applicationPackage.zippedContent().length);
}
/** Returns a no-op activation result with a warning, for a deployment attempt which was not expected by any job. */
private ActivateResult unexpectedDeployment(ApplicationId application, ZoneId zone) {
Log logEntry = new Log();
logEntry.level = "WARNING";
logEntry.time = clock.instant().toEpochMilli();
logEntry.message = "Ignoring deployment of application '" + application + "' to " + zone +
" as a deployment is not currently expected";
PrepareResponse prepareResponse = new PrepareResponse();
prepareResponse.log = Collections.singletonList(logEntry);
prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
}
/**
 * Removes production deployments no longer declared in the deployment spec,
 * provided the removal is explicitly allowed by a validation override.
 *
 * @throws IllegalArgumentException if deployments would be removed without an active deployment-removal override
 */
private LockedApplication withoutDeletedDeployments(LockedApplication application) {
List<Deployment> deploymentsToRemove = application.get().productionDeployments().values().stream()
.filter(deployment -> ! application.get().deploymentSpec().includes(deployment.zone().environment(),
Optional.of(deployment.zone().region())))
.collect(Collectors.toList());
if (deploymentsToRemove.isEmpty()) return application;
// Removing a production deployment is destructive, so it must be explicitly allowed in validation-overrides.xml.
if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get() +
" is deployed in " +
deploymentsToRemove.stream()
.map(deployment -> deployment.zone().region().value())
.collect(Collectors.joining(", ")) +
", but does not include " +
(deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
" in deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
LockedApplication applicationWithRemoval = application;
for (Deployment deployment : deploymentsToRemove)
applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
return applicationWithRemoval;
}
/** Removes status for production jobs whose zones are no longer referenced by the deployment spec. */
private LockedApplication withoutUnreferencedDeploymentJobs(LockedApplication application) {
for (JobType job : JobList.from(application.get()).production().mapToList(JobStatus::type)) {
ZoneId zone = job.zone(controller.system());
if (application.get().deploymentSpec().includes(zone.environment(), Optional.of(zone.region())))
continue;
application = application.withoutDeploymentJob(job);
}
return application;
}
/** Returns a copy of the given options with the Vespa version set to the given version. */
private DeployOptions withVersion(Version version, DeployOptions options) {
return new DeployOptions(options.deployDirectly,
Optional.of(version),
options.ignoreValidationErrors,
options.deployCurrentVersion);
}
/** Register a CNAME record in DNS. Best effort: failures are logged, never thrown. */
private void registerCname(String name, String targetName) {
try {
RecordData data = RecordData.fqdn(targetName);
List<Record> records = nameService.findRecords(Record.Type.CNAME, RecordName.from(name));
// Update existing records in place when they point at the wrong target.
records.forEach(record -> {
if ( ! record.data().equals(data)) {
log.info("Updating mapping for record '" + record + "': '" + name
+ "' -> '" + data.asString() + "'");
nameService.updateRecord(record, data);
}
});
if (records.isEmpty()) {
Record record = nameService.createCname(RecordName.from(name), data);
log.info("Registered mapping as record '" + record + "'");
}
} catch (RuntimeException e) {
// DNS registration is deliberately best-effort; deployment must not fail on DNS errors.
log.log(Level.WARNING, "Failed to register CNAME", e);
}
}
/**
 * Returns the endpoints of the deployment, or empty if retrieving them fails.
 *
 * @throws NotExistsException if the deployment is not known (and not a tester deployment)
 */
public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
// Tester applications are not stored with deployments, so they bypass the existence check.
if ( ! get(deploymentId.applicationId())
.map(application -> application.deployments().containsKey(deploymentId.zoneId()))
.orElse(deploymentId.applicationId().instance().isTester()))
throw new NotExistsException("Deployment", deploymentId.toString());
try {
return Optional.of(ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
.map(RoutingEndpoint::endpoint)
.map(URI::create)
.iterator()));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
+ Exceptions.toMessageString(e));
return Optional.empty();
}
}
/**
 * Deletes the given application. All known instances of the applications will be deleted,
 * including PR instances.
 *
 * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
 * @throws NotExistsException if no instances of the application exist
 */
public void deleteApplication(ApplicationId applicationId, Optional<Credentials> credentials) {
    Tenant tenant = controller.tenants().require(applicationId.tenant());
    // User tenants are implicitly managed; all others require credentials for access control.
    // isEmpty() for consistency with the rest of this class.
    if (tenant.type() != Tenant.Type.user && credentials.isEmpty())
        throw new IllegalArgumentException("Could not delete application '" + applicationId + "': No credentials provided");
    List<ApplicationId> instances = asList(applicationId.tenant()).stream()
                                                                  .map(Application::id)
                                                                  .filter(id -> id.application().equals(applicationId.application()))
                                                                  .collect(Collectors.toList());
    if (instances.isEmpty()) {
        throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
    }
    instances.forEach(id -> lockOrThrow(id, application -> {
        // Refuse deletion while any deployment is active; the caller must deactivate first.
        if ( ! application.get().deployments().isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
        curator.removeApplication(id);
        applicationStore.removeAll(id);
        applicationStore.removeAll(TesterId.of(id));
        log.info("Deleted " + application);
    }));
    if (tenant.type() != Tenant.Type.user)
        accessControl.deleteApplication(applicationId, credentials.get());
}
/**
 * Replace any previous version of this application by this instance
 *
 * @param application a locked application to store
 */
public void store(LockedApplication application) {
// The caller holds the application lock (carried by LockedApplication), so this write cannot race another writer.
curator.writeApplication(application.get());
}
/**
 * Acquire a locked application to modify and store, if there is an application with the given id.
 *
 * @param applicationId ID of the application to lock and get.
 * @param action Function which acts on the locked application.
 */
public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
// Read AFTER taking the lock so the action sees the latest stored state; no-op if the application is absent.
get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
}
}
/**
 * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
 *
 * @param applicationId ID of the application to lock and require.
 * @param action Function which acts on the locked application.
 * @throws IllegalArgumentException when application does not exist.
 */
public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
// require() throws IllegalArgumentException if the application is missing; the lock is released either way.
action.accept(new LockedApplication(require(applicationId), lock));
}
}
/**
 * Tells config server to schedule a restart of all nodes in this deployment
 *
 * @param hostname If non-empty, restart will only be scheduled for this host
 */
public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
// Pure delegation; the config server performs the actual scheduling.
configServer.restart(deploymentId, hostname);
}
/**
 * Asks the config server whether this deployment is currently <i>suspended</i>:
 * Not in a state where it should receive traffic.
 */
public boolean isSuspended(DeploymentId deploymentId) {
try {
return configServer.isSuspended(deploymentId);
}
catch (ConfigServerException e) {
// A deployment unknown to the config server is treated as not suspended; other errors propagate.
if (e.getErrorCode() == ConfigServerException.ErrorCode.NOT_FOUND)
return false;
throw e;
}
}
/** Deactivate application in the given zone */
public void deactivate(ApplicationId application, ZoneId zone) {
// Lock, deactivate remotely, then persist the application without this deployment.
lockOrThrow(application, lockedApplication -> store(deactivate(lockedApplication, zone)));
}
/**
 * Deactivates a locked application without storing it
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
try {
configServer.deactivate(new DeploymentId(application.get().id(), zone));
}
catch (NotFoundException ignored) {
// Already gone on the config server; still remove it from our own state below.
}
return application.withoutDeploymentIn(zone);
}
/** Returns the deployment trigger owned by this controller. */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/** Returns an id equal to the given one except that every '-' in the application name is replaced by '_'. */
private ApplicationId dashToUnderscore(ApplicationId id) {
    // replace() does a literal substitution; the previous replaceAll() compiled a regex for no benefit.
    return ApplicationId.from(id.tenant().value(),
                              id.application().value().replace("-", "_"),
                              id.instance().value());
}
/** Returns the config server client used by this controller. */
public ConfigServer configServer() { return configServer; }
/**
 * Returns a lock which provides exclusive rights to changing this application.
 * Any operation which stores an application need to first acquire this lock, then read, modify
 * and store the application, and finally release (close) the lock.
 */
Lock lock(ApplicationId application) {
return curator.lock(application);
}
/**
 * Returns a lock which provides exclusive rights to deploying this application to the given zone.
 */
private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
// Separate from lock(application): serializes deployments per zone without blocking other application updates.
return curator.lockForDeployment(application, zone);
}
/** Verify that each of the production zones listed in the deployment spec exist in this system. */
private void validate(DeploymentSpec deploymentSpec) {
// Building the job list validates the declared steps; the result is intentionally discarded.
new DeploymentSteps(deploymentSpec, controller::system).jobs();
deploymentSpec.zones().stream()
.filter(zone -> zone.environment() == Environment.prod)
.forEach(zone -> {
if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
zone.region().orElse(null)))) {
throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
}
});
}
/** Verify that we don't downgrade an existing production deployment. */
private void validateRun(Application application, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
Deployment deployment = application.deployments().get(zone);
// A pinned change is allowed to lower the platform version; application version may never go backwards.
if ( zone.environment().isProduction() && deployment != null
&& ( platformVersion.compareTo(deployment.version()) < 0 && ! application.change().isPinned()
|| applicationVersion.compareTo(deployment.applicationVersion()) < 0))
throw new IllegalArgumentException(String.format("Rejecting deployment of %s to %s, as the requested versions (platform: %s, application: %s)" +
" are older than the currently deployed (platform: %s, application: %s).",
application, zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion()));
}
/** Returns the rotation repository, used for managing global rotation assignments */
public RotationRepository rotationRepository() {
return rotationRepository;
}
/** Returns all known routing policies for given application */
public Set<RoutingPolicy> routingPolicies(ApplicationId application) {
return curator.readRoutingPolicies(application);
}
/** Sort given list of applications by application ID */
private static List<Application> sort(List<Application> applications) {
// Returns a new list; the input list is left untouched.
return applications.stream().sorted(Comparator.comparing(Application::id)).collect(Collectors.toList());
}
/**
 * Verifies that the application can be deployed to the tenant, following these rules:
 *
 * 1. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
 * 2. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
 *
 * @param tenantName Tenant where application should be deployed
 * @param applicationPackage Application package
 * @param deployer Principal initiating the deployment, possibly empty
 * @throws IllegalArgumentException if the deployer is not authorized, or the domains do not match
 */
public void verifyApplicationIdentityConfiguration(TenantName tenantName, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
// No athenz-domain element in deployment.xml means there is nothing to verify.
applicationPackage.deploymentSpec().athenzDomain().ifPresent(identityDomain -> {
Tenant tenant = controller.tenants().require(tenantName);
// Rule 1 applies only when the deployer is an Athenz *user*; otherwise fall through to the domain-match check.
deployer.filter(AthenzPrincipal.class::isInstance)
.map(AthenzPrincipal.class::cast)
.map(AthenzPrincipal::getIdentity)
.filter(AthenzUser.class::isInstance)
.ifPresentOrElse(user -> {
if ( ! ((AthenzFacade) accessControl).hasTenantAdminAccess(user, new AthenzDomain(identityDomain.value())))
throw new IllegalArgumentException("User " + user.getFullName() + " is not allowed to launch " +
"services in Athenz domain " + identityDomain.value() + ". " +
"Please reach out to the domain admin.");
},
() -> {
if (tenant.type() != Tenant.Type.athenz)
throw new IllegalArgumentException("Athenz domain defined in deployment.xml, but no " +
"Athenz domain for tenant " + tenantName.value());
AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
if ( ! Objects.equals(tenantDomain.getName(), identityDomain.value()))
throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.value() + "] " +
"must match tenant domain: [" + tenantDomain.getName() + "]");
});
});
}
/** Returns the latest known version within the given major. */
private Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
return controller.versionStatus().versions().stream()
.map(VespaVersion::versionNumber)
.filter(version -> version.getMajor() == targetMajorVersion)
.max(naturalOrder());
}
/** Extract deployment warnings metric from deployment result */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
if (result.prepareResponse().log == null) return Map.of();
Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
for (Log log : result.prepareResponse().log) {
if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue;
warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
}
return Collections.unmodifiableMap(warnings);
}
} |
I'm not sure about this. It's a rotation, so it should not include the instance name - rotations and instances are a many-to-many relation | public void test_global_endpoints() {
RotationName rotation = RotationName.from("default");
Map<String, Endpoint> tests = Map.of(
"http:
Endpoint.of(app1).target(rotation).on(Port.plain(4080)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).in(SystemName.cd),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app1).target(RotationName.from("r1")).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app2).target(rotation).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app2).target(RotationName.from("r2")).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls()).directRouting().in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
} | "https: | public void test_global_endpoints() {
RotationName rotation = RotationName.from("default");
Map<String, Endpoint> tests = Map.of(
"http:
Endpoint.of(app1).target(rotation).on(Port.plain(4080)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).in(SystemName.cd),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app1).target(RotationName.from("r1")).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app2).target(rotation).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app2).target(RotationName.from("r2")).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls()).directRouting().in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
} | class EndpointTest {
private static final ApplicationId app1 = ApplicationId.from("t1", "a1", "default");
private static final ApplicationId app2 = ApplicationId.from("t2", "a2", "i2");
@Test
@Test
public void test_zone_endpoints() {
ClusterSpec.Id cluster = ClusterSpec.Id.from("default");
ZoneId prodZone = ZoneId.from("prod", "us-north-1");
ZoneId testZone = ZoneId.from("test", "us-north-2");
Map<String, Endpoint> tests = Map.of(
"http:
Endpoint.of(app1).target(cluster, prodZone).on(Port.plain(4080)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.cd),
"https:
Endpoint.of(app1).target(cluster, testZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).in(SystemName.main),
"https:
Endpoint.of(app2).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public),
"https:
Endpoint.of(app2).target(ClusterSpec.Id.from("c2"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
}
} | class EndpointTest {
private static final ApplicationId app1 = ApplicationId.from("t1", "a1", "default");
private static final ApplicationId app2 = ApplicationId.from("t2", "a2", "i2");
@Test
@Test
public void test_zone_endpoints() {
ClusterSpec.Id cluster = ClusterSpec.Id.from("default");
ZoneId prodZone = ZoneId.from("prod", "us-north-1");
ZoneId testZone = ZoneId.from("test", "us-north-2");
Map<String, Endpoint> tests = Map.of(
"http:
Endpoint.of(app1).target(cluster, prodZone).on(Port.plain(4080)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.cd),
"https:
Endpoint.of(app1).target(cluster, testZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).in(SystemName.main),
"https:
Endpoint.of(app2).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public),
"https:
Endpoint.of(app2).target(ClusterSpec.Id.from("c2"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
}
} |
Right, I forgot about that condition. Will fix. | public void test_global_endpoints() {
RotationName rotation = RotationName.from("default");
Map<String, Endpoint> tests = Map.of(
"http:
Endpoint.of(app1).target(rotation).on(Port.plain(4080)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).in(SystemName.cd),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app1).target(RotationName.from("r1")).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app2).target(rotation).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app2).target(RotationName.from("r2")).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls()).directRouting().in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
} | "https: | public void test_global_endpoints() {
RotationName rotation = RotationName.from("default");
Map<String, Endpoint> tests = Map.of(
"http:
Endpoint.of(app1).target(rotation).on(Port.plain(4080)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).in(SystemName.cd),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app1).target(RotationName.from("r1")).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app2).target(rotation).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app2).target(RotationName.from("r2")).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls()).directRouting().in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
} | class EndpointTest {
private static final ApplicationId app1 = ApplicationId.from("t1", "a1", "default");
private static final ApplicationId app2 = ApplicationId.from("t2", "a2", "i2");
@Test
@Test
public void test_zone_endpoints() {
ClusterSpec.Id cluster = ClusterSpec.Id.from("default");
ZoneId prodZone = ZoneId.from("prod", "us-north-1");
ZoneId testZone = ZoneId.from("test", "us-north-2");
Map<String, Endpoint> tests = Map.of(
"http:
Endpoint.of(app1).target(cluster, prodZone).on(Port.plain(4080)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.cd),
"https:
Endpoint.of(app1).target(cluster, testZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).in(SystemName.main),
"https:
Endpoint.of(app2).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public),
"https:
Endpoint.of(app2).target(ClusterSpec.Id.from("c2"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
}
} | class EndpointTest {
private static final ApplicationId app1 = ApplicationId.from("t1", "a1", "default");
private static final ApplicationId app2 = ApplicationId.from("t2", "a2", "i2");
@Test
@Test
public void test_zone_endpoints() {
ClusterSpec.Id cluster = ClusterSpec.Id.from("default");
ZoneId prodZone = ZoneId.from("prod", "us-north-1");
ZoneId testZone = ZoneId.from("test", "us-north-2");
Map<String, Endpoint> tests = Map.of(
"http:
Endpoint.of(app1).target(cluster, prodZone).on(Port.plain(4080)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.cd),
"https:
Endpoint.of(app1).target(cluster, testZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).in(SystemName.main),
"https:
Endpoint.of(app2).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public),
"https:
Endpoint.of(app2).target(ClusterSpec.Id.from("c2"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
}
} |
I'm not sure we should have "global" here, at least we did not include this when it was discussed. Probably my fault as I see I used this in examples on slack. | public void test_global_endpoints() {
RotationName rotation = RotationName.from("default");
Map<String, Endpoint> tests = Map.of(
"http:
Endpoint.of(app1).target(rotation).on(Port.plain(4080)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).in(SystemName.cd),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app1).target(RotationName.from("r1")).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app2).target(rotation).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app2).target(RotationName.from("r2")).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls()).directRouting().in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
} | "https: | public void test_global_endpoints() {
RotationName rotation = RotationName.from("default");
Map<String, Endpoint> tests = Map.of(
"http:
Endpoint.of(app1).target(rotation).on(Port.plain(4080)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).in(SystemName.cd),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app1).target(RotationName.from("r1")).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app2).target(rotation).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app2).target(RotationName.from("r2")).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls()).directRouting().in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
} | class EndpointTest {
private static final ApplicationId app1 = ApplicationId.from("t1", "a1", "default");
private static final ApplicationId app2 = ApplicationId.from("t2", "a2", "i2");
@Test
@Test
public void test_zone_endpoints() {
ClusterSpec.Id cluster = ClusterSpec.Id.from("default");
ZoneId prodZone = ZoneId.from("prod", "us-north-1");
ZoneId testZone = ZoneId.from("test", "us-north-2");
Map<String, Endpoint> tests = Map.of(
"http:
Endpoint.of(app1).target(cluster, prodZone).on(Port.plain(4080)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.cd),
"https:
Endpoint.of(app1).target(cluster, testZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).in(SystemName.main),
"https:
Endpoint.of(app2).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public),
"https:
Endpoint.of(app2).target(ClusterSpec.Id.from("c2"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
}
} | class EndpointTest {
private static final ApplicationId app1 = ApplicationId.from("t1", "a1", "default");
private static final ApplicationId app2 = ApplicationId.from("t2", "a2", "i2");
@Test
@Test
public void test_zone_endpoints() {
ClusterSpec.Id cluster = ClusterSpec.Id.from("default");
ZoneId prodZone = ZoneId.from("prod", "us-north-1");
ZoneId testZone = ZoneId.from("test", "us-north-2");
Map<String, Endpoint> tests = Map.of(
"http:
Endpoint.of(app1).target(cluster, prodZone).on(Port.plain(4080)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.cd),
"https:
Endpoint.of(app1).target(cluster, testZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).in(SystemName.main),
"https:
Endpoint.of(app2).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public),
"https:
Endpoint.of(app2).target(ClusterSpec.Id.from("c2"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
}
} |
It's science! | private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
} | return (double) ascii / lengthToCheck < 0.95; | private static boolean isBinary(byte[] data) {
if (data.length == 0) return false;
int lengthToCheck = Math.min(data.length, 10000);
int ascii = 0;
for (int i = 0; i < lengthToCheck; i++) {
byte b = data[i];
if (b < 0x9) return true;
if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
else if (b >= 0x20 && b <= 0x7E) ascii++;
}
return (double) ascii / lengthToCheck < 0.95;
} | class ApplicationPackageDiff {
public static String diffAgainstEmpty(ApplicationPackage right) {
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
public static String diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
static String diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n";
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return sb.length() == 0 ? "No diff\n" : sb.toString();
}
private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
if (left.map(entry -> entry.content().isEmpty()).orElse(false) || right.map(entry -> entry.content().isEmpty()).orElse(false))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
if (leftContent.map(c -> isBinary(c)).or(() -> rightContent.map(c -> isBinary(c))).orElse(false))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (ByteArrayInputStream stream = new ByteArrayInputStream(data);
InputStreamReader streamReader = new InputStreamReader(stream, StandardCharsets.UTF_8);
BufferedReader bufferedReader = new BufferedReader(streamReader)) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
}
} | class ApplicationPackageDiff {
public static byte[] diffAgainstEmpty(ApplicationPackage right) {
byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return diff(new ApplicationPackage(emptyZip), right);
}
public static byte[] diff(ApplicationPackage left, ApplicationPackage right) {
return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
}
static byte[] diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n".getBytes(StandardCharsets.UTF_8);
Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
StringBuilder sb = new StringBuilder();
List<String> files = Stream.of(leftContents, rightContents)
.flatMap(contents -> contents.keySet().stream())
.sorted()
.distinct()
.collect(Collectors.toList());
for (String file : files) {
if (sb.length() > maxTotalDiffSize)
sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
else
diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
.ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
}
return (sb.length() == 0 ? "No diff\n" : sb.toString()).getBytes(StandardCharsets.UTF_8);
}
private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
return Optional.empty();
if (Stream.of(left, right).flatMap(Optional::stream).anyMatch(entry -> entry.content().isEmpty()))
return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
if (Stream.of(leftContent, rightContent).flatMap(Optional::stream).anyMatch(c -> isBinary(c)))
return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
return LinesComparator.diff(
leftContent.map(c -> lines(c)).orElseGet(List::of),
rightContent.map(c -> lines(c)).orElseGet(List::of))
.map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
}
private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
}
private static List<String> lines(byte[] data) {
List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(data), StandardCharsets.UTF_8))) {
String line;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return lines;
}
} |
Discussed offline: Keep "global" for now and revisit when we change to the final suffix. | public void test_global_endpoints() {
RotationName rotation = RotationName.from("default");
Map<String, Endpoint> tests = Map.of(
"http:
Endpoint.of(app1).target(rotation).on(Port.plain(4080)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).in(SystemName.cd),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app1).target(RotationName.from("r1")).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app2).target(rotation).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app2).target(RotationName.from("r2")).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls()).directRouting().in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
} | "https: | public void test_global_endpoints() {
RotationName rotation = RotationName.from("default");
Map<String, Endpoint> tests = Map.of(
"http:
Endpoint.of(app1).target(rotation).on(Port.plain(4080)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls(4443)).in(SystemName.cd),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app1).target(RotationName.from("r1")).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app2).target(rotation).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app2).target(RotationName.from("r2")).on(Port.tls()).directRouting().in(SystemName.main),
"https:
Endpoint.of(app1).target(rotation).on(Port.tls()).directRouting().in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
} | class EndpointTest {
private static final ApplicationId app1 = ApplicationId.from("t1", "a1", "default");
private static final ApplicationId app2 = ApplicationId.from("t2", "a2", "i2");
@Test
@Test
public void test_zone_endpoints() {
ClusterSpec.Id cluster = ClusterSpec.Id.from("default");
ZoneId prodZone = ZoneId.from("prod", "us-north-1");
ZoneId testZone = ZoneId.from("test", "us-north-2");
Map<String, Endpoint> tests = Map.of(
"http:
Endpoint.of(app1).target(cluster, prodZone).on(Port.plain(4080)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.cd),
"https:
Endpoint.of(app1).target(cluster, testZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).in(SystemName.main),
"https:
Endpoint.of(app2).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public),
"https:
Endpoint.of(app2).target(ClusterSpec.Id.from("c2"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
}
} | class EndpointTest {
private static final ApplicationId app1 = ApplicationId.from("t1", "a1", "default");
private static final ApplicationId app2 = ApplicationId.from("t2", "a2", "i2");
@Test
@Test
public void test_zone_endpoints() {
ClusterSpec.Id cluster = ClusterSpec.Id.from("default");
ZoneId prodZone = ZoneId.from("prod", "us-north-1");
ZoneId testZone = ZoneId.from("test", "us-north-2");
Map<String, Endpoint> tests = Map.of(
"http:
Endpoint.of(app1).target(cluster, prodZone).on(Port.plain(4080)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).legacy().in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.cd),
"https:
Endpoint.of(app1).target(cluster, testZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).in(SystemName.main),
"https:
Endpoint.of(app2).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main),
"https:
Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public),
"https:
Endpoint.of(app2).target(ClusterSpec.Id.from("c2"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
}
} |
did you intend to do api//user or should this be /api/user? | public void testUserManagement() {
ContainerTester tester = new ContainerTester(container, responseFiles);
assertEquals(SystemName.Public, tester.controller().system());
Roles roles = new Roles(tester.controller().system());
Set<Role> operator = Set.of(roles.hostedOperator());
ApplicationId id = ApplicationId.from("my-tenant", "my-app", "default");
tester.assertResponse(request("/application/v4/"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/api/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.data("{\"token\":\"hello\"}"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.roles(operator)
.user("owner@tenant")
.data("{\"token\":\"hello\"}"),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/user/", PUT)
.roles(operator),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"Not authenticated or not a user.\"}", 403);
tester.assertResponse(request("/user/v1/"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOwner(id.tenant())))
.data("{\"user\":\"evil@evil\",\"roleName\":\"hostedOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'hostedOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOwner(id.tenant())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"tenantOperator\"}"),
"{\"message\":\"user 'operator@tenant' is now a member of role 'tenantOperator' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOperator(id.tenant())))
.data("{\"user\":\"admin@tenant\",\"roleName\":\"tenantAdmin\"}"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantOwner(TenantName.from("my-tenant"))))
.data("{\"user\":\"admin@app\",\"roleName\":\"applicationAdmin\"}"),
"{\"error-code\":\"INTERNAL_SERVER_ERROR\",\"message\":\"NullPointerException\"}", 500);
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", POST)
.user("operator@tenant")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-created.json"));
tester.assertResponse(request("/application/v4/tenant/other-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantOperator(id.tenant()))),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantAdmin(id.tenant())))
.data("{\"user\":\"reader@app\",\"roleName\":\"applicationReader\"}"),
"{\"message\":\"user 'reader@app' is now a member of role 'applicationReader' of 'my-app' owned by 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.hostedOperator()))
.data("{\"user\":\"reader@app\",\"roleName\":\"tenantOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'tenantOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant")
.roles(Set.of(roles.applicationReader(id.tenant(), id.application()))),
new File("tenant-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("api
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(roles.applicationAdmin(id.tenant(), id.application())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"applicationAdmin\"}"),
"{\"message\":\"user 'operator@tenant' is no longer a member of role 'applicationAdmin' of 'my-app' owned by 'my-tenant'\"}");
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(roles.applicationAdmin(id.tenant(), id.application()))),
"");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(Set.of(roles.tenantAdmin(id.tenant())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"tenantOperator\"}"),
"{\"message\":\"user 'operator@tenant' is no longer a member of role 'tenantOperator' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(operator)
.data("{\"user\":\"owner@tenant\",\"roleName\":\"tenantOwner\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Can't remove the last owner of a tenant.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/my-tenant", DELETE)
.roles(Set.of(roles.tenantOwner(id.tenant()))),
new File("tenant-without-applications.json"));
} | tester.assertResponse(request("api | public void testUserManagement() {
ContainerTester tester = new ContainerTester(container, responseFiles);
assertEquals(SystemName.Public, tester.controller().system());
Roles roles = new Roles(tester.controller().system());
Set<Role> operator = Set.of(roles.hostedOperator());
ApplicationId id = ApplicationId.from("my-tenant", "my-app", "default");
tester.assertResponse(request("/application/v4/"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/api/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.data("{\"token\":\"hello\"}"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.roles(operator)
.user("owner@tenant")
.data("{\"token\":\"hello\"}"),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/user/", PUT)
.roles(operator),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"Not authenticated or not a user.\"}", 403);
tester.assertResponse(request("/user/v1/"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOwner(id.tenant())))
.data("{\"user\":\"evil@evil\",\"roleName\":\"hostedOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'hostedOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOwner(id.tenant())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"tenantOperator\"}"),
"{\"message\":\"user 'operator@tenant' is now a member of role 'tenantOperator' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOperator(id.tenant())))
.data("{\"user\":\"admin@tenant\",\"roleName\":\"tenantAdmin\"}"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantOwner(TenantName.from("my-tenant"))))
.data("{\"user\":\"admin@app\",\"roleName\":\"applicationAdmin\"}"),
"{\"error-code\":\"INTERNAL_SERVER_ERROR\",\"message\":\"NullPointerException\"}", 500);
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", POST)
.user("operator@tenant")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-created.json"));
tester.assertResponse(request("/application/v4/tenant/other-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantOperator(id.tenant()))),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantAdmin(id.tenant())))
.data("{\"user\":\"reader@app\",\"roleName\":\"applicationReader\"}"),
"{\"message\":\"user 'reader@app' is now a member of role 'applicationReader' of 'my-app' owned by 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.hostedOperator()))
.data("{\"user\":\"reader@app\",\"roleName\":\"tenantOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'tenantOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant")
.roles(Set.of(roles.applicationReader(id.tenant(), id.application()))),
new File("tenant-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("/api/user/v1/tenant/my-tenant/application/my-app")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(roles.applicationAdmin(id.tenant(), id.application())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"applicationAdmin\"}"),
"{\"message\":\"user 'operator@tenant' is no longer a member of role 'applicationAdmin' of 'my-app' owned by 'my-tenant'\"}");
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(roles.applicationAdmin(id.tenant(), id.application()))),
"");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(Set.of(roles.tenantAdmin(id.tenant())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"tenantOperator\"}"),
"{\"message\":\"user 'operator@tenant' is no longer a member of role 'tenantOperator' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(operator)
.data("{\"user\":\"owner@tenant\",\"roleName\":\"tenantOwner\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Can't remove the last owner of a tenant.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/my-tenant", DELETE)
.roles(Set.of(roles.tenantOwner(id.tenant()))),
new File("tenant-without-applications.json"));
} | class UserApiTest extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/";
@Test
} | class UserApiTest extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/";
@Test
} |
What, how could this pass? | public void testUserManagement() {
ContainerTester tester = new ContainerTester(container, responseFiles);
assertEquals(SystemName.Public, tester.controller().system());
Roles roles = new Roles(tester.controller().system());
Set<Role> operator = Set.of(roles.hostedOperator());
ApplicationId id = ApplicationId.from("my-tenant", "my-app", "default");
tester.assertResponse(request("/application/v4/"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/api/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.data("{\"token\":\"hello\"}"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.roles(operator)
.user("owner@tenant")
.data("{\"token\":\"hello\"}"),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/user/", PUT)
.roles(operator),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"Not authenticated or not a user.\"}", 403);
tester.assertResponse(request("/user/v1/"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOwner(id.tenant())))
.data("{\"user\":\"evil@evil\",\"roleName\":\"hostedOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'hostedOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOwner(id.tenant())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"tenantOperator\"}"),
"{\"message\":\"user 'operator@tenant' is now a member of role 'tenantOperator' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOperator(id.tenant())))
.data("{\"user\":\"admin@tenant\",\"roleName\":\"tenantAdmin\"}"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantOwner(TenantName.from("my-tenant"))))
.data("{\"user\":\"admin@app\",\"roleName\":\"applicationAdmin\"}"),
"{\"error-code\":\"INTERNAL_SERVER_ERROR\",\"message\":\"NullPointerException\"}", 500);
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", POST)
.user("operator@tenant")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-created.json"));
tester.assertResponse(request("/application/v4/tenant/other-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantOperator(id.tenant()))),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantAdmin(id.tenant())))
.data("{\"user\":\"reader@app\",\"roleName\":\"applicationReader\"}"),
"{\"message\":\"user 'reader@app' is now a member of role 'applicationReader' of 'my-app' owned by 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.hostedOperator()))
.data("{\"user\":\"reader@app\",\"roleName\":\"tenantOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'tenantOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant")
.roles(Set.of(roles.applicationReader(id.tenant(), id.application()))),
new File("tenant-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("api
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(roles.applicationAdmin(id.tenant(), id.application())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"applicationAdmin\"}"),
"{\"message\":\"user 'operator@tenant' is no longer a member of role 'applicationAdmin' of 'my-app' owned by 'my-tenant'\"}");
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(roles.applicationAdmin(id.tenant(), id.application()))),
"");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(Set.of(roles.tenantAdmin(id.tenant())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"tenantOperator\"}"),
"{\"message\":\"user 'operator@tenant' is no longer a member of role 'tenantOperator' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(operator)
.data("{\"user\":\"owner@tenant\",\"roleName\":\"tenantOwner\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Can't remove the last owner of a tenant.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/my-tenant", DELETE)
.roles(Set.of(roles.tenantOwner(id.tenant()))),
new File("tenant-without-applications.json"));
} | tester.assertResponse(request("api | public void testUserManagement() {
ContainerTester tester = new ContainerTester(container, responseFiles);
assertEquals(SystemName.Public, tester.controller().system());
Roles roles = new Roles(tester.controller().system());
Set<Role> operator = Set.of(roles.hostedOperator());
ApplicationId id = ApplicationId.from("my-tenant", "my-app", "default");
tester.assertResponse(request("/application/v4/"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/api/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.data("{\"token\":\"hello\"}"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.roles(operator)
.user("owner@tenant")
.data("{\"token\":\"hello\"}"),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/user/", PUT)
.roles(operator),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"Not authenticated or not a user.\"}", 403);
tester.assertResponse(request("/user/v1/"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOwner(id.tenant())))
.data("{\"user\":\"evil@evil\",\"roleName\":\"hostedOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'hostedOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOwner(id.tenant())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"tenantOperator\"}"),
"{\"message\":\"user 'operator@tenant' is now a member of role 'tenantOperator' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOperator(id.tenant())))
.data("{\"user\":\"admin@tenant\",\"roleName\":\"tenantAdmin\"}"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantOwner(TenantName.from("my-tenant"))))
.data("{\"user\":\"admin@app\",\"roleName\":\"applicationAdmin\"}"),
"{\"error-code\":\"INTERNAL_SERVER_ERROR\",\"message\":\"NullPointerException\"}", 500);
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", POST)
.user("operator@tenant")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-created.json"));
tester.assertResponse(request("/application/v4/tenant/other-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantOperator(id.tenant()))),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantAdmin(id.tenant())))
.data("{\"user\":\"reader@app\",\"roleName\":\"applicationReader\"}"),
"{\"message\":\"user 'reader@app' is now a member of role 'applicationReader' of 'my-app' owned by 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.hostedOperator()))
.data("{\"user\":\"reader@app\",\"roleName\":\"tenantOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'tenantOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant")
.roles(Set.of(roles.applicationReader(id.tenant(), id.application()))),
new File("tenant-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("/api/user/v1/tenant/my-tenant/application/my-app")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(roles.applicationAdmin(id.tenant(), id.application())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"applicationAdmin\"}"),
"{\"message\":\"user 'operator@tenant' is no longer a member of role 'applicationAdmin' of 'my-app' owned by 'my-tenant'\"}");
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(roles.applicationAdmin(id.tenant(), id.application()))),
"");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(Set.of(roles.tenantAdmin(id.tenant())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"tenantOperator\"}"),
"{\"message\":\"user 'operator@tenant' is no longer a member of role 'tenantOperator' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(operator)
.data("{\"user\":\"owner@tenant\",\"roleName\":\"tenantOwner\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Can't remove the last owner of a tenant.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/my-tenant", DELETE)
.roles(Set.of(roles.tenantOwner(id.tenant()))),
new File("tenant-without-applications.json"));
} | class UserApiTest extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/";
@Test
} | class UserApiTest extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/";
@Test
} |
Some normalisation is probably done, then. No, I didn't intend for this. | public void testUserManagement() {
ContainerTester tester = new ContainerTester(container, responseFiles);
assertEquals(SystemName.Public, tester.controller().system());
Roles roles = new Roles(tester.controller().system());
Set<Role> operator = Set.of(roles.hostedOperator());
ApplicationId id = ApplicationId.from("my-tenant", "my-app", "default");
tester.assertResponse(request("/application/v4/"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/api/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.data("{\"token\":\"hello\"}"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.roles(operator)
.user("owner@tenant")
.data("{\"token\":\"hello\"}"),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/user/", PUT)
.roles(operator),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"Not authenticated or not a user.\"}", 403);
tester.assertResponse(request("/user/v1/"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOwner(id.tenant())))
.data("{\"user\":\"evil@evil\",\"roleName\":\"hostedOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'hostedOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOwner(id.tenant())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"tenantOperator\"}"),
"{\"message\":\"user 'operator@tenant' is now a member of role 'tenantOperator' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOperator(id.tenant())))
.data("{\"user\":\"admin@tenant\",\"roleName\":\"tenantAdmin\"}"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantOwner(TenantName.from("my-tenant"))))
.data("{\"user\":\"admin@app\",\"roleName\":\"applicationAdmin\"}"),
"{\"error-code\":\"INTERNAL_SERVER_ERROR\",\"message\":\"NullPointerException\"}", 500);
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", POST)
.user("operator@tenant")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-created.json"));
tester.assertResponse(request("/application/v4/tenant/other-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantOperator(id.tenant()))),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantAdmin(id.tenant())))
.data("{\"user\":\"reader@app\",\"roleName\":\"applicationReader\"}"),
"{\"message\":\"user 'reader@app' is now a member of role 'applicationReader' of 'my-app' owned by 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.hostedOperator()))
.data("{\"user\":\"reader@app\",\"roleName\":\"tenantOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'tenantOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant")
.roles(Set.of(roles.applicationReader(id.tenant(), id.application()))),
new File("tenant-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("api
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(roles.applicationAdmin(id.tenant(), id.application())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"applicationAdmin\"}"),
"{\"message\":\"user 'operator@tenant' is no longer a member of role 'applicationAdmin' of 'my-app' owned by 'my-tenant'\"}");
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(roles.applicationAdmin(id.tenant(), id.application()))),
"");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(Set.of(roles.tenantAdmin(id.tenant())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"tenantOperator\"}"),
"{\"message\":\"user 'operator@tenant' is no longer a member of role 'tenantOperator' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(operator)
.data("{\"user\":\"owner@tenant\",\"roleName\":\"tenantOwner\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Can't remove the last owner of a tenant.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/my-tenant", DELETE)
.roles(Set.of(roles.tenantOwner(id.tenant()))),
new File("tenant-without-applications.json"));
} | tester.assertResponse(request("api | public void testUserManagement() {
ContainerTester tester = new ContainerTester(container, responseFiles);
assertEquals(SystemName.Public, tester.controller().system());
Roles roles = new Roles(tester.controller().system());
Set<Role> operator = Set.of(roles.hostedOperator());
ApplicationId id = ApplicationId.from("my-tenant", "my-app", "default");
tester.assertResponse(request("/application/v4/"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/api/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.data("{\"token\":\"hello\"}"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.roles(operator)
.user("owner@tenant")
.data("{\"token\":\"hello\"}"),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/user/", PUT)
.roles(operator),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"Not authenticated or not a user.\"}", 403);
tester.assertResponse(request("/user/v1/"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOwner(id.tenant())))
.data("{\"user\":\"evil@evil\",\"roleName\":\"hostedOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'hostedOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOwner(id.tenant())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"tenantOperator\"}"),
"{\"message\":\"user 'operator@tenant' is now a member of role 'tenantOperator' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOperator(id.tenant())))
.data("{\"user\":\"admin@tenant\",\"roleName\":\"tenantAdmin\"}"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantOwner(TenantName.from("my-tenant"))))
.data("{\"user\":\"admin@app\",\"roleName\":\"applicationAdmin\"}"),
"{\"error-code\":\"INTERNAL_SERVER_ERROR\",\"message\":\"NullPointerException\"}", 500);
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", POST)
.user("operator@tenant")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-created.json"));
tester.assertResponse(request("/application/v4/tenant/other-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantOperator(id.tenant()))),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantAdmin(id.tenant())))
.data("{\"user\":\"reader@app\",\"roleName\":\"applicationReader\"}"),
"{\"message\":\"user 'reader@app' is now a member of role 'applicationReader' of 'my-app' owned by 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.hostedOperator()))
.data("{\"user\":\"reader@app\",\"roleName\":\"tenantOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'tenantOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant")
.roles(Set.of(roles.applicationReader(id.tenant(), id.application()))),
new File("tenant-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("/api/user/v1/tenant/my-tenant/application/my-app")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(roles.applicationAdmin(id.tenant(), id.application())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"applicationAdmin\"}"),
"{\"message\":\"user 'operator@tenant' is no longer a member of role 'applicationAdmin' of 'my-app' owned by 'my-tenant'\"}");
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(roles.applicationAdmin(id.tenant(), id.application()))),
"");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(Set.of(roles.tenantAdmin(id.tenant())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"tenantOperator\"}"),
"{\"message\":\"user 'operator@tenant' is no longer a member of role 'tenantOperator' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(operator)
.data("{\"user\":\"owner@tenant\",\"roleName\":\"tenantOwner\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Can't remove the last owner of a tenant.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/my-tenant", DELETE)
.roles(Set.of(roles.tenantOwner(id.tenant()))),
new File("tenant-without-applications.json"));
} | class UserApiTest extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/";
@Test
} | class UserApiTest extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/";
@Test
} |
So this was interpreted as just `/user` — the `8080api` port was interpreted as port 8080. Nice. | public void testUserManagement() {
ContainerTester tester = new ContainerTester(container, responseFiles);
assertEquals(SystemName.Public, tester.controller().system());
Roles roles = new Roles(tester.controller().system());
Set<Role> operator = Set.of(roles.hostedOperator());
ApplicationId id = ApplicationId.from("my-tenant", "my-app", "default");
tester.assertResponse(request("/application/v4/"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/api/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.data("{\"token\":\"hello\"}"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.roles(operator)
.user("owner@tenant")
.data("{\"token\":\"hello\"}"),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/user/", PUT)
.roles(operator),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"Not authenticated or not a user.\"}", 403);
tester.assertResponse(request("/user/v1/"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOwner(id.tenant())))
.data("{\"user\":\"evil@evil\",\"roleName\":\"hostedOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'hostedOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOwner(id.tenant())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"tenantOperator\"}"),
"{\"message\":\"user 'operator@tenant' is now a member of role 'tenantOperator' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOperator(id.tenant())))
.data("{\"user\":\"admin@tenant\",\"roleName\":\"tenantAdmin\"}"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantOwner(TenantName.from("my-tenant"))))
.data("{\"user\":\"admin@app\",\"roleName\":\"applicationAdmin\"}"),
"{\"error-code\":\"INTERNAL_SERVER_ERROR\",\"message\":\"NullPointerException\"}", 500);
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", POST)
.user("operator@tenant")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-created.json"));
tester.assertResponse(request("/application/v4/tenant/other-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantOperator(id.tenant()))),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantAdmin(id.tenant())))
.data("{\"user\":\"reader@app\",\"roleName\":\"applicationReader\"}"),
"{\"message\":\"user 'reader@app' is now a member of role 'applicationReader' of 'my-app' owned by 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.hostedOperator()))
.data("{\"user\":\"reader@app\",\"roleName\":\"tenantOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'tenantOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant")
.roles(Set.of(roles.applicationReader(id.tenant(), id.application()))),
new File("tenant-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("api
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(roles.applicationAdmin(id.tenant(), id.application())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"applicationAdmin\"}"),
"{\"message\":\"user 'operator@tenant' is no longer a member of role 'applicationAdmin' of 'my-app' owned by 'my-tenant'\"}");
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(roles.applicationAdmin(id.tenant(), id.application()))),
"");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(Set.of(roles.tenantAdmin(id.tenant())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"tenantOperator\"}"),
"{\"message\":\"user 'operator@tenant' is no longer a member of role 'tenantOperator' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(operator)
.data("{\"user\":\"owner@tenant\",\"roleName\":\"tenantOwner\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Can't remove the last owner of a tenant.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/my-tenant", DELETE)
.roles(Set.of(roles.tenantOwner(id.tenant()))),
new File("tenant-without-applications.json"));
} | tester.assertResponse(request("api | public void testUserManagement() {
ContainerTester tester = new ContainerTester(container, responseFiles);
assertEquals(SystemName.Public, tester.controller().system());
Roles roles = new Roles(tester.controller().system());
Set<Role> operator = Set.of(roles.hostedOperator());
ApplicationId id = ApplicationId.from("my-tenant", "my-app", "default");
tester.assertResponse(request("/application/v4/"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/api/application/v4/tenant")
.roles(operator),
"[]");
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.data("{\"token\":\"hello\"}"),
accessDenied, 403);
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.roles(operator)
.user("owner@tenant")
.data("{\"token\":\"hello\"}"),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/user/", PUT)
.roles(operator),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"Not authenticated or not a user.\"}", 403);
tester.assertResponse(request("/user/v1/"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOwner(id.tenant())))
.data("{\"user\":\"evil@evil\",\"roleName\":\"hostedOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'hostedOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOwner(id.tenant())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"tenantOperator\"}"),
"{\"message\":\"user 'operator@tenant' is now a member of role 'tenantOperator' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", POST)
.roles(Set.of(roles.tenantOperator(id.tenant())))
.data("{\"user\":\"admin@tenant\",\"roleName\":\"tenantAdmin\"}"),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantOwner(TenantName.from("my-tenant"))))
.data("{\"user\":\"admin@app\",\"roleName\":\"applicationAdmin\"}"),
"{\"error-code\":\"INTERNAL_SERVER_ERROR\",\"message\":\"NullPointerException\"}", 500);
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", POST)
.user("operator@tenant")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-created.json"));
tester.assertResponse(request("/application/v4/tenant/other-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantOperator(id.tenant()))),
accessDenied, 403);
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.tenantAdmin(id.tenant())))
.data("{\"user\":\"reader@app\",\"roleName\":\"applicationReader\"}"),
"{\"message\":\"user 'reader@app' is now a member of role 'applicationReader' of 'my-app' owned by 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", POST)
.roles(Set.of(roles.hostedOperator()))
.data("{\"user\":\"reader@app\",\"roleName\":\"tenantOperator\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Malformed or illegal role name 'tenantOperator'.\"}", 400);
tester.assertResponse(request("/user/v1/tenant/my-tenant")
.roles(Set.of(roles.applicationReader(id.tenant(), id.application()))),
new File("tenant-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("/api/user/v1/tenant/my-tenant/application/my-app")
.roles(Set.of(roles.tenantOperator(id.tenant()))),
new File("application-roles.json"));
tester.assertResponse(request("/user/v1/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(roles.applicationAdmin(id.tenant(), id.application())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"applicationAdmin\"}"),
"{\"message\":\"user 'operator@tenant' is no longer a member of role 'applicationAdmin' of 'my-app' owned by 'my-tenant'\"}");
tester.assertResponse(request("/application/v4/tenant/my-tenant/application/my-app", DELETE)
.roles(Set.of(roles.applicationAdmin(id.tenant(), id.application()))),
"");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(Set.of(roles.tenantAdmin(id.tenant())))
.data("{\"user\":\"operator@tenant\",\"roleName\":\"tenantOperator\"}"),
"{\"message\":\"user 'operator@tenant' is no longer a member of role 'tenantOperator' of 'my-tenant'\"}");
tester.assertResponse(request("/user/v1/tenant/my-tenant", DELETE)
.roles(operator)
.data("{\"user\":\"owner@tenant\",\"roleName\":\"tenantOwner\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Can't remove the last owner of a tenant.\"}", 400);
tester.assertResponse(request("/application/v4/tenant/my-tenant", DELETE)
.roles(Set.of(roles.tenantOwner(id.tenant()))),
new File("tenant-without-applications.json"));
} | class UserApiTest extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/";
@Test
} | class UserApiTest extends ControllerContainerCloudTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/";
@Test
} |
all `valueOf` methods: default should just return "unknown" so the API doesn't break if we add types. | private static String valueOf(Node.ClusterType type) {
switch (type) {
case admin: return "admin";
case content: return "content";
case container: return "container";
default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
}
} | default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'."); | private static String valueOf(Node.ClusterType type) {
switch (type) {
case admin: return "admin";
case content: return "content";
case container: return "container";
default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
}
} | class ApplicationApiHandler extends LoggingRequestHandler {
private static final String OPTIONAL_PREFIX = "/api";
private final Controller controller;
private final AccessControlRequests accessControlRequests;
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx);
this.controller = controller;
this.accessControlRequests = accessControlRequests;
}
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case PUT: return handlePUT(request);
case POST: return handlePOST(request);
case PATCH: return handlePATCH(request);
case DELETE: return handleDELETE(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
return ErrorResponse.from(e);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/user")) return authenticatedUser(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePUT(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/user")) return createUser(request);
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePOST(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePATCH(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}"))
return setMajorVersion(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleDELETE(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleOPTIONS() {
EmptyJsonResponse response = new EmptyJsonResponse();
response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
return response;
}
private HttpResponse recursiveRoot(HttpRequest request) {
Slime slime = new Slime();
Cursor tenantArray = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
toSlime(tenantArray.addObject(), tenant, request);
return new SlimeJsonResponse(slime);
}
private HttpResponse root(HttpRequest request) {
return recurseOverTenants(request)
? recursiveRoot(request)
: new ResourceResponse(request, "user", "tenant");
}
private HttpResponse authenticatedUser(HttpRequest request) {
Principal user = requireUserPrincipal(request);
if (user == null)
throw new NotAuthorizedException("You must be authenticated.");
String userName = user instanceof AthenzPrincipal ? ((AthenzPrincipal) user).getIdentity().getName() : user.getName();
TenantName tenantName = TenantName.from(UserTenant.normalizeUser(userName));
List<Tenant> tenants = controller.tenants().asList(new Credentials(user));
Slime slime = new Slime();
Cursor response = slime.setObject();
response.setString("user", userName);
Cursor tenantsArray = response.setArray("tenants");
for (Tenant tenant : tenants)
tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant.name().equals(tenantName)));
return new SlimeJsonResponse(slime);
}
private HttpResponse tenants(HttpRequest request) {
Slime slime = new Slime();
Cursor response = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
return new SlimeJsonResponse(slime);
}
private HttpResponse tenant(String tenantName, HttpRequest request) {
return controller.tenants().get(TenantName.from(tenantName))
.map(tenant -> tenant(tenant, request))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
}
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), tenant, request);
return new SlimeJsonResponse(slime);
}
private HttpResponse applications(String tenantName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
Slime slime = new Slime();
Cursor array = slime.setArray();
for (Application application : controller.applications().asList(tenant))
toSlime(application, array.addObject(), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse setMajorVersion(String tenantName, String applicationName, HttpRequest request) {
Application application = getApplication(tenantName, applicationName);
Inspector majorVersionField = toSlime(request.getData()).get().field("majorVersion");
if ( ! majorVersionField.valid())
throw new IllegalArgumentException("Request body must contain a majorVersion field");
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int)majorVersionField.asLong();
controller.applications().lockIfPresent(application.id(),
a -> controller.applications().store(a.withMajorVersion(majorVersion)));
return new MessageResponse("Set major version to " + ( majorVersion == null ? "empty" : majorVersion));
}
private Application getApplication(String tenantName, String applicationName) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
return controller.applications().get(applicationId)
.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
List<Node> nodes = controller.configServer().nodeRepository().list(zone, id);
Slime slime = new Slime();
Cursor nodesArray = slime.setObject().setArray("nodes");
for (Node node : nodes) {
Cursor nodeObject = nodesArray.addObject();
nodeObject.setString("hostname", node.hostname().value());
nodeObject.setString("state", valueOf(node.state()));
nodeObject.setString("orchestration", valueOf(node.serviceState()));
nodeObject.setString("version", node.currentVersion().toString());
nodeObject.setString("flavor", node.canonicalFlavor());
nodeObject.setString("clusterId", node.clusterId());
nodeObject.setString("clusterType", valueOf(node.clusterType()));
}
return new SlimeJsonResponse(slime);
}
private static String valueOf(Node.State state) {
switch (state) {
case failed: return "failed";
case parked: return "parked";
case dirty: return "dirty";
case ready: return "ready";
case active: return "active";
case inactive: return "inactive";
case reserved: return "reserved";
case provisioned: return "provisioned";
default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
}
private static String valueOf(Node.ServiceState state) {
switch (state) {
case expectedUp: return "expectedUp";
case allowedDown: return "allowedDown";
case unorchestrated: return "unorchestrated";
default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
}
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
if (queryParameters.containsKey("streaming")) {
InputStream logStream = controller.configServer().getLogStream(deployment, queryParameters);
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
logStream.transferTo(outputStream);
}
};
}
Optional<Logs> response = controller.configServer().getLogs(deployment, queryParameters);
Slime slime = new Slime();
Cursor object = slime.setObject();
if (response.isPresent()) {
response.get().logs().entrySet().stream().forEach(entry -> object.setString(entry.getKey(), entry.getValue()));
}
return new SlimeJsonResponse(slime);
}
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
String triggered = controller.applications().deploymentTrigger()
.forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName())
.stream().map(JobType::jobName).collect(joining(", "));
return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
: "Triggered " + triggered + " for " + id);
}
private HttpResponse pause(ApplicationId id, JobType type) {
Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
controller.applications().deploymentTrigger().pauseJob(id, type, until);
return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/**
 * Serializes the full application view into the given Slime cursor: identity, source,
 * change status, job statuses, change blockers, rotations, deployments, metrics,
 * activity and issue/ownership metadata.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
// Identity and a link to this application's job listing.
object.setString("tenant", application.id().tenant().value());
object.setString("application", application.id().application().value());
object.setString("instance", application.id().instance().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + application.id().instance().value() + "/job/",
request.getUri()).toString());
// Source revision of the last successful component build, if any.
application.deploymentJobs().statusOf(JobType.component)
.flatMap(JobStatus::lastSuccess)
.map(run -> run.application().source())
.ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source")));
application.deploymentJobs().projectId()
.ifPresent(id -> object.setLong("projectId", id));
// Current and queued changes, omitted when empty.
if ( ! application.change().isEmpty()) {
toSlime(object.setObject("deploying"), application.change());
}
if ( ! application.outstandingChange().isEmpty()) {
toSlime(object.setObject("outstandingChange"), application.outstandingChange());
}
// Job statuses, ordered by the application's deployment spec steps.
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedJobs(application.deploymentJobs().jobStatus().values());
object.setBool("deployedInternally", application.deploymentJobs().deployedInternally());
Cursor deploymentsArray = object.setArray("deploymentJobs");
for (JobStatus job : jobStatus) {
Cursor jobObject = deploymentsArray.addObject();
jobObject.setString("type", job.type().jobName());
jobObject.setBool("success", job.isSuccess());
job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
}
// Change-blocker windows from the deployment spec.
Cursor changeBlockers = object.setArray("changeBlockers");
application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
});
object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString());
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
// Global rotation endpoints: legacy global DNS name (plain, secure and oath URLs) ...
Cursor globalRotationsArray = object.setArray("globalRotations");
application.globalDnsName(controller.system()).ifPresent(rotation -> {
globalRotationsArray.addString(rotation.url().toString());
globalRotationsArray.addString(rotation.secureUrl().toString());
globalRotationsArray.addString(rotation.oathUrl().toString());
// NOTE(review): assumes application.rotation() is present whenever a global DNS name exists — verify
object.setString("rotationId", application.rotation().get().asString());
});
// ... plus oath URLs derived from per-policy rotations.
Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies(application.id());
for (RoutingPolicy policy : routingPolicies) {
for (RotationName rotation : policy.rotations()) {
GlobalDnsName dnsName = new GlobalDnsName(application.id(), controller.system(), rotation);
globalRotationsArray.addString(dnsName.oathUrl().toString());
}
}
// Deployments, ordered by the deployment spec; recursion expands each deployment in place.
List<Deployment> deployments = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedDeployments(application.deployments().values());
Cursor instancesArray = object.setArray("instances");
for (Deployment deployment : deployments) {
Cursor deploymentObject = instancesArray.addObject();
// Rotation (BCP) status is only relevant for production deployments of rotated applications.
if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) {
toSlime(application.rotationStatus(deployment), deploymentObject);
}
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
else {
// Shallow listing: identity plus a link to the full deployment resource.
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("instance", application.id().instance().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value() +
"/instance/" + application.id().instance().value(),
request.getUri()).toString());
}
}
// Application-level metrics, activity and issue/ownership metadata.
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns full details for one deployment of the given application; 404 if either is missing. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().get(applicationId)
                                        .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    DeploymentId deploymentId = new DeploymentId(application.id(), ZoneId.from(environment, region));
    Deployment deployment = application.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId());
    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes a change: its platform version, and its application version when known. */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(platform -> object.setString("version", platform.toString()));
    change.application().ifPresent(version -> {
        if ( ! version.isUnknown())
            toSlime(version, object.setObject("revision"));
    });
}
/**
 * Serializes one deployment into the given Slime cursor: identity, service URLs,
 * versions, expiry, source revision, activity, cost and metrics.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
// Identity of the deployment.
response.setString("tenant", deploymentId.applicationId().tenant().value());
response.setString("application", deploymentId.applicationId().application().value());
response.setString("instance", deploymentId.applicationId().instance().value());
response.setString("environment", deploymentId.zoneId().environment().value());
response.setString("region", deploymentId.zoneId().region().value());
// Endpoints serving this deployment, when known.
Cursor serviceUrlArray = response.setArray("serviceUrls");
controller.applications().getDeploymentEndpoints(deploymentId)
.ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())));
// Link to the node repository listing for this deployment, and its monitoring dashboard.
response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
response.setString("version", deployment.version().toFullString());
response.setString("revision", deployment.applicationVersion().id());
response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
// Expiry only applies in zones with a configured deployment time-to-live (e.g. dev/test).
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId()
.ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
sourceRevisionToSlime(deployment.applicationVersion().source(), response);
// Activity: last read/write times and rates, each present only when recorded.
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
// Cost and serving metrics.
DeploymentCost appCost = deployment.calculateCost();
Cursor costObject = response.setObject("cost");
toSlime(appCost, costObject);
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes an application version's id and source revision; unknown versions are omitted entirely. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return;
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
}
/** Serializes a source revision's repository, branch and commit; a no-op when absent. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/**
 * Serializes the BCP rotation status.
 * Uses Locale.ROOT for upper-casing so the emitted value is locale-independent
 * (the default locale would e.g. map 'i' to dotted 'İ' under a Turkish JVM locale).
 */
private void toSlime(RotationStatus status, Cursor object) {
    Cursor bcpStatus = object.setObject("bcpStatus");
    bcpStatus.setString("rotationStatus", status.name().toUpperCase(java.util.Locale.ROOT));
}
/** Returns the monitoring-system URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Sets the global-rotation override for one deployment, taking it in or out of service.
 * Requires a "reason" field in the request body; records the acting user and the time.
 */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = ZoneId.from(environment, region);
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(application + " has no deployment in " + zone);
    String reason = mandatory("reason", toSlime(request.getData()).get()).asString();
    EndpointStatus endpointStatus = new EndpointStatus(inService ? EndpointStatus.Status.in : EndpointStatus.Status.out,
                                                       reason,
                                                       requireUserPrincipal(request).getName(),
                                                       controller.clock().instant().getEpochSecond());
    controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()),
                                                      endpointStatus);
    return new MessageResponse(String.format("Successfully set %s in %s.%s %s service",
                                             application.id().toShortString(),
                                             deployment.zone().environment().value(),
                                             deployment.zone().region().value(),
                                             inService ? "in" : "out of"));
}
/**
 * Returns the global-rotation override status for each routing endpoint of the deployment.
 * The response array alternates between an endpoint's upstream name and its status object.
 * Iterates over map entries directly instead of the original keySet() + get()-per-key.
 */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    Map<RoutingEndpoint, EndpointStatus> status = controller.applications().globalRotationStatus(deploymentId);
    for (Map.Entry<RoutingEndpoint, EndpointStatus> entry : status.entrySet()) {
        EndpointStatus currentStatus = entry.getValue();
        array.addString(entry.getKey().upstreamName());
        Cursor statusObject = array.addObject();
        statusObject.setString("status", currentStatus.getStatus().name());
        // Reason and agent may be unset; emit empty strings rather than "null".
        statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
        statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
        statusObject.setLong("timestamp", currentStatus.getEpoch());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation (BCP) status of one deployment; 404 if the application has no global rotation. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = ZoneId.from(environment, region);
    if ( ! application.rotation().isPresent())
        throw new NotExistsException("global rotation does not exist for " + application);
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(application + " has no deployment in " + zone);
    Slime slime = new Slime();
    toSlime(application.rotationStatus(deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the change currently rolling out for the application, or an empty object when idle. */
private HttpResponse deploying(String tenant, String application, HttpRequest request) {
    Application app = controller.applications().require(ApplicationId.from(tenant, application, "default"));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = app.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended by the orchestrator. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Returns the service view for one deployment, listing its services and links to their APIs. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = ZoneId.from(environment, region);
    ApplicationId applicationId = new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         applicationId,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/** Proxies a request for one service's API resource under a deployment. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath);
    ZoneId zone = ZoneId.from(environment, region);
    ApplicationId applicationId = new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         applicationId,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/**
 * Creates a user tenant for the authenticated Athenz user.
 * 403 unless the principal is an Athenz user; idempotent when the tenant already exists.
 */
private HttpResponse createUser(HttpRequest request) {
    String user = Optional.of(requireUserPrincipal(request))
                          .filter(principal -> principal instanceof AthenzPrincipal)
                          .map(principal -> ((AthenzPrincipal) principal).getIdentity())
                          .filter(identity -> identity instanceof AthenzUser)
                          .map(AthenzIdentity::getName)
                          .map(UserTenant::normalizeUser)
                          .orElseThrow(() -> new ForbiddenException("Not authenticated or not a user."));
    try {
        controller.tenants().createUser(UserTenant.create(user));
        return new MessageResponse("Created user '" + user + "'");
    }
    catch (AlreadyExistsException e) {
        return new MessageResponse("User '" + user + "' already exists");
    }
}
/**
 * Updates an existing tenant from the request body; 404 if the tenant does not exist.
 * Reuses the parsed TenantName instead of recomputing TenantName.from(tenantName) twice.
 */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 before attempting the update
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/**
 * Creates a tenant from the request body and returns its representation.
 * Reuses the parsed TenantName instead of recomputing TenantName.from(tenantName) twice.
 */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(tenant), request);
}
/**
 * Creates an application under the given tenant and returns its representation.
 * User tenants need no credentials; Athenz authorization failures become 403.
 */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    try {
        boolean userTenant = controller.tenants().require(id.tenant()).type() == Tenant.Type.user;
        Optional<Credentials> credentials = userTenant
                ? Optional.empty()
                : Optional.of(accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()));
        Slime slime = new Slime();
        toSlime(controller.applications().createApplication(id, credentials), slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }
    catch (ZmsClientException e) {
        if (e.getErrorCode() != com.yahoo.jdisc.Response.Status.FORBIDDEN)
            throw e;
        throw new ForbiddenException("Not authorized to create application", e);
    }
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, boolean pin, HttpRequest request) {
    request = controller.auditLogger().log(request);
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        // An empty version means "deploy the current system version".
        Version requested = Version.fromString(versionString);
        Version version = requested.equals(Version.emptyVersion) ? controller.systemVersion() : requested;
        if ( ! systemHasVersion(version)) {
            String activeVersions = controller.versionStatus().versions()
                                              .stream()
                                              .map(VespaVersion::versionNumber)
                                              .map(Version::toString)
                                              .collect(joining(", "));
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + activeVersions);
        }
        Change change = pin ? Change.of(version).withPin() : Change.of(version);
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}
/**
 * Trigger deployment to the last known application package for the given application.
 * Fails with an IllegalArgumentException (mapped to a client error) instead of the
 * original bare NoSuchElementException from chained Optional.get() when the application
 * has no successful component build to deploy.
 */
private HttpResponse deployApplication(String tenantName, String applicationName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        ApplicationVersion version = application.get().deploymentJobs().statusOf(JobType.component)
                                                .flatMap(JobStatus::lastSuccess)
                                                .map(run -> run.application())
                                                .orElseThrow(() -> new IllegalArgumentException("No successful build of " + id + " to deploy"));
        Change change = Change.of(version);
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}
/**
 * Cancel ongoing change for given application, e.g., everything with {"cancel":"all"}.
 * Upper-cases the choice with Locale.ROOT so enum lookup is locale-independent
 * (default-locale upper-casing breaks e.g. under a Turkish JVM locale, where 'i' maps to 'İ').
 */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Change change = application.get().change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for " + application + " at this time");
            return;
        }
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase(java.util.Locale.ROOT));
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '" + change + "' to '" +
                        controller.applications().require(id).change() + "' for " + application);
    });
    return new MessageResponse(response.toString());
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    // Restart just one host when a "hostname" property is given, otherwise the whole deployment.
    Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
    controller.applications().restart(deploymentId, hostname);
    return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName,
                                                             ApplicationResource.API_PATH, applicationName,
                                                             EnvironmentResource.API_PATH, environment,
                                                             "region", region,
                                                             "instance", instanceName));
}
/**
 * Deploys an application package to one zone. The multipart request must contain
 * "deployOptions" (JSON) and may contain "applicationZip". The system "zone"
 * application is special-cased; otherwise the package comes from the upload, from a
 * given source revision + build number, or from the currently deployed version when
 * redeploying directly.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
Map<String, byte[]> dataParts = new MultipartParser().parse(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
/*
* Special handling of the zone application (the only system application with an application package)
* Setting any other deployOptions here is not supported for now (e.g. specifying version), but
* this might be handy later to handle emergency downgrades.
*/
boolean isZoneApplication = SystemApplication.zone.id().equals(applicationId);
if (isZoneApplication) {
// Only the current system version may be deployed, and never mid-upgrade.
String versionStr = deployOptions.field("vespaVersion").asString();
boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
if (versionPresent) {
throw new RuntimeException("Version not supported for system applications");
}
if (controller.versionStatus().isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
ActivateResult result = controller.applications()
.deploySystemApplicationPackage(SystemApplication.zone, zone, systemVersion.get().versionNumber());
return new SlimeJsonResponse(toSlime(result));
}
/*
* Normal applications from here
*/
Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
.map(ApplicationPackage::new);
// A source revision and build number identify a registered package; exclusive with an uploaded zip.
Inspector sourceRevision = deployOptions.field("sourceRevision");
Inspector buildNumber = deployOptions.field("buildNumber");
if (sourceRevision.valid() != buildNumber.valid())
throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
Optional<ApplicationVersion> applicationVersion = Optional.empty();
if (sourceRevision.valid()) {
if (applicationPackage.isPresent())
throw new IllegalArgumentException("Application version and application package can't both be provided.");
applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
buildNumber.asLong()));
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
/*
* Deploy direct is when we want to redeploy the current application - retrieve version
* info from the application package before deploying
*/
if(deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) {
Optional<Deployment> deployment = controller.applications().get(applicationId)
.map(Application::deployments)
.flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
if(!deployment.isPresent())
throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
ApplicationVersion version = deployment.get().applicationVersion();
if(version.isUnknown())
throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
applicationVersion = Optional.of(version);
vespaVersion = Optional.of(deployment.get().version());
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
vespaVersion,
deployOptions.field("ignoreValidationErrors").asBool(),
deployOptions.field("deployCurrentVersion").asBool());
ActivateResult result = controller.applications().deploy(applicationId,
zone,
applicationPackage,
applicationVersion,
deployOptionsJsonClass,
Optional.of(requireUserPrincipal(request)));
return new SlimeJsonResponse(toSlime(result));
}
/** Deletes a tenant; user tenants need no credentials, others are authorized via the request body. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
    if (tenant.get().type() == Tenant.Type.user) {
        controller.tenants().deleteUser((UserTenant) tenant.get());
    }
    else {
        Credentials credentials = accessControlRequests.credentials(tenant.get().name(),
                                                                    toSlime(request.getData()).get(),
                                                                    request.getJDiscRequest());
        controller.tenants().delete(tenant.get().name(), credentials);
    }
    return tenant(tenant.get(), request);
}
/** Deletes the default instance of the given application; user tenants need no credentials. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    Optional<Credentials> credentials;
    if (controller.tenants().require(id.tenant()).type() == Tenant.Type.user)
        credentials = Optional.empty();
    else
        credentials = Optional.of(accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()));
    controller.applications().deleteApplication(id, credentials);
    return new EmptyJsonResponse();
}
/** Deactivates (removes) one deployment of the given application. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().require(id);
    controller.applications().deactivate(application.id(), ZoneId.from(environment, region));
    return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName,
                                                    ApplicationResource.API_PATH, applicationName,
                                                    EnvironmentResource.API_PATH, environment,
                                                    "region", region,
                                                    "instance", instanceName));
}
/**
 * Promote application Chef environments. To be used by component jobs only
 */
private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String from = chefEnvironment.systemChefEnvironment();
        String to = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        controller.chefClient().copyChefEnvironment(from, to);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", from, to));
    }
    catch (Exception e) {
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}
/**
 * Promote application Chef environments for jobs that deploy applications
 */
private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String from = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        String to = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
        controller.chefClient().copyChefEnvironment(from, to);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", from, to));
    }
    catch (Exception e) {
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}
/**
 * Registers completion of a deployment job reported by the external build system.
 * Component-job reports are rejected for applications that are deployed internally.
 */
private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) {
    try {
        DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get());
        boolean internallyDeployed = report.jobType() == JobType.component
                                     && controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally();
        if (internallyDeployed)
            throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " +
                                               "longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
                                               "to the old pipeline, please file a ticket at yo/vespa-support and request this.");
        controller.applications().deploymentTrigger().notifyOfCompletion(report);
        return new MessageResponse("ok");
    }
    catch (IllegalStateException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
}
/** Builds a JobReport from the JSON payload of a job-completion notification. */
private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) {
    Inspector error = report.field("jobError");
    Optional<DeploymentJobs.JobError> jobError = error.valid()
            ? Optional.of(DeploymentJobs.JobError.valueOf(error.asString()))
            : Optional.empty();
    ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString());
    JobType type = JobType.fromJobName(report.field("jobName").asString());
    long buildNumber = report.field("buildNumber").asLong();
    // Component reports carry a project id and source revision; ordinary job reports do not.
    return type == JobType.component
            ? DeploymentJobs.JobReport.ofComponent(id,
                                                   report.field("projectId").asLong(),
                                                   buildNumber,
                                                   jobError,
                                                   toSourceRevision(report.field("sourceRevision")))
            : DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError);
}
/** Reads a SourceRevision from JSON; "repository", "branch" and "commit" are all required. */
private static SourceRevision toSourceRevision(Inspector object) {
    boolean complete =    object.field("repository").valid()
                       && object.field("branch").valid()
                       && object.field("commit").valid();
    if ( ! complete)
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(object.field("repository").asString(),
                              object.field("branch").asString(),
                              object.field("commit").asString());
}
/** Returns the tenant with the given name, or throws NotExistsException if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    return tenant.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/**
 * Renders a tenant in full detail: identity, type-specific metadata, and its default-instance
 * applications (recursively when the request asks for it).
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tentantType(tenant));
switch (tenant.type()) {
case athenz:
// Athenz tenants carry domain, property and (optionally) contact metadata.
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
// Contacts are rendered as an array of arrays of person names.
Cursor contactsArray = object.setArray("contacts");
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
case user: break;
case cloud: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
// Only default instances are listed; recursive requests get full application detail.
Cursor applicationArray = object.setArray("applications");
for (Application application : controller.applications().asList(tenant.name())) {
if (application.id().instance().isDefault()) {
if (recurseOverApplications(request))
toSlime(applicationArray.addObject(), application, request);
else
toSlime(application, applicationArray.addObject(), request);
}
}
}
/** Renders a brief tenant entry for list responses: name, type metadata and a self url. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tentantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
break;
case user: break;
case cloud: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
try {
return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
}
catch (URISyntaxException e) {
// All components come from an already-parsed URI, so this cannot happen in practice.
throw new RuntimeException("Will not happen", e);
}
}
/**
 * Parses the given value as a long.
 *
 * @param valueOrNull the string to parse, possibly null
 * @param defaultWhenNull the value to return when the input is null
 * @throws IllegalArgumentException if the input is non-null and not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Attach the cause so debugging retains full context (the original dropped it).
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/** Renders a job run: id, platform version, optional application revision, reason and timestamp. */
private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
object.setLong("id", jobRun.id());
object.setString("version", jobRun.platform().toFullString());
// Only include the revision when it is actually known.
if (!jobRun.application().isUnknown())
toSlime(jobRun.application(), object.setObject("revision"));
object.setString("reason", jobRun.reason());
object.setLong("at", jobRun.at().toEpochMilli());
}
/**
 * Reads the given stream (at most 1 MB) and parses the bytes as JSON.
 *
 * @throws RuntimeException wrapping the IOException if reading the stream fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Preserve the cause: the original threw a bare RuntimeException, losing all context.
        throw new RuntimeException("Failed reading request body", e);
    }
}
/** Returns the authenticated user principal of the request, failing if none is set. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal != null)
        return principal;
    throw new InternalServerErrorException("Expected a user principal");
}
/** Returns the given field of the object, or throws IllegalArgumentException if it is missing. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the string value of the given field, if present. */
private Optional<String> optional(String key, Inspector object) {
    Inspector field = object.field(key);
    return SlimeUtils.optionalString(field);
}
/** Joins the string forms of the given elements with '/' into a path. */
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
/** Renders a brief application reference: its identity plus a self url. */
private void toSlime(Application application, Cursor object, HttpRequest request) {
    ApplicationId id = application.id();
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String selfPath = "/application/v4/tenant/" + id.tenant().value() +
                      "/application/" + id.application().value();
    object.setString("url", withPath(selfPath, request.getUri()).toString());
}
/**
 * Renders the result of a deployment activation: revision, package size, prepare log
 * messages, and the config change actions (restarts and refeeds) the change requires.
 */
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
// Log messages produced by the config server during prepare, if any.
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
// Services which must be restarted for the config change to take effect.
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
// Document types which must be refed for the config change to take effect.
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setBool("allowed", refeedAction.allowed);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
/** Renders each service info as an object in the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(service -> {
        Cursor object = array.addObject();
        object.setString("serviceName", service.serviceName);
        object.setString("serviceType", service.serviceType);
        object.setString("configId", service.configId);
        object.setString("hostName", service.hostName);
    });
}
/** Adds each of the given strings to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/**
 * Reads the entire stream as one string, or returns null when the stream is empty.
 * The Scanner is deliberately not closed here, as that would close the caller's stream.
 */
private String readToString(InputStream stream) {
    // Specify the charset explicitly: the no-arg Scanner constructor decodes with the
    // platform default charset, which would make request parsing depend on server config.
    Scanner scanner = new Scanner(stream, "UTF-8").useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}
/** Returns whether the given version is among the versions currently known in this system. */
private boolean systemHasVersion(Version version) {
    return controller.versionStatus().versions().stream()
                     .map(v -> v.versionNumber())
                     .anyMatch(number -> number.equals(version));
}
/** Renders the cost of a deployment: total tco, waste and utilization, plus a per-cluster breakdown. */
public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
object.setLong("tco", (long)deploymentCost.getTco());
object.setLong("waste", (long)deploymentCost.getWaste());
object.setDouble("utilization", deploymentCost.getUtilization());
Cursor clustersObject = object.setObject("cluster");
for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet())
toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey()));
}
/** Renders the cost details of one cluster: size, flavor, utilization, usage and hostnames. */
private static void toSlime(ClusterCost clusterCost, Cursor object) {
object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
// NOTE(review): tco/waste are narrowed to int before widening into setLong, unlike the
// (long) casts used in the deployment-level toSlime above — confirm this is intentional.
object.setLong("tco", (int)clusterCost.getTco());
object.setLong("waste", (int)clusterCost.getWaste());
object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
// Normalized utilization figures.
Cursor utilObject = object.setObject("util");
utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
// Raw system usage figures.
Cursor usageObject = object.setObject("usage");
usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
Cursor hostnamesArray = object.setArray("hostnames");
for (String hostname : clusterCost.getClusterInfo().getHostnames())
hostnamesArray.addString(hostname);
}
/** Returns the name of the most utilized resource; "cpu" when no other resource matches the max. */
private static String getResourceName(ClusterUtilization utilization) {
    double max = utilization.getMaxUtilization();
    // Same tie-breaking priority as before: mem, then disk, then diskbusy, default cpu.
    if (utilization.getMemory() == max) return "mem";
    if (utilization.getDisk() == max) return "disk";
    if (utilization.getDiskBusy() == max) return "diskbusy";
    return "cpu";
}
/** Returns whether the "recursive" parameter asks for detail at tenant level or deeper. */
private static boolean recurseOverTenants(HttpRequest request) {
    if ("tenant".equals(request.getProperty("recursive"))) return true;
    return recurseOverApplications(request);
}
/** Returns whether the "recursive" parameter asks for detail at application level or deeper. */
private static boolean recurseOverApplications(HttpRequest request) {
    if ("application".equals(request.getProperty("recursive"))) return true;
    return recurseOverDeployments(request);
}
/** Returns whether the "recursive" parameter asks for detail at deployment level. */
private static boolean recurseOverDeployments(HttpRequest request) {
    String recursive = request.getProperty("recursive");
    return "all".equals(recursive) || "true".equals(recursive) || "deployment".equals(recursive);
}
/**
 * Returns the API name of the given tenant's type.
 * (The method name keeps its historical misspelling; renaming would touch all callers.)
 */
private static String tentantType(Tenant tenant) {
    switch (tenant.type()) {
        case user: return "USER";
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        // Report the value actually switched on, consistent with the other tenant-type
        // switches in this file — the class simple name was misleading here.
        default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.type());
    }
}
/** Reads the application id from the tenant, application and instance path segments. */
private static ApplicationId appIdFromPath(Path path) {
    String tenant = path.get("tenant");
    String application = path.get("application");
    String instance = path.get("instance");
    return ApplicationId.from(tenant, application, instance);
}
/** Reads the job type from the jobtype path segment. */
private static JobType jobTypeFromPath(Path path) {
    String jobName = path.get("jobtype");
    return JobType.fromJobName(jobName);
}
/** Reads a run id — application, job type and run number — from the request path. */
private static RunId runIdFromPath(Path path) {
    ApplicationId id = appIdFromPath(path);
    JobType type = jobTypeFromPath(path);
    long number = Long.parseLong(path.get("number"));
    return new RunId(id, type, number);
}
/**
 * Handles an application submission: a multipart request carrying submit options (JSON),
 * the application package zip and the test package zip.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = new MultipartParser().parse(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
SourceRevision sourceRevision = toSourceRevision(submitOptions);
String authorEmail = submitOptions.field("authorEmail").asString();
// Project id is clamped to at least 1 (0/missing means "unknown").
long projectId = Math.max(1, submitOptions.field("projectId").asLong());
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
// Verify the declared deployment identity before accepting the package.
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
tenant,
application,
sourceRevision,
authorEmail,
projectId,
applicationPackage,
dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
}

class ApplicationApiHandler extends LoggingRequestHandler {
// Path prefix which may optionally precede /application/v4 routes.
private static final String OPTIONAL_PREFIX = "/api";
// Facade to tenants, applications, config servers and the rest of the controller.
private final Controller controller;
// Parses access-control data from requests — presumably used by tenant create/update handlers outside this chunk; verify.
private final AccessControlRequests accessControlRequests;
/** Creates the handler; all dependencies are injected by the container. */
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx);
this.controller = controller;
this.accessControlRequests = accessControlRequests;
}
/** Requests to this handler may run long (e.g. deployments), so allow a generous timeout. */
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
/**
 * Dispatches by HTTP method and translates known exception types to HTTP error responses.
 * Catch clauses are ordered from most specific to the RuntimeException catch-all.
 */
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case PUT: return handlePUT(request);
case POST: return handlePOST(request);
case PATCH: return handlePATCH(request);
case DELETE: return handleDELETE(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
return ErrorResponse.from(e);
}
catch (RuntimeException e) {
// Unexpected errors are logged with full stack trace before being masked as 500.
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
/** Routes GET requests to the matching resource handler; unmatched paths yield 404. */
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/user")) return authenticatedUser(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PUT requests to the matching resource handler; unmatched paths yield 404. */
private HttpResponse handlePUT(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/user")) return createUser(request);
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes POST requests to the matching resource handler; unmatched paths yield 404. */
private HttpResponse handlePOST(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests; currently only application major-version pinning is patchable. */
private HttpResponse handlePATCH(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}"))
return setMajorVersion(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes DELETE requests to the matching resource handler; unmatched paths yield 404. */
private HttpResponse handleDELETE(HttpRequest request) {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Responds to OPTIONS with an empty body and the list of allowed methods. */
private HttpResponse handleOPTIONS() {
EmptyJsonResponse response = new EmptyJsonResponse();
response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
return response;
}
/** Lists every tenant with full detail. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> toSlime(tenantArray.addObject(), tenant, request));
    return new SlimeJsonResponse(slime);
}
/** Handles GET /application/v4/: a recursive tenant listing, or just the resource links. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "user", "tenant");
}
/**
 * Renders the authenticated user and the tenants it may access.
 * NOTE(review): requireUserPrincipal already throws when no principal is present, so the
 * null check below appears unreachable — confirm whether a 401 was intended instead of 500.
 */
private HttpResponse authenticatedUser(HttpRequest request) {
Principal user = requireUserPrincipal(request);
if (user == null)
throw new NotAuthorizedException("You must be authenticated.");
// Athenz principals expose their identity name; fall back to the plain principal name.
String userName = user instanceof AthenzPrincipal ? ((AthenzPrincipal) user).getIdentity().getName() : user.getName();
TenantName tenantName = TenantName.from(UserTenant.normalizeUser(userName));
List<Tenant> tenants = controller.tenants().asList(new Credentials(user));
Slime slime = new Slime();
Cursor response = slime.setObject();
response.setString("user", userName);
Cursor tenantsArray = response.setArray("tenants");
for (Tenant tenant : tenants)
tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
// Whether the user's own (normalized) user tenant is among the accessible tenants.
response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant.name().equals(tenantName)));
return new SlimeJsonResponse(slime);
}
/** Lists every tenant with brief detail. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor response = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Renders the named tenant, or 404 when it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if ( ! tenant.isPresent())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Renders the given tenant in full detail. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, tenant, request);
    return new SlimeJsonResponse(slime);
}
/** Lists the applications of the given tenant, briefly. */
private HttpResponse applications(String tenantName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor array = slime.setArray();
    for (Application application : controller.applications().asList(TenantName.from(tenantName)))
        toSlime(application, array.addObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Renders the default instance of the named application; throws NotExistsException if absent. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Application application = getApplication(tenantName, applicationName);
    Slime slime = new Slime();
    toSlime(slime.setObject(), application, request);
    return new SlimeJsonResponse(slime);
}
/** PATCHes an application: sets its pinned major version, or clears it when given 0. */
private HttpResponse setMajorVersion(String tenantName, String applicationName, HttpRequest request) {
Application application = getApplication(tenantName, applicationName);
Inspector majorVersionField = toSlime(request.getData()).get().field("majorVersion");
if ( ! majorVersionField.valid())
throw new IllegalArgumentException("Request body must contain a majorVersion field");
// 0 means "unpin": stored as null.
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int)majorVersionField.asLong();
// Store under lock to avoid clobbering concurrent modifications of the application.
controller.applications().lockIfPresent(application.id(),
a -> controller.applications().store(a.withMajorVersion(majorVersion)));
return new MessageResponse("Set major version to " + ( majorVersion == null ? "empty" : majorVersion));
}
/** Returns the default-instance application with the given name, or throws NotExistsException. */
private Application getApplication(String tenantName, String applicationName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
    Optional<Application> application = controller.applications().get(applicationId);
    return application.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
/** Lists the nodes of the given deployment as reported by the node repository. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
List<Node> nodes = controller.configServer().nodeRepository().list(zone, id);
Slime slime = new Slime();
Cursor nodesArray = slime.setObject().setArray("nodes");
for (Node node : nodes) {
Cursor nodeObject = nodesArray.addObject();
nodeObject.setString("hostname", node.hostname().value());
nodeObject.setString("state", valueOf(node.state()));
nodeObject.setString("orchestration", valueOf(node.serviceState()));
nodeObject.setString("version", node.currentVersion().toString());
nodeObject.setString("flavor", node.canonicalFlavor());
nodeObject.setString("clusterId", node.clusterId());
nodeObject.setString("clusterType", valueOf(node.clusterType()));
}
return new SlimeJsonResponse(slime);
}
/** Maps a node state to its API name; throws on unknown states so new enum values surface explicitly. */
private static String valueOf(Node.State state) {
switch (state) {
case failed: return "failed";
case parked: return "parked";
case dirty: return "dirty";
case ready: return "ready";
case active: return "active";
case inactive: return "inactive";
case reserved: return "reserved";
case provisioned: return "provisioned";
default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
}
/** Maps a node's orchestration (service) state to its API name; throws on unknown states. */
private static String valueOf(Node.ServiceState state) {
switch (state) {
case expectedUp: return "expectedUp";
case allowedDown: return "allowedDown";
case unorchestrated: return "unorchestrated";
default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
}
/**
 * Returns the logs of the given deployment. With the "streaming" query parameter the backend
 * log stream is piped straight through; otherwise the logs are rendered as a JSON object of
 * name-to-text entries (empty when the backend has none).
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    if (queryParameters.containsKey("streaming")) {
        InputStream logStream = controller.configServer().getLogStream(deployment, queryParameters);
        return new HttpResponse(200) {
            @Override
            public void render(OutputStream outputStream) throws IOException {
                logStream.transferTo(outputStream);
            }
        };
    }
    Optional<Logs> response = controller.configServer().getLogs(deployment, queryParameters);
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    // Optional/Map idiom: replaces isPresent()/get() plus a needless entrySet().stream().forEach.
    response.ifPresent(logs -> logs.logs().forEach(object::setString));
    return new SlimeJsonResponse(slime);
}
/**
 * Force-triggers the given job for the application and reports which jobs were triggered.
 * NOTE(review): assumes an authenticated principal is present; getUserPrincipal() returning
 * null would NPE here — confirm upstream filters guarantee authentication on this route.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
String triggered = controller.applications().deploymentTrigger()
.forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName())
.stream().map(JobType::jobName).collect(joining(", "));
return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
: "Triggered " + triggered + " for " + id);
}
/** Pauses the given job for the application for the maximum allowed pause period. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant pauseUntil = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, pauseUntil);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/**
 * Serializes the given application to JSON: identity and links, deployment job status,
 * change blockers, versions, global rotations, per-zone deployments, metrics, activity
 * and ownership information. Only comments are added here; code is unchanged.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
// Identity and a link to the job listing for this application.
object.setString("tenant", application.id().tenant().value());
object.setString("application", application.id().application().value());
object.setString("instance", application.id().instance().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + application.id().instance().value() + "/job/",
request.getUri()).toString());
// Source revision of the last successful component build, if any.
application.deploymentJobs().statusOf(JobType.component)
.flatMap(JobStatus::lastSuccess)
.map(run -> run.application().source())
.ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source")));
application.deploymentJobs().projectId()
.ifPresent(id -> object.setLong("projectId", id));
// Current and outstanding changes are only emitted when non-empty.
if ( ! application.change().isEmpty()) {
toSlime(object.setObject("deploying"), application.change());
}
if ( ! application.outstandingChange().isEmpty()) {
toSlime(object.setObject("outstandingChange"), application.outstandingChange());
}
// Job status, sorted in the order given by the application's deployment spec.
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedJobs(application.deploymentJobs().jobStatus().values());
object.setBool("deployedInternally", application.deploymentJobs().deployedInternally());
Cursor deploymentsArray = object.setArray("deploymentJobs");
for (JobStatus job : jobStatus) {
Cursor jobObject = deploymentsArray.addObject();
jobObject.setString("type", job.type().jobName());
jobObject.setBool("success", job.isSuccess());
// Each run field is optional; absent runs are simply omitted from the JSON.
job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
}
// Change blockers from the deployment spec: which kinds of changes are blocked, and when.
Cursor changeBlockers = object.setArray("changeBlockers");
application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
});
object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString());
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
// Global rotation endpoints; the rotation id is only set when a global DNS name exists.
Cursor globalRotationsArray = object.setArray("globalRotations");
application.globalDnsName(controller.system()).ifPresent(rotation -> {
globalRotationsArray.addString(rotation.url().toString());
globalRotationsArray.addString(rotation.secureUrl().toString());
globalRotationsArray.addString(rotation.oathUrl().toString());
object.setString("rotationId", application.rotation().get().asString());
});
// Additional rotation endpoints coming from routing policies.
Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies(application.id());
for (RoutingPolicy policy : routingPolicies) {
for (RotationName rotation : policy.rotations()) {
GlobalDnsName dnsName = new GlobalDnsName(application.id(), controller.system(), rotation);
globalRotationsArray.addString(dnsName.oathUrl().toString());
}
}
// Deployments, sorted in deployment-spec order. Either serialized fully (recursive
// request) or as a short entry with a link to the full deployment resource.
List<Deployment> deployments = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedDeployments(application.deployments().values());
Cursor instancesArray = object.setArray("instances");
for (Deployment deployment : deployments) {
Cursor deploymentObject = instancesArray.addObject();
// Rotation status is only relevant for production deployments of rotated applications.
if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) {
toSlime(application.rotationStatus(deployment), deploymentObject);
}
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
else {
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("instance", application.id().instance().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value() +
"/instance/" + application.id().instance().value(),
request.getUri()).toString());
}
}
// Application-level quality metrics and recent activity.
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
// Ownership and issue-tracking references, when present.
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns JSON for a single deployment of the given application in the given zone. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().get(applicationId)
                                        .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    DeploymentId deploymentId = new DeploymentId(application.id(), ZoneId.from(environment, region));
    Deployment deployment = application.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId());
    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Writes the platform version and/or application revision of the given change, when present and known. */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(version -> object.setString("version", version.toString()));
    change.application().ifPresent(version -> {
        if ( ! version.isUnknown())
            toSlime(version, object.setObject("revision"));
    });
}
/**
 * Serializes one deployment: identity, service URLs, node and monitoring links, versions,
 * timestamps, source revision, activity, cost and metrics. Comments only; code unchanged.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
// Identity of the deployment.
response.setString("tenant", deploymentId.applicationId().tenant().value());
response.setString("application", deploymentId.applicationId().application().value());
response.setString("instance", deploymentId.applicationId().instance().value());
response.setString("environment", deploymentId.zoneId().environment().value());
response.setString("region", deploymentId.zoneId().region().value());
// Service endpoints, when the config server reports any for this deployment.
Cursor serviceUrlArray = response.setArray("serviceUrls");
controller.applications().getDeploymentEndpoints(deploymentId)
.ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())));
// Link to the node repository listing for this deployment's nodes.
response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
// Monitoring system link, resolved through the zone registry.
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
response.setString("version", deployment.version().toFullString());
response.setString("revision", deployment.applicationVersion().id());
response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
// Expiry only applies to zones with a configured deployment time-to-live.
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId()
.ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
sourceRevisionToSlime(deployment.applicationVersion().source(), response);
// Recent read/write activity observed for this deployment; absent values are omitted.
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
// Cost and serving metrics for the deployment.
DeploymentCost appCost = deployment.calculateCost();
Cursor costObject = response.setObject("cost");
toSlime(appCost, costObject);
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Writes the hash and source revision of a known application version; writes nothing for unknown versions. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return;
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
}
/** Writes repository, branch and commit of the given source revision, if present. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/** Writes the BCP rotation status of a deployment, upper-cased, under "bcpStatus". */
private void toSlime(RotationStatus status, Cursor object) {
    object.setObject("bcpStatus").setString("rotationStatus", status.name().toUpperCase());
}
/** Returns the monitoring system URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/** Sets the given deployment in or out of service in its global rotation, recording who, why and when. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().require(applicationId);
    ZoneId zone = ZoneId.from(environment, region);
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(application + " has no deployment in " + zone);
    }
    // Audit data for the override: mandatory reason, the acting user, and the time of change.
    Inspector payload = toSlime(request.getData()).get();
    String reason = mandatory("reason", payload).asString();
    String agent = requireUserPrincipal(request).getName();
    long changedAt = controller.clock().instant().getEpochSecond();
    EndpointStatus.Status newStatus = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()),
                                                      new EndpointStatus(newStatus, reason, agent, changedAt));
    return new MessageResponse(String.format("Successfully set %s in %s.%s %s service",
                                             application.id().toShortString(),
                                             deployment.zone().environment().value(),
                                             deployment.zone().region().value(),
                                             inService ? "in" : "out of"));
}
/**
 * Returns the global rotation override status for each routing endpoint of the given deployment.
 *
 * Response shape (kept for compatibility): a "globalrotationoverride" array with alternating
 * upstream-name strings and status objects.
 */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    // Iterate over entries directly instead of keySet() + get(endpoint), avoiding a second
    // map lookup per endpoint.
    for (Map.Entry<RoutingEndpoint, EndpointStatus> entry : controller.applications().globalRotationStatus(deploymentId).entrySet()) {
        array.addString(entry.getKey().upstreamName());
        EndpointStatus currentStatus = entry.getValue();
        Cursor statusObject = array.addObject();
        statusObject.setString("status", currentStatus.getStatus().name());
        // Reason and agent may be null in stored status; render as empty strings.
        statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
        statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
        statusObject.setLong("timestamp", currentStatus.getEpoch());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation status of the given deployment; fails if the application has no global rotation. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    if ( ! application.rotation().isPresent())
        throw new NotExistsException("global rotation does not exist for " + application);
    ZoneId zone = ZoneId.from(environment, region);
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(application + " has no deployment in " + zone);
    Slime slime = new Slime();
    toSlime(application.rotationStatus(deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the change currently being deployed for the application, as platform/application/pinned fields. */
private HttpResponse deploying(String tenant, String application, HttpRequest request) {
    Application app = controller.applications().require(ApplicationId.from(tenant, application, "default"));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = app.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(version -> root.setString("application", version.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Returns the service view of the given deployment. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = ZoneId.from(environment, region);
    ApplicationId applicationId = new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         applicationId,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/** Proxies a request for a single service resource of a deployment. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath);
    ZoneId zone = ZoneId.from(environment, region);
    ApplicationId applicationId = new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         applicationId,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Creates a user tenant for the authenticated Athenz user; idempotent if the tenant already exists. */
private HttpResponse createUser(HttpRequest request) {
    // Only Athenz user principals may create a user tenant.
    String user = Optional.of(requireUserPrincipal(request))
                          .filter(AthenzPrincipal.class::isInstance)
                          .map(principal -> ((AthenzPrincipal) principal).getIdentity())
                          .filter(AthenzUser.class::isInstance)
                          .map(AthenzIdentity::getName)
                          .map(UserTenant::normalizeUser)
                          .orElseThrow(() -> new ForbiddenException("Not authenticated or not a user."));
    try {
        controller.tenants().createUser(UserTenant.create(user));
        return new MessageResponse("Created user '" + user + "'");
    } catch (AlreadyExistsException e) {
        return new MessageResponse("User '" + user + "' already exists");
    }
}
/** Updates an existing tenant from the request body's specification and credentials. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // 404 before attempting an update of a non-existent tenant
    TenantName name = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(name, requestObject),
                                accessControlRequests.credentials(name, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}
/** Creates a new tenant from the request body's specification and credentials. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName name = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(name, requestObject),
                                accessControlRequests.credentials(name, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}
/** Creates the "default" instance application under the given tenant. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    Inspector requestObject = toSlime(request.getData()).get();
    try {
        // User tenants need no credentials; other tenant types must provide them.
        boolean isUserTenant = controller.tenants().require(id.tenant()).type() == Tenant.Type.user;
        Optional<Credentials> credentials = isUserTenant
                ? Optional.empty()
                : Optional.of(accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()));
        Application application = controller.applications().createApplication(id, credentials);
        Slime slime = new Slime();
        toSlime(application, slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }
    catch (ZmsClientException e) {
        // Translate Athenz authorization failures to a 403; rethrow everything else.
        if (e.getErrorCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN)
            throw new ForbiddenException("Not authorized to create application", e);
        throw e;
    }
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, boolean pin, HttpRequest request) {
    request = controller.auditLogger().log(request);
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        // An empty version means "deploy the current system version".
        Version version = Version.fromString(versionString);
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion();
        if ( ! systemHasVersion(version)) {
            String activeVersions = controller.versionStatus().versions()
                                              .stream()
                                              .map(VespaVersion::versionNumber)
                                              .map(Version::toString)
                                              .collect(joining(", "));
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + activeVersions);
        }
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        // The original chained unchecked Optional.get() calls, which threw
        // NoSuchElementException (an internal error) when the application had no successful
        // component build. Fail with IllegalArgumentException and a clear message instead.
        ApplicationVersion version = application.get().deploymentJobs().statusOf(JobType.component)
                                                .flatMap(JobStatus::lastSuccess)
                                                .map(run -> run.application())
                                                .orElseThrow(() -> new IllegalArgumentException(
                                                        "No successful component build to deploy for " + id));
        Change change = Change.of(version);
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Change change = application.get().change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for " + application + " at this time");
            return;
        }
        controller.applications().deploymentTrigger().cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
        response.append("Changed deployment from '" + change + "' to '" +
                        controller.applications().require(id).change() + "' for " + application);
    });
    return new MessageResponse(response.toString());
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    // Restart a single node when "hostname" is given, otherwise the whole deployment.
    Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
    controller.applications().restart(new DeploymentId(applicationId, zone), hostname);
    return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName,
                                                             ApplicationResource.API_PATH, applicationName,
                                                             EnvironmentResource.API_PATH, environment,
                                                             "region", region,
                                                             "instance", instanceName));
}
/**
 * Deploys an application package to a zone from a multipart request. Handles three cases:
 * the system zone application (package resolved from the current system version), a direct
 * redeploy of the currently deployed package, and a normal deployment with an uploaded
 * package or a referenced build. Comments only; code unchanged.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
// The request is multipart; "deployOptions" (JSON) is mandatory, "applicationZip" optional.
Map<String, byte[]> dataParts = new MultipartParser().parse(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
/*
* Special handling of the zone application (the only system application with an application package)
* Setting any other deployOptions here is not supported for now (e.g. specifying version), but
* this might be handy later to handle emergency downgrades.
*/
boolean isZoneApplication = SystemApplication.zone.id().equals(applicationId);
if (isZoneApplication) {
// An explicit version is rejected; the zone application always follows the system version.
String versionStr = deployOptions.field("vespaVersion").asString();
boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
if (versionPresent) {
throw new RuntimeException("Version not supported for system applications");
}
if (controller.versionStatus().isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
ActivateResult result = controller.applications()
.deploySystemApplicationPackage(SystemApplication.zone, zone, systemVersion.get().versionNumber());
return new SlimeJsonResponse(toSlime(result));
}
/*
* Normal applications from here
*/
Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
.map(ApplicationPackage::new);
// A build is referenced by source revision + build number; both or neither must be present,
// and a referenced build is mutually exclusive with an uploaded package.
Inspector sourceRevision = deployOptions.field("sourceRevision");
Inspector buildNumber = deployOptions.field("buildNumber");
if (sourceRevision.valid() != buildNumber.valid())
throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
Optional<ApplicationVersion> applicationVersion = Optional.empty();
if (sourceRevision.valid()) {
if (applicationPackage.isPresent())
throw new IllegalArgumentException("Application version and application package can't both be provided.");
applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
buildNumber.asLong()));
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
/*
* Deploy direct is when we want to redeploy the current application - retrieve version
* info from the application package before deploying
*/
if(deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) {
// Resolve package and versions from the existing deployment in this zone.
Optional<Deployment> deployment = controller.applications().get(applicationId)
.map(Application::deployments)
.flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
if(!deployment.isPresent())
throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
ApplicationVersion version = deployment.get().applicationVersion();
if(version.isUnknown())
throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
applicationVersion = Optional.of(version);
vespaVersion = Optional.of(deployment.get().version());
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
// Collect remaining options and hand off to the deployment logic.
DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
vespaVersion,
deployOptions.field("ignoreValidationErrors").asBool(),
deployOptions.field("deployCurrentVersion").asBool());
ActivateResult result = controller.applications().deploy(applicationId,
zone,
applicationPackage,
applicationVersion,
deployOptionsJsonClass,
Optional.of(requireUserPrincipal(request)));
return new SlimeJsonResponse(toSlime(result));
}
/** Deletes the given tenant; user tenants are deleted directly, others require credentials from the request. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
    if (tenant.get().type() == Tenant.Type.user) {
        controller.tenants().deleteUser((UserTenant) tenant.get());
    }
    else {
        Credentials credentials = accessControlRequests.credentials(tenant.get().name(),
                                                                    toSlime(request.getData()).get(),
                                                                    request.getJDiscRequest());
        controller.tenants().delete(tenant.get().name(), credentials);
    }
    return tenant(tenant.get(), request);
}
/** Deletes the "default" instance application under the given tenant. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    // User tenants need no credentials; other tenant types must provide them.
    boolean isUserTenant = controller.tenants().require(id.tenant()).type() == Tenant.Type.user;
    Optional<Credentials> credentials = isUserTenant
            ? Optional.empty()
            : Optional.of(accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()));
    controller.applications().deleteApplication(id, credentials);
    return new EmptyJsonResponse();
}
/** Deactivates the given deployment of an application. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().require(applicationId);
    controller.applications().deactivate(application.id(), ZoneId.from(environment, region));
    return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName,
                                                    ApplicationResource.API_PATH, applicationName,
                                                    EnvironmentResource.API_PATH, environment,
                                                    "region", region,
                                                    "instance", instanceName));
}
/**
 * Promote application Chef environments. To be used by component jobs only
 */
private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        // Copy from the system-wide environment into this application's source environment.
        String source = chefEnvironment.systemChefEnvironment();
        String target = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        controller.chefClient().copyChefEnvironment(source, target);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", source, target));
    } catch (Exception e) {
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}
/**
 * Promote application Chef environments for jobs that deploy applications
 */
private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        // Copy from the application's source environment to its zone-specific target environment.
        String source = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        String target = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
        controller.chefClient().copyChefEnvironment(source, target);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", source, target));
    } catch (Exception e) {
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}
/**
 * Registers a completed build job reported by an external build system. Component reports
 * are rejected for applications which deploy through the internal pipeline.
 */
private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) {
    try {
        DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get());
        // Short-circuit keeps the application lookup for component reports only.
        if (   report.jobType() == JobType.component
            && controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally())
            throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " +
                                               "longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
                                               "to the old pipeline, please file a ticket at yo/vespa-support and request this.");
        controller.applications().deploymentTrigger().notifyOfCompletion(report);
        return new MessageResponse("ok");
    } catch (IllegalStateException e) {
        // Invalid report state is a client error, not an internal one.
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
}
private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) {
Optional<DeploymentJobs.JobError> jobError = Optional.empty();
if (report.field("jobError").valid()) {
jobError = Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString()));
}
ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString());
JobType type = JobType.fromJobName(report.field("jobName").asString());
long buildNumber = report.field("buildNumber").asLong();
if (type == JobType.component)
return DeploymentJobs.JobReport.ofComponent(id,
report.field("projectId").asLong(),
buildNumber,
jobError,
toSourceRevision(report.field("sourceRevision")));
else
return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError);
}
private static SourceRevision toSourceRevision(Inspector object) {
if (!object.field("repository").valid() ||
!object.field("branch").valid() ||
!object.field("commit").valid()) {
throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
}
return new SourceRevision(object.field("repository").asString(),
object.field("branch").asString(),
object.field("commit").asString());
}
private Tenant getTenantOrThrow(String tenantName) {
return controller.tenants().get(tenantName)
.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
object.setString("tenant", tenant.name().value());
object.setString("type", tentantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
Cursor contactsArray = object.setArray("contacts");
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
break;
case user: break;
case cloud: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
Cursor applicationArray = object.setArray("applications");
for (Application application : controller.applications().asList(tenant.name())) {
if (application.id().instance().isDefault()) {
if (recurseOverApplications(request))
toSlime(applicationArray.addObject(), application, request);
else
toSlime(application, applicationArray.addObject(), request);
}
}
}
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tentantType(tenant));
switch (tenant.type()) {
case athenz:
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
break;
case user: break;
case cloud: break;
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
try {
return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
}
catch (URISyntaxException e) {
throw new RuntimeException("Will not happen", e);
}
}
private long asLong(String valueOrNull, long defaultWhenNull) {
if (valueOrNull == null) return defaultWhenNull;
try {
return Long.parseLong(valueOrNull);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
}
}
private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
object.setLong("id", jobRun.id());
object.setString("version", jobRun.platform().toFullString());
if (!jobRun.application().isUnknown())
toSlime(jobRun.application(), object.setObject("revision"));
object.setString("reason", jobRun.reason());
object.setLong("at", jobRun.at().toEpochMilli());
}
private Slime toSlime(InputStream jsonStream) {
try {
byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
return SlimeUtils.jsonToSlime(jsonBytes);
} catch (IOException e) {
throw new RuntimeException();
}
}
private static Principal requireUserPrincipal(HttpRequest request) {
Principal principal = request.getJDiscRequest().getUserPrincipal();
if (principal == null) throw new InternalServerErrorException("Expected a user principal");
return principal;
}
private Inspector mandatory(String key, Inspector object) {
if ( ! object.field(key).valid())
throw new IllegalArgumentException("'" + key + "' is missing");
return object.field(key);
}
private Optional<String> optional(String key, Inspector object) {
return SlimeUtils.optionalString(object.field(key));
}
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
private void toSlime(Application application, Cursor object, HttpRequest request) {
object.setString("tenant", application.id().tenant().value());
object.setString("application", application.id().application().value());
object.setString("instance", application.id().instance().value());
object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(), request.getUri()).toString());
}
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setBool("allowed", refeedAction.allowed);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
for (ServiceInfo serviceInfo : serviceInfoList) {
Cursor serviceInfoObject = array.addObject();
serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
serviceInfoObject.setString("configId", serviceInfo.configId);
serviceInfoObject.setString("hostName", serviceInfo.hostName);
}
}
private void stringsToSlime(List<String> strings, Cursor array) {
for (String string : strings)
array.addString(string);
}
private String readToString(InputStream stream) {
Scanner scanner = new Scanner(stream).useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
private boolean systemHasVersion(Version version) {
return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
}
public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
object.setLong("tco", (long)deploymentCost.getTco());
object.setLong("waste", (long)deploymentCost.getWaste());
object.setDouble("utilization", deploymentCost.getUtilization());
Cursor clustersObject = object.setObject("cluster");
for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet())
toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey()));
}
private static void toSlime(ClusterCost clusterCost, Cursor object) {
object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
object.setLong("tco", (int)clusterCost.getTco());
object.setLong("waste", (int)clusterCost.getWaste());
object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
Cursor utilObject = object.setObject("util");
utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
Cursor usageObject = object.setObject("usage");
usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
Cursor hostnamesArray = object.setArray("hostnames");
for (String hostname : clusterCost.getClusterInfo().getHostnames())
hostnamesArray.addString(hostname);
}
private static String getResourceName(ClusterUtilization utilization) {
String name = "cpu";
double max = utilization.getMaxUtilization();
if (utilization.getMemory() == max) {
name = "mem";
} else if (utilization.getDisk() == max) {
name = "disk";
} else if (utilization.getDiskBusy() == max) {
name = "diskbusy";
}
return name;
}
private static boolean recurseOverTenants(HttpRequest request) {
return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
private static boolean recurseOverApplications(HttpRequest request) {
return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
private static boolean recurseOverDeployments(HttpRequest request) {
return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
private static String tentantType(Tenant tenant) {
switch (tenant.type()) {
case user: return "USER";
case athenz: return "ATHENS";
case cloud: return "CLOUD";
default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
}
private static ApplicationId appIdFromPath(Path path) {
return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
private static JobType jobTypeFromPath(Path path) {
return JobType.fromJobName(path.get("jobtype"));
}
private static RunId runIdFromPath(Path path) {
long number = Long.parseLong(path.get("number"));
return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = new MultipartParser().parse(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
SourceRevision sourceRevision = toSourceRevision(submitOptions);
String authorEmail = submitOptions.field("authorEmail").asString();
long projectId = Math.max(1, submitOptions.field("projectId").asLong());
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
tenant,
application,
sourceRevision,
authorEmail,
projectId,
applicationPackage,
dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
} |
Shouldn't this be `parts[6].replaceAll("\\n", "\n").replaceAll("\\t", "\t")));`? | private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
ZoneId zone = id.type().zone(controller.system());
logger.log("Copying Vespa log from nodes of " + id.application() + " in " + zone + " ...");
try {
List<LogEntry> entries = new ArrayList<>();
String logs = IOUtils.readAll(controller.configServer().getLogStream(new DeploymentId(id.application(), zone),
Collections.emptyMap()),
StandardCharsets.UTF_8);
for (String line : logs.split("\n")) {
String[] parts = line.split("\t");
if (parts.length != 7) continue;
entries.add(new LogEntry(0,
(long) (Double.parseDouble(parts[0]) * 1000),
LogEntry.typeOf(LogLevel.parse(parts[5])),
parts[1] + '\t' + parts[3] + '\t' + parts[4] + '\n' +
parts[6].replaceAll("\\\\n", "\n")
.replaceAll("\\\\t", "\t")));
}
controller.jobController().log(id, Step.copyVespaLogs, entries);
}
catch (Exception e) {
logger.log(INFO, "Failure getting vespa logs for " + id, e);
}
return Optional.of(running);
} | .replaceAll("\\\\t", "\t"))); | private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
ZoneId zone = id.type().zone(controller.system());
logger.log("Copying Vespa log from nodes of " + id.application() + " in " + zone + " ...");
try {
List<LogEntry> entries = new ArrayList<>();
String logs = IOUtils.readAll(controller.configServer().getLogStream(new DeploymentId(id.application(), zone),
Collections.emptyMap()),
StandardCharsets.UTF_8);
for (String line : logs.split("\n")) {
String[] parts = line.split("\t");
if (parts.length != 7) continue;
entries.add(new LogEntry(0,
(long) (Double.parseDouble(parts[0]) * 1000),
LogEntry.typeOf(LogLevel.parse(parts[5])),
parts[1] + '\t' + parts[3] + '\t' + parts[4] + '\n' +
parts[6].replaceAll("\\\\n", "\n")
.replaceAll("\\\\t", "\t")));
}
controller.jobController().log(id, Step.copyVespaLogs, entries);
}
catch (Exception e) {
logger.log(INFO, "Failure getting vespa logs for " + id, e);
}
return Optional.of(running);
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration installationTimeout = Duration.ofMinutes(150);
private final Controller controller;
private final DeploymentFailureMails mails;
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case deployTester: return deployTester(id, logger);
case installReal: return installReal(id, logger);
case installTester: return installTester(id, logger);
case startTests: return startTests(id, logger);
case endTests: return endTests(id, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
logger.log(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e));
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " +
versions.sourcePlatform().orElse(versions.targetPlatform()) +
" and application version " +
versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
return deployReal(id, true, logger);
}
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " + versions.targetPlatform() +
" and application version " + versions.targetApplication().id() + " ...");
return deployReal(id, false, logger);
}
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
return deploy(id.application(),
id.type(),
() -> controller.applications().deploy(id.application(),
id.type().zone(controller.system()),
Optional.empty(),
new DeployOptions(false,
Optional.empty(),
false,
setTheStage)),
logger);
}
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
Version platform = controller.jobController().run(id).get().versions().targetPlatform();
logger.log("Deploying the tester container on platform " + platform + " ...");
return deploy(id.tester().id(),
id.type(),
() -> controller.applications().deployTester(id.tester(),
testerPackage(id),
id.type().zone(controller.system()),
new DeployOptions(true,
Optional.of(platform),
false,
false)),
logger);
}
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
messages.add("Details:");
prepareResponse.log.stream()
.map(entry -> entry.message)
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Restarting services on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
return Optional.of(running);
}
catch (ConfigServerException e) {
if ( e.getErrorCode() == OUT_OF_CAPACITY && type.isTest()
|| e.getErrorCode() == ACTIVATION_CONFLICT
|| e.getErrorCode() == APPLICATION_LOCK_FAILURE
|| e.getErrorCode() == PARENT_HOST_NOT_READY) {
logger.log("Will retry, because of '" + e.getErrorCode() + "' deploying:\n" + e.getMessage());
return Optional.empty();
}
if ( e.getErrorCode() == INVALID_APPLICATION_PACKAGE
|| e.getErrorCode() == BAD_REQUEST) {
logger.log("Deployment failed: " + e.getMessage());
return Optional.of(deploymentFailed);
}
throw e;
}
}
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if ( ! deployment.isPresent()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
ApplicationVersion application = setTheStage ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication();
logger.log("Checking installation of " + platform + " and " + application.id() + " ...");
if ( nodesConverged(id.application(), id.type(), platform, logger)
&& servicesConverged(id.application(), id.type(), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
if (timedOut(deployment.get(), installationTimeout)) {
logger.log(INFO, "Installation failed to complete within " + installationTimeout.toMinutes() + " minutes!");
return Optional.of(installationFailed);
}
logger.log("Installation not yet complete.");
return Optional.empty();
}
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if ( ! deployment.isPresent()) {
logger.log(WARNING, "Deployment expired before installation of tester was successful.");
return Optional.of(error);
}
Version platform = controller.jobController().run(id).get().versions().targetPlatform();
logger.log("Checking installation of tester container ...");
if ( nodesConverged(id.tester().id(), id.type(), platform, logger)
&& servicesConverged(id.tester().id(), id.type(), logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
if (timedOut(deployment.get(), installationTimeout)) {
logger.log(WARNING, "Installation of tester failed to complete within " + installationTimeout.toMinutes() + " minutes of real deployment!");
return Optional.of(error);
}
logger.log("Installation of tester not yet complete.");
return Optional.empty();
}
private boolean nodesConverged(ApplicationId id, JobType type, Version target, DualLogger logger) {
List<Node> nodes = controller.configServer().nodeRepository().list(type.zone(controller.system()), id, ImmutableSet.of(active, reserved));
List<String> statuses = nodes.stream()
.map(node -> String.format("%70s: %-16s%-25s%-32s%s",
node.hostname(),
node.serviceState(),
node.wantedVersion() + (node.currentVersion().equals(node.wantedVersion()) ? "" : " <-- " + node.currentVersion()),
node.restartGeneration() >= node.wantedRestartGeneration() ? ""
: "restart pending (" + node.wantedRestartGeneration() + " <-- " + node.restartGeneration() + ")",
node.rebootGeneration() >= node.wantedRebootGeneration() ? ""
: "reboot pending (" + node.wantedRebootGeneration() + " <-- " + node.rebootGeneration() + ")"))
.collect(Collectors.toList());
logger.log(statuses);
return nodes.stream().allMatch(node -> node.currentVersion().equals(target)
&& node.restartGeneration() >= node.wantedRestartGeneration()
&& node.rebootGeneration() >= node.wantedRebootGeneration());
}
private boolean servicesConverged(ApplicationId id, JobType type, DualLogger logger) {
Optional<ServiceConvergence> convergence = controller.configServer().serviceConvergence(new DeploymentId(id, type.zone(controller.system())));
if ( ! convergence.isPresent()) {
logger.log("Config status not currently available -- will retry.");
return false;
}
logger.log("Wanted config generation is " + convergence.get().wantedGeneration());
List<String> statuses = convergence.get().services().stream()
.filter(serviceStatus -> serviceStatus.currentGeneration() != convergence.get().wantedGeneration())
.map(serviceStatus -> String.format("%70s: %11s on port %4d has %s",
serviceStatus.host().value(),
serviceStatus.type(),
serviceStatus.port(),
serviceStatus.currentGeneration() == -1 ? "not started!" : Long.toString(serviceStatus.currentGeneration())))
.collect(Collectors.toList());
logger.log(statuses);
return convergence.get().converged();
}
private Optional<RunStatus> startTests(RunId id, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if ( ! deployment.isPresent()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(aborted);
}
Set<ZoneId> zones = testedZoneAndProductionZones(id);
logger.log("Attempting to find endpoints ...");
Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id.application(), zones);
List<String> messages = new ArrayList<>();
messages.add("Found endpoints");
endpoints.forEach((zone, uris) -> {
messages.add("- " + zone);
uris.forEach(uri -> messages.add(" |-- " + uri));
});
logger.log(messages);
if ( ! endpoints.containsKey(id.type().zone(controller.system()))) {
if (timedOut(deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
logger.log("Endpoints for the deployment to test are not yet ready.");
return Optional.empty();
}
Map<ZoneId, List<String>> clusters = listClusters(id.application(), zones);
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (testerEndpoint.isPresent() && controller.jobController().cloud().ready(testerEndpoint.get())) {
logger.log("Starting tests ...");
controller.jobController().cloud().startTests(testerEndpoint.get(),
TesterCloud.Suite.of(id.type()),
testConfig(id.application(), id.type().zone(controller.system()),
controller.system(), endpoints, clusters));
return Optional.of(running);
}
if (timedOut(deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoint for tester failed to show up within " + endpointTimeout.toMinutes() + " minutes of real deployment!");
return Optional.of(error);
}
logger.log("Endpoints of tester container not yet available.");
return Optional.empty();
}
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if ( ! deployment(id.application(), id.type()).isPresent()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if ( ! testerEndpoint.isPresent()) {
logger.log("Endpoints for tester not found -- trying again later.");
return Optional.empty();
}
controller.jobController().updateTestLog(id);
TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
return Optional.empty();
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
return Optional.of(running);
}
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.jobController().deactivateTester(id.tester(), id.type());
return Optional.of(running);
}
private Optional<RunStatus> report(RunId id, DualLogger logger) {
try {
controller.jobController().active(id).ifPresent(run -> {
JobReport report = JobReport.ofJob(run.id().application(),
run.id().type(),
run.id().number(),
run.hasFailed() ? Optional.of(DeploymentJobs.JobError.unknown) : Optional.empty());
controller.applications().deploymentTrigger().notifyOfCompletion(report);
if (run.hasFailed())
sendNotification(run, logger);
});
}
catch (IllegalStateException e) {
logger.log(INFO, "Job '" + id.type() + "'no longer supposed to run?:", e);
}
return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
Application application = controller.applications().require(run.id().application());
Notifications notifications = application.deploymentSpec().notifications();
boolean newCommit = application.change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
if (run.status() == outOfCapacity && run.id().type().isProduction())
controller.mailer().send(mails.outOfCapacity(run.id(), recipients));
if (run.status() == deploymentFailed)
controller.mailer().send(mails.deploymentFailure(run.id(), recipients));
if (run.status() == installationFailed)
controller.mailer().send(mails.installationFailure(run.id(), recipients));
if (run.status() == testFailure)
controller.mailer().send(mails.testFailure(run.id(), recipients));
if (run.status() == error)
controller.mailer().send(mails.systemError(run.id(), recipients));
}
catch (RuntimeException e) {
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system())));
}
/** Returns the real application with the given id. */
private Application application(ApplicationId id) {
return controller.applications().require(id);
}
/** Returns whether the time elapsed since the last real deployment in the given zone is more than the given timeout. */
private boolean timedOut(Deployment deployment, Duration timeout) {
return deployment.at().isBefore(controller.clock().instant().minus(timeout));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
// The tester exercises the target application version of this run.
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().require(id.application()).deploymentSpec();
// Generated container config for the tester; flavor comes from the deployment spec, if given there.
byte[] servicesXml = servicesXml(controller.system(), testerFlavorFor(id, spec));
// The pre-built test artifact (fat-jar) stored for this tester and application version.
byte[] testPackage = controller.applications().applicationStore().get(id.tester(), version);
ZoneId zone = id.type().zone(controller.system());
byte[] deploymentXml = deploymentXml(spec.athenzDomain(), spec.athenzService(zone.environment(), zone.region()));
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
// Explicit close before reading the bytes; try-with-resources will close again afterwards --
// assumed idempotent, TODO confirm ZipBuilder.close() is safe to call twice.
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
/** Returns the tester flavor of the first zone of the first spec step which deploys to this run's environment. */
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
    DeploymentSpec.Step step = spec.steps().stream()
                                   .filter(candidate -> candidate.deploysTo(id.type().environment()))
                                   .findFirst()
                                   .orElseThrow(() -> new IllegalStateException("No step deploys to the zone this run is for!"));
    return step.zones().get(0).testerFlavor();
}
/** Returns a set containing the zone of the deployment tested in the given run, and all production zones for the application. */
private Set<ZoneId> testedZoneAndProductionZones(RunId id) {
    Stream<ZoneId> tested = Stream.of(id.type().zone(controller.system()));
    Stream<ZoneId> production = application(id.application()).productionDeployments().keySet().stream();
    return Stream.concat(tested, production).collect(Collectors.toSet());
}
/** Returns all endpoints for all current deployments of the given real application, keyed by zone. */
private Map<ZoneId, List<URI>> deploymentEndpoints(ApplicationId id, Iterable<ZoneId> zones) {
    ImmutableMap.Builder<ZoneId, List<URI>> endpointsByZone = ImmutableMap.builder();
    // Zones with no (or not yet any) endpoints are simply omitted from the result.
    zones.forEach(zone -> controller.applications().getDeploymentEndpoints(new DeploymentId(id, zone))
                                    .filter(endpoints -> ! endpoints.isEmpty())
                                    .ifPresent(endpoints -> endpointsByZone.put(zone, endpoints)));
    return endpointsByZone.build();
}
/** Returns all content clusters in all current deployments of the given real application, keyed by zone. */
private Map<ZoneId, List<String>> listClusters(ApplicationId id, Iterable<ZoneId> zones) {
    ImmutableMap.Builder<ZoneId, List<String>> clustersByZone = ImmutableMap.builder();
    zones.forEach(zone -> clustersByZone.put(zone, ImmutableList.copyOf(
            controller.configServer().getContentClusters(new DeploymentId(id, zone)))));
    return clustersByZone.build();
}
/** Returns the generated services.xml content for the tester application. */
// Declares the test-runner component, its Athenz-protected HTTP handler, and a single
// node of the requested (or default) flavor.
static byte[] servicesXml(SystemName systemName, Optional<String> testerFlavor) {
// Access-control domain differs between the main system and the CD system.
String domain = systemName == SystemName.main ? "vespa.vespa" : "vespa.vespa.cd";
String flavor = testerFlavor.orElse("d-1-4-50");
// Flavor names are assumed to look like "d-<cpu>-<memoryGb>-<diskGb>" -- TODO confirm.
int memoryGb = Integer.parseInt(flavor.split("-")[2]);
// Reserve roughly 2 GB for the JDisc container, expressed as a percentage of node memory.
int jdiscMemoryPercentage = (int) Math.ceil(200.0 / memoryGb);
// The remainder (768 MB per remaining GB) is handed to the surefire test JVM.
int testMemoryMb = 768 * (memoryGb - 2);
// NOTE(review): several string literals below appear truncated at "http:" -- content after
// "//" seems lost in an export; restore the full binding URLs before relying on this text.
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='default'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <http>\n" +
" <server id='default' port='4080'/>\n" +
" <filtering>\n" +
" <access-control domain='" + domain + "'>\n" +
" <exclude>\n" +
" <binding>http:
" </exclude>\n" +
" </access-control>\n" +
" <request-chain id=\"testrunner-api\">\n" +
" <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
" <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
" <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
" </config>\n" +
" <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
" <resourceName>" + domain + ":tester-application</resourceName>\n" +
" <action>deploy</action>\n" +
" </config>\n" +
" </component>\n" +
" </filter>\n" +
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n" +
"\n" +
" <nodes count=\"1\" flavor=\"" + flavor + "\" allocated-memory=\"" + jdiscMemoryPercentage + "%\" />\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(StandardCharsets.UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
    StringBuilder xml = new StringBuilder("<?xml version='1.0' encoding='UTF-8'?>\n")
            .append("<deployment version=\"1.0\" ");
    athenzDomain.ifPresent(domain -> xml.append("athenz-domain=\"").append(domain.value()).append("\" "));
    athenzService.ifPresent(service -> xml.append("athenz-service=\"").append(service.value()).append("\" "));
    return xml.append("/>").toString().getBytes(StandardCharsets.UTF_8);
}
/** Returns the config for the tests to run for the given job, serialized as JSON. */
private static byte[] testConfig(ApplicationId id, ZoneId testerZone, SystemName system,
                                 Map<ZoneId, List<URI>> deployments, Map<ZoneId, List<String>> clusters) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setString("application", id.serializedForm());
    root.setString("zone", testerZone.value());
    root.setString("system", system.name());
    // One array of endpoint URIs per zone with a deployment.
    Cursor endpointsObject = root.setObject("endpoints");
    for (Map.Entry<ZoneId, List<URI>> entry : deployments.entrySet()) {
        Cursor endpointArray = endpointsObject.setArray(entry.getKey().value());
        entry.getValue().forEach(endpoint -> endpointArray.addString(endpoint.toString()));
    }
    // One array of content cluster names per zone.
    Cursor clustersObject = root.setObject("clusters");
    for (Map.Entry<ZoneId, List<String>> entry : clusters.entrySet()) {
        Cursor clusterArray = clustersObject.setArray(entry.getKey().value());
        entry.getValue().forEach(clusterArray::addString);
    }
    try {
        return SlimeUtils.toJsonBytes(slime);
    }
    catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
private final RunId id;
private final Step step;
private DualLogger(RunId id, Step step) {
this.id = id;
this.step = step;
}
// Plain debug messages go only to the job log, not to the server log.
private void log(String... messages) {
log(List.of(messages));
}
private void log(List<String> messages) {
controller.jobController().log(id, step, DEBUG, messages);
}
private void log(Level level, String message) {
log(level, message, null);
}
// Leveled messages go to both logs; the server log gets the throwable itself, while the
// job log gets its stack trace appended as text.
private void log(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
if (thrown != null) {
ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(traceBuffer));
message += "\n" + traceBuffer;
}
controller.jobController().log(id, step, level, message);
}
}
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration installationTimeout = Duration.ofMinutes(150);
private final Controller controller;
private final DeploymentFailureMails mails;
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case deployTester: return deployTester(id, logger);
case installReal: return installReal(id, logger);
case installTester: return installTester(id, logger);
case startTests: return startTests(id, logger);
case endTests: return endTests(id, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
logger.log(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e));
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
/** Deploys the source (or, failing that, target) versions of the real application, to set the stage. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    Version platform = versions.sourcePlatform().orElse(versions.targetPlatform());
    ApplicationVersion application = versions.sourceApplication().orElse(versions.targetApplication());
    logger.log("Deploying platform version " + platform + " and application version " + application.id() + " ...");
    return deployReal(id, true, logger);
}
/** Deploys the target versions of the real application. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
    Versions versions = controller.jobController().run(id).get().versions();
    Version platform = versions.targetPlatform();
    ApplicationVersion application = versions.targetApplication();
    logger.log("Deploying platform version " + platform + " and application version " + application.id() + " ...");
    return deployReal(id, false, logger);
}
// Common deployment path for the real application; setTheStage indicates a staging-setup deployment.
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
return deploy(id.application(),
id.type(),
() -> controller.applications().deploy(id.application(),
id.type().zone(controller.system()),
Optional.empty(),
new DeployOptions(false,
Optional.empty(),
false,
setTheStage)),
logger);
}
// Deploys the tester container, pinned to the run's target platform version.
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
Version platform = controller.jobController().run(id).get().versions().targetPlatform();
logger.log("Deploying the tester container on platform " + platform + " ...")<span></span>;
return deploy(id.tester().id(),
id.type(),
() -> controller.applications().deployTester(id.tester(),
testerPackage(id),
id.type().zone(controller.system()),
new DeployOptions(true,
Optional.of(platform),
false,
false)),
logger);
}
// Runs the given deployment, interpreting the prepare response and config-server errors:
// returns empty to retry later, running on success, or deploymentFailed on fatal errors.
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
// Disallowed re-feed actions are fatal: explain the options to the user and fail the run.
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
// NOTE(review): the literal below appears truncated at "http:" -- the documentation URL seems lost in an export.
messages.add("   http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
messages.add("Details:");
prepareResponse.log.stream()
.map(entry -> entry.message)
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
// Restart actions are carried out immediately, one (deduplicated, sorted) host at a time.
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Restarting services on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
return Optional.of(running);
}
catch (ConfigServerException e) {
// Transient conditions: retry the step later. Out of capacity is only transient for test zones.
if ( e.getErrorCode() == OUT_OF_CAPACITY && type.isTest()
|| e.getErrorCode() == ACTIVATION_CONFLICT
|| e.getErrorCode() == APPLICATION_LOCK_FAILURE
|| e.getErrorCode() == PARENT_HOST_NOT_READY) {
logger.log("Will retry, because of '" + e.getErrorCode() + "' deploying:\n" + e.getMessage());
return Optional.empty();
}
// User errors: fail the deployment permanently.
if ( e.getErrorCode() == INVALID_APPLICATION_PACKAGE
|| e.getErrorCode() == BAD_REQUEST) {
logger.log("Deployment failed: " + e.getMessage());
return Optional.of(deploymentFailed);
}
throw e;
}
}
// Convenience overloads: the "initial" variant awaits the source (staging-setup) versions,
// the plain variant the target versions.
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
// Polls until the real deployment has converged on the expected versions, timing out after installationTimeout.
private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if ( ! deployment.isPresent()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
// During staging setup we wait for the source versions, otherwise for the target versions.
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
ApplicationVersion application = setTheStage ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication();
logger.log("Checking installation of " + platform + " and " + application.id() + " ...");
if ( nodesConverged(id.application(), id.type(), platform, logger)
&& servicesConverged(id.application(), id.type(), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
// The timeout is measured from the deployment time, not from when this step started.
if (timedOut(deployment.get(), installationTimeout)) {
logger.log(INFO, "Installation failed to complete within " + installationTimeout.toMinutes() + " minutes!");
return Optional.of(installationFailed);
}
logger.log("Installation not yet complete.");
return Optional.empty();
}
// Polls until the tester container has converged; failures here are system errors, not user errors.
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if ( ! deployment.isPresent()) {
logger.log(WARNING, "Deployment expired before installation of tester was successful.");
return Optional.of(error);
}
Version platform = controller.jobController().run(id).get().versions().targetPlatform();
logger.log("Checking installation of tester container ...");
if ( nodesConverged(id.tester().id(), id.type(), platform, logger)
&& servicesConverged(id.tester().id(), id.type(), logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
// Timeout is measured against the real deployment's time, since the tester has no deployment record here.
if (timedOut(deployment.get(), installationTimeout)) {
logger.log(WARNING, "Installation of tester failed to complete within " + installationTimeout.toMinutes() + " minutes of real deployment!");
return Optional.of(error);
}
logger.log("Installation of tester not yet complete.");
return Optional.empty();
}
// Returns whether all active/reserved nodes of the application run the target version with
// no pending restarts or reboots; logs a fixed-width status line per node to the job log.
private boolean nodesConverged(ApplicationId id, JobType type, Version target, DualLogger logger) {
List<Node> nodes = controller.configServer().nodeRepository().list(type.zone(controller.system()), id, ImmutableSet.of(active, reserved));
List<String> statuses = nodes.stream()
.map(node -> String.format("%70s: %-16s%-25s%-32s%s",
node.hostname(),
node.serviceState(),
node.wantedVersion() + (node.currentVersion().equals(node.wantedVersion()) ? "" : " <-- " + node.currentVersion()),
node.restartGeneration() >= node.wantedRestartGeneration() ? ""
: "restart pending (" + node.wantedRestartGeneration() + " <-- " + node.restartGeneration() + ")",
node.rebootGeneration() >= node.wantedRebootGeneration() ? ""
: "reboot pending (" + node.wantedRebootGeneration() + " <-- " + node.rebootGeneration() + ")"))
.collect(Collectors.toList());
logger.log(statuses);
// Note: convergence is checked against the given target, while the log above shows each node's own wanted version.
return nodes.stream().allMatch(node -> node.currentVersion().equals(target)
&& node.restartGeneration() >= node.wantedRestartGeneration()
&& node.rebootGeneration() >= node.wantedRebootGeneration());
}
// Returns whether all services have caught up with the wanted config generation;
// logs the laggards (current generation -1 means the service has not started).
private boolean servicesConverged(ApplicationId id, JobType type, DualLogger logger) {
Optional<ServiceConvergence> convergence = controller.configServer().serviceConvergence(new DeploymentId(id, type.zone(controller.system())));
if ( ! convergence.isPresent()) {
logger.log("Config status not currently available -- will retry.");
return false;
}
logger.log("Wanted config generation is " + convergence.get().wantedGeneration());
List<String> statuses = convergence.get().services().stream()
.filter(serviceStatus -> serviceStatus.currentGeneration() != convergence.get().wantedGeneration())
.map(serviceStatus -> String.format("%70s: %11s on port %4d has %s",
serviceStatus.host().value(),
serviceStatus.type(),
serviceStatus.port(),
serviceStatus.currentGeneration() == -1 ? "not started!" : Long.toString(serviceStatus.currentGeneration())))
.collect(Collectors.toList());
logger.log(statuses);
return convergence.get().converged();
}
// Gathers deployment endpoints and content clusters, then asks the tester to start its test suite.
// Returns empty to poll again, running once tests are started, or aborted/error on expiry/timeout.
private Optional<RunStatus> startTests(RunId id, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if ( ! deployment.isPresent()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(aborted);
}
Set<ZoneId> zones = testedZoneAndProductionZones(id);
logger.log("Attempting to find endpoints ...");
Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id.application(), zones);
List<String> messages = new ArrayList<>();
messages.add("Found endpoints");
endpoints.forEach((zone, uris) -> {
messages.add("- " + zone);
uris.forEach(uri -> messages.add(" |-- " + uri));
});
logger.log(messages);
// The zone under test must have endpoints before tests can start.
if ( ! endpoints.containsKey(id.type().zone(controller.system()))) {
if (timedOut(deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
logger.log("Endpoints for the deployment to test are not yet ready.");
return Optional.empty();
}
Map<ZoneId, List<String>> clusters = listClusters(id.application(), zones);
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
// Start the suite only when the tester's own endpoint is up and reports ready.
if (testerEndpoint.isPresent() && controller.jobController().cloud().ready(testerEndpoint.get())) {
logger.log("Starting tests ...");
controller.jobController().cloud().startTests(testerEndpoint.get(),
TesterCloud.Suite.of(id.type()),
testConfig(id.application(), id.type().zone(controller.system()),
controller.system(), endpoints, clusters));
return Optional.of(running);
}
if (timedOut(deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoint for tester failed to show up within " + endpointTimeout.toMinutes() + " minutes of real deployment!");
return Optional.of(error);
}
logger.log("Endpoints of tester container not yet available.");
return Optional.empty();
}
// Polls the tester for test progress, copying its log along the way, and maps
// the tester's status to this run's status once tests are done.
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if ( ! deployment(id.application(), id.type()).isPresent()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if ( ! testerEndpoint.isPresent()) {
logger.log("Endpoints for tester not found -- trying again later.");
return Optional.empty();
}
// Pull the latest test log entries into the job log before reading the status.
controller.jobController().updateTestLog(id);
TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
return Optional.empty();
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
/** Deactivates the real deployment of this run's application in its zone. */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
    ZoneId zone = id.type().zone(controller.system());
    logger.log("Deactivating deployment of " + id.application() + " in " + zone + " ...");
    controller.applications().deactivate(id.application(), zone);
    return Optional.of(running);
}
/** Deactivates the tester of this run's application in its zone. */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
    JobType type = id.type();
    logger.log("Deactivating tester of " + id.application() + " in " + type.zone(controller.system()) + " ...");
    controller.jobController().deactivateTester(id.tester(), type);
    return Optional.of(running);
}
/** Reports the outcome of this run to the deployment trigger, and sends failure mail if it failed. */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
    try {
        controller.jobController().active(id).ifPresent(run -> {
            JobReport report = JobReport.ofJob(run.id().application(),
                                               run.id().type(),
                                               run.id().number(),
                                               run.hasFailed() ? Optional.of(DeploymentJobs.JobError.unknown) : Optional.empty());
            controller.applications().deploymentTrigger().notifyOfCompletion(report);
            if (run.hasFailed())
                sendNotification(run, logger);
        });
    }
    catch (IllegalStateException e) {
        // Fixed log message: the original read "'no longer" -- missing space after the closing quote.
        logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?:", e);
    }
    // Reporting itself never fails the run.
    return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
Application application = controller.applications().require(run.id().application());
Notifications notifications = application.deploymentSpec().notifications();
// A "new commit" failure is one where the run's target application version is the one being rolled out.
boolean newCommit = application.change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
// Recipients: the addresses configured for this condition, plus the commit author if that role is configured.
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
// Out-of-capacity mail is only sent for production jobs; the other statuses always mail.
if (run.status() == outOfCapacity && run.id().type().isProduction())
controller.mailer().send(mails.outOfCapacity(run.id(), recipients));
if (run.status() == deploymentFailed)
controller.mailer().send(mails.deploymentFailure(run.id(), recipients));
if (run.status() == installationFailed)
controller.mailer().send(mails.installationFailure(run.id(), recipients));
if (run.status() == testFailure)
controller.mailer().send(mails.testFailure(run.id(), recipients));
if (run.status() == error)
controller.mailer().send(mails.systemError(run.id(), recipients));
}
catch (RuntimeException e) {
// Mailing is best effort; a failure to send must not fail the run.
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system())));
}
/** Returns the real application with the given id. */
private Application application(ApplicationId id) {
return controller.applications().require(id);
}
/** Returns whether the time elapsed since the last real deployment in the given zone is more than the given timeout. */
private boolean timedOut(Deployment deployment, Duration timeout) {
return deployment.at().isBefore(controller.clock().instant().minus(timeout));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().require(id.application()).deploymentSpec();
byte[] servicesXml = servicesXml(controller.system(), testerFlavorFor(id, spec));
byte[] testPackage = controller.applications().applicationStore().get(id.tester(), version);
ZoneId zone = id.type().zone(controller.system());
byte[] deploymentXml = deploymentXml(spec.athenzDomain(), spec.athenzService(zone.environment(), zone.region()));
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
for (DeploymentSpec.Step step : spec.steps())
if (step.deploysTo(id.type().environment()))
return step.zones().get(0).testerFlavor();
throw new IllegalStateException("No step deploys to the zone this run is for!");
}
/** Returns a stream containing the zone of the deployment tested in the given run, and all production zones for the application. */
private Set<ZoneId> testedZoneAndProductionZones(RunId id) {
return Stream.concat(Stream.of(id.type().zone(controller.system())),
application(id.application()).productionDeployments().keySet().stream())
.collect(Collectors.toSet());
}
/** Returns all endpoints for all current deployments of the given real application. */
private Map<ZoneId, List<URI>> deploymentEndpoints(ApplicationId id, Iterable<ZoneId> zones) {
ImmutableMap.Builder<ZoneId, List<URI>> deployments = ImmutableMap.builder();
for (ZoneId zone : zones)
controller.applications().getDeploymentEndpoints(new DeploymentId(id, zone))
.filter(endpoints -> ! endpoints.isEmpty())
.ifPresent(endpoints -> deployments.put(zone, endpoints));
return deployments.build();
}
/** Returns all content clusters in all current deployments of the given real application. */
private Map<ZoneId, List<String>> listClusters(ApplicationId id, Iterable<ZoneId> zones) {
ImmutableMap.Builder<ZoneId, List<String>> clusters = ImmutableMap.builder();
for (ZoneId zone : zones)
clusters.put(zone, ImmutableList.copyOf(controller.configServer().getContentClusters(new DeploymentId(id, zone))));
return clusters.build();
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(SystemName systemName, Optional<String> testerFlavor) {
String domain = systemName == SystemName.main ? "vespa.vespa" : "vespa.vespa.cd";
String flavor = testerFlavor.orElse("d-1-4-50");
int memoryGb = Integer.parseInt(flavor.split("-")[2]);
int jdiscMemoryPercentage = (int) Math.ceil(200.0 / memoryGb);
int testMemoryMb = 768 * (memoryGb - 2);
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='default'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <http>\n" +
" <server id='default' port='4080'/>\n" +
" <filtering>\n" +
" <access-control domain='" + domain + "'>\n" +
" <exclude>\n" +
" <binding>http:
" </exclude>\n" +
" </access-control>\n" +
" <request-chain id=\"testrunner-api\">\n" +
" <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
" <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
" <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
" </config>\n" +
" <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
" <resourceName>" + domain + ":tester-application</resourceName>\n" +
" <action>deploy</action>\n" +
" </config>\n" +
" </component>\n" +
" </filter>\n" +
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n" +
"\n" +
" <nodes count=\"1\" flavor=\"" + flavor + "\" allocated-memory=\"" + jdiscMemoryPercentage + "%\" />\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(StandardCharsets.UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
String deploymentSpec =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<deployment version=\"1.0\" " +
athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("")
+ "/>";
return deploymentSpec.getBytes(StandardCharsets.UTF_8);
}
/** Returns the config for the tests to run for the given job. */
private static byte[] testConfig(ApplicationId id, ZoneId testerZone, SystemName system,
Map<ZoneId, List<URI>> deployments, Map<ZoneId, List<String>> clusters) {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString("application", id.serializedForm());
root.setString("zone", testerZone.value());
root.setString("system", system.name());
Cursor endpointsObject = root.setObject("endpoints");
deployments.forEach((zone, endpoints) -> {
Cursor endpointArray = endpointsObject.setArray(zone.value());
for (URI endpoint : endpoints)
endpointArray.addString(endpoint.toString());
});
Cursor clustersObject = root.setObject("clusters");
clusters.forEach((zone, clusterList) -> {
Cursor clusterArray = clustersObject.setArray(zone.value());
for (String cluster : clusterList)
clusterArray.addString(cluster);
});
try {
return SlimeUtils.toJsonBytes(slime);
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
private final RunId id;
private final Step step;
private DualLogger(RunId id, Step step) {
this.id = id;
this.step = step;
}
private void log(String... messages) {
log(List.of(messages));
}
private void log(List<String> messages) {
controller.jobController().log(id, step, DEBUG, messages);
}
private void log(Level level, String message) {
log(level, message, null);
}
private void log(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
if (thrown != null) {
ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(traceBuffer));
message += "\n" + traceBuffer;
}
controller.jobController().log(id, step, level, message);
}
}
} |
No, it's a regex, so needs double quoting :) | private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
// Copies the Vespa log from the nodes of this run's deployment into the job log.
// Best effort: any failure is logged and the step still reports success.
ZoneId zone = id.type().zone(controller.system());
logger.log("Copying Vespa log from nodes of " + id.application() + " in " + zone + " ...");
try {
    List<LogEntry> entries = new ArrayList<>();
    String logs = IOUtils.readAll(controller.configServer().getLogStream(new DeploymentId(id.application(), zone),
                                                                         Collections.emptyMap()),
                                  StandardCharsets.UTF_8);
    for (String line : logs.split("\n")) {
        String[] parts = line.split("\t");
        if (parts.length != 7) continue; // Expect the 7-field vespa.log format; skip anything else.
        entries.add(new LogEntry(0,
                                 (long) (Double.parseDouble(parts[0]) * 1000), // fractional seconds -> millis
                                 LogEntry.typeOf(LogLevel.parse(parts[5])),
                                 parts[1] + '\t' + parts[3] + '\t' + parts[4] + '\n' +
                                 // The log text contains literal two-character "\n"/"\t" escapes; these are plain
                                 // literals, so use the non-regex String::replace instead of replaceAll with a
                                 // double-escaped regex -- same result, no regex engine involved.
                                 parts[6].replace("\\n", "\n")
                                         .replace("\\t", "\t")));
    }
    controller.jobController().log(id, Step.copyVespaLogs, entries);
}
catch (Exception e) {
    logger.log(INFO, "Failure getting vespa logs for " + id, e);
}
return Optional.of(running);
} | .replaceAll("\\\\t", "\t"))); | private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
// Copies the Vespa log from the nodes of this run's deployment into the job log; best effort.
ZoneId zone = id.type().zone(controller.system());
logger.log("Copying Vespa log from nodes of " + id.application() + " in " + zone + " ...");
try {
List<LogEntry> entries = new ArrayList<>();
String logs = IOUtils.readAll(controller.configServer().getLogStream(new DeploymentId(id.application(), zone),
Collections.emptyMap()),
StandardCharsets.UTF_8);
for (String line : logs.split("\n")) {
String[] parts = line.split("\t");
// Expect the 7-field vespa.log format; skip anything else.
if (parts.length != 7) continue;
entries.add(new LogEntry(0,
(long) (Double.parseDouble(parts[0]) * 1000),
LogEntry.typeOf(LogLevel.parse(parts[5])),
parts[1] + '\t' + parts[3] + '\t' + parts[4] + '\n' +
// "\\\\n" is a regex matching the literal two characters backslash-n (hence the double
// escaping: once for Java, once for the regex engine); it restores real newlines/tabs.
parts[6].replaceAll("\\\\n", "\n")
.replaceAll("\\\\t", "\t")));
}
controller.jobController().log(id, Step.copyVespaLogs, entries);
}
catch (Exception e) {
// Best effort: failures are logged, and the step below still reports success.
logger.log(INFO, "Failure getting vespa logs for " + id, e);
}
return Optional.of(running);
} | class InternalStepRunner implements StepRunner {
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
// How long to wait for deployment endpoints to show up before giving up.
static final Duration endpointTimeout = Duration.ofMinutes(15);
// How long to wait for nodes and services to converge before failing installation.
static final Duration installationTimeout = Duration.ofMinutes(150);
private final Controller controller;
private final DeploymentFailureMails mails;
/** Creates a step runner backed by the given controller; failure mails are built from its zone registry. */
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
/** Runs the given step of the given run, returning the new run status, or empty if the step is not yet complete and should be retried. */
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case deployTester: return deployTester(id, logger);
case installReal: return installReal(id, logger);
case installTester: return installTester(id, logger);
case startTests: return startTests(id, logger);
case endTests: return endTests(id, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
// I/O trouble is treated as transient: log it and let the step be retried.
logger.log(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e));
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
// Steps in the job profile's alwaysRun set are cleanup steps; those keep being retried.
if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
/** Deploys the source platform and application versions -- the starting point of a staged upgrade. */
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " +
versions.sourcePlatform().orElse(versions.targetPlatform()) +
" and application version " +
versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
return deployReal(id, true, logger);
}
/** Deploys the target platform and application versions. */
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " + versions.targetPlatform() +
" and application version " + versions.targetApplication().id() + " ...");
return deployReal(id, false, logger);
}
/** Deploys the real application; setTheStage selects the initial (source) versions in the deploy options. */
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
return deploy(id.application(),
id.type(),
() -> controller.applications().deploy(id.application(),
id.type().zone(controller.system()),
Optional.empty(),
new DeployOptions(false,
Optional.empty(),
false,
setTheStage)),
logger);
}
/** Deploys the tester container, pinned to the run's target platform version. */
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
Version platform = controller.jobController().run(id).get().versions().targetPlatform();
logger.log("Deploying the tester container on platform " + platform + " ...");
return deploy(id.tester().id(),
id.type(),
() -> controller.applications().deployTester(id.tester(),
testerPackage(id),
id.type().zone(controller.system()),
new DeployOptions(true,
Optional.of(platform),
false,
false)),
logger);
}
/**
 * Runs the given deployment and interprets its result: running on success, deploymentFailed on
 * disallowed re-feed actions or user errors, empty (retry later) on transient config server errors.
 */
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
// Disallowed re-feed actions fail the deployment outright, with guidance in the run log.
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
// NOTE(review): the documentation URL below was truncated by the export that produced this file.
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
messages.add("Details:");
prepareResponse.log.stream()
.map(entry -> entry.message)
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
// Required restarts are carried out immediately, one distinct host at a time.
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Restarting services on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
return Optional.of(running);
}
catch (ConfigServerException e) {
// Transient conditions: retry later. Out of capacity is only retried for test zones.
if ( e.getErrorCode() == OUT_OF_CAPACITY && type.isTest()
|| e.getErrorCode() == ACTIVATION_CONFLICT
|| e.getErrorCode() == APPLICATION_LOCK_FAILURE
|| e.getErrorCode() == PARENT_HOST_NOT_READY) {
logger.log("Will retry, because of '" + e.getErrorCode() + "' deploying:\n" + e.getMessage());
return Optional.empty();
}
// User errors: fail the deployment.
if ( e.getErrorCode() == INVALID_APPLICATION_PACKAGE
|| e.getErrorCode() == BAD_REQUEST) {
logger.log("Deployment failed: " + e.getMessage());
return Optional.of(deploymentFailed);
}
throw e;
}
}
/** Checks installation of the initial (source) versions. */
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
/** Checks installation of the target versions. */
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
/** Checks whether the real deployment has converged on the expected versions, within the installation timeout. */
private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if ( ! deployment.isPresent()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
// Expected versions are the source ones while setting the stage, otherwise the target ones.
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
ApplicationVersion application = setTheStage ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication();
logger.log("Checking installation of " + platform + " and " + application.id() + " ...");
if ( nodesConverged(id.application(), id.type(), platform, logger)
&& servicesConverged(id.application(), id.type(), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
if (timedOut(deployment.get(), installationTimeout)) {
logger.log(INFO, "Installation failed to complete within " + installationTimeout.toMinutes() + " minutes!");
return Optional.of(installationFailed);
}
logger.log("Installation not yet complete.");
return Optional.empty();
}
/** Checks whether the tester deployment has converged, within the installation timeout. */
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if ( ! deployment.isPresent()) {
logger.log(WARNING, "Deployment expired before installation of tester was successful.");
return Optional.of(error);
}
Version platform = controller.jobController().run(id).get().versions().targetPlatform();
logger.log("Checking installation of tester container ...");
if ( nodesConverged(id.tester().id(), id.type(), platform, logger)
&& servicesConverged(id.tester().id(), id.type(), logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
// The timeout is measured from the real deployment, which the tester is deployed alongside.
if (timedOut(deployment.get(), installationTimeout)) {
logger.log(WARNING, "Installation of tester failed to complete within " + installationTimeout.toMinutes() + " minutes of real deployment!");
return Optional.of(error);
}
logger.log("Installation of tester not yet complete.");
return Optional.empty();
}
/** Returns whether all active and reserved nodes run the target version with no pending restarts or reboots, logging their status. */
private boolean nodesConverged(ApplicationId id, JobType type, Version target, DualLogger logger) {
List<Node> nodes = controller.configServer().nodeRepository().list(type.zone(controller.system()), id, ImmutableSet.of(active, reserved));
List<String> statuses = nodes.stream()
.map(node -> String.format("%70s: %-16s%-25s%-32s%s",
node.hostname(),
node.serviceState(),
node.wantedVersion() + (node.currentVersion().equals(node.wantedVersion()) ? "" : " <-- " + node.currentVersion()),
node.restartGeneration() >= node.wantedRestartGeneration() ? ""
: "restart pending (" + node.wantedRestartGeneration() + " <-- " + node.restartGeneration() + ")",
node.rebootGeneration() >= node.wantedRebootGeneration() ? ""
: "reboot pending (" + node.wantedRebootGeneration() + " <-- " + node.rebootGeneration() + ")"))
.collect(Collectors.toList());
logger.log(statuses);
return nodes.stream().allMatch(node -> node.currentVersion().equals(target)
&& node.restartGeneration() >= node.wantedRestartGeneration()
&& node.rebootGeneration() >= node.wantedRebootGeneration());
}
/** Returns whether all services have reached the wanted config generation, logging those that have not. */
private boolean servicesConverged(ApplicationId id, JobType type, DualLogger logger) {
Optional<ServiceConvergence> convergence = controller.configServer().serviceConvergence(new DeploymentId(id, type.zone(controller.system())));
if ( ! convergence.isPresent()) {
logger.log("Config status not currently available -- will retry.");
return false;
}
logger.log("Wanted config generation is " + convergence.get().wantedGeneration());
// Only the services which have not yet reached the wanted generation are listed.
List<String> statuses = convergence.get().services().stream()
.filter(serviceStatus -> serviceStatus.currentGeneration() != convergence.get().wantedGeneration())
.map(serviceStatus -> String.format("%70s: %11s on port %4d has %s",
serviceStatus.host().value(),
serviceStatus.type(),
serviceStatus.port(),
serviceStatus.currentGeneration() == -1 ? "not started!" : Long.toString(serviceStatus.currentGeneration())))
.collect(Collectors.toList());
logger.log(statuses);
return convergence.get().converged();
}
/** Finds endpoints for the tested and production zones, and asks the tester to start its tests once everything is ready. */
private Optional<RunStatus> startTests(RunId id, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if ( ! deployment.isPresent()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(aborted);
}
Set<ZoneId> zones = testedZoneAndProductionZones(id);
logger.log("Attempting to find endpoints ...");
Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id.application(), zones);
List<String> messages = new ArrayList<>();
messages.add("Found endpoints");
endpoints.forEach((zone, uris) -> {
messages.add("- " + zone);
uris.forEach(uri -> messages.add(" |-- " + uri));
});
logger.log(messages);
// The zone under test must have endpoints before tests can start; within the endpoint timeout.
if ( ! endpoints.containsKey(id.type().zone(controller.system()))) {
if (timedOut(deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
logger.log("Endpoints for the deployment to test are not yet ready.");
return Optional.empty();
}
Map<ZoneId, List<String>> clusters = listClusters(id.application(), zones);
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
// Tests start only when the tester endpoint exists and reports itself ready.
if (testerEndpoint.isPresent() && controller.jobController().cloud().ready(testerEndpoint.get())) {
logger.log("Starting tests ...");
controller.jobController().cloud().startTests(testerEndpoint.get(),
TesterCloud.Suite.of(id.type()),
testConfig(id.application(), id.type().zone(controller.system()),
controller.system(), endpoints, clusters));
return Optional.of(running);
}
if (timedOut(deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoint for tester failed to show up within " + endpointTimeout.toMinutes() + " minutes of real deployment!");
return Optional.of(error);
}
logger.log("Endpoints of tester container not yet available.");
return Optional.empty();
}
/** Polls the tester for test progress, mapping its status to a run status once the tests are done. */
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if ( ! deployment(id.application(), id.type()).isPresent()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if ( ! testerEndpoint.isPresent()) {
logger.log("Endpoints for tester not found -- trying again later.");
return Optional.empty();
}
// Pull the latest test log entries into the run log before checking status.
controller.jobController().updateTestLog(id);
TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
return Optional.empty();
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
/** Deactivates the real deployment of this run's application in this run's zone. */
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
return Optional.of(running);
}
/** Deactivates the tester deployment for this run. */
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.jobController().deactivateTester(id.tester(), id.type());
return Optional.of(running);
}
/** Reports the completed run back to the deployment trigger, and sends a failure notification mail if the run failed. */
private Optional<RunStatus> report(RunId id, DualLogger logger) {
try {
controller.jobController().active(id).ifPresent(run -> {
JobReport report = JobReport.ofJob(run.id().application(),
run.id().type(),
run.id().number(),
run.hasFailed() ? Optional.of(DeploymentJobs.JobError.unknown) : Optional.empty());
controller.applications().deploymentTrigger().notifyOfCompletion(report);
if (run.hasFailed())
sendNotification(run, logger);
});
}
catch (IllegalStateException e) {
// Fixed log message: added the missing space after the quoted job type, and dropped the stray
// trailing colon -- the throwable is passed separately and appended by DualLogger.
logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
}
return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
Application application = controller.applications().require(run.id().application());
Notifications notifications = application.deploymentSpec().notifications();
// Whether the failing change is a new application commit (as opposed to, e.g., a platform change).
boolean newCommit = application.change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
// The commit author is included when the spec lists the author role for this condition.
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
// Out-of-capacity is only mailed for production jobs.
if (run.status() == outOfCapacity && run.id().type().isProduction())
controller.mailer().send(mails.outOfCapacity(run.id(), recipients));
if (run.status() == deploymentFailed)
controller.mailer().send(mails.deploymentFailure(run.id(), recipients));
if (run.status() == installationFailed)
controller.mailer().send(mails.installationFailure(run.id(), recipients));
if (run.status() == testFailure)
controller.mailer().send(mails.testFailure(run.id(), recipients));
if (run.status() == error)
controller.mailer().send(mails.systemError(run.id(), recipients));
}
catch (RuntimeException e) {
// Mail failures are logged but never fail the job run.
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system())));
}
/** Returns the real application with the given id; throws if it does not exist. */
private Application application(ApplicationId id) {
return controller.applications().require(id);
}
/** Returns whether the time elapsed since the last real deployment in the given zone is more than the given timeout. */
private boolean timedOut(Deployment deployment, Duration timeout) {
// Measured against the controller's clock, so this is testable with an injected clock.
return deployment.at().isBefore(controller.clock().instant().minus(timeout));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().require(id.application()).deploymentSpec();
byte[] servicesXml = servicesXml(controller.system(), testerFlavorFor(id, spec));
// The pre-built test artifact stored for this application version.
byte[] testPackage = controller.applications().applicationStore().get(id.tester(), version);
ZoneId zone = id.type().zone(controller.system());
byte[] deploymentXml = deploymentXml(spec.athenzDomain(), spec.athenzService(zone.environment(), zone.region()));
// +1000 bytes: presumably headroom for zip structure overhead beyond the payloads -- TODO confirm.
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
/**
 * Returns the tester flavor of the first deployment spec step which deploys to this run's environment.
 *
 * @throws IllegalStateException if no step deploys to that environment
 */
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
    return spec.steps().stream()
               .filter(step -> step.deploysTo(id.type().environment()))
               .findFirst()
               .map(step -> step.zones().get(0).testerFlavor())
               .orElseThrow(() -> new IllegalStateException("No step deploys to the zone this run is for!"));
}
/** Returns the set containing the zone of the deployment tested in the given run, and all production zones for the application. */
private Set<ZoneId> testedZoneAndProductionZones(RunId id) {
return Stream.concat(Stream.of(id.type().zone(controller.system())),
application(id.application()).productionDeployments().keySet().stream())
.collect(Collectors.toSet());
}
/** Returns all endpoints for all current deployments of the given real application, keyed by zone. */
private Map<ZoneId, List<URI>> deploymentEndpoints(ApplicationId id, Iterable<ZoneId> zones) {
ImmutableMap.Builder<ZoneId, List<URI>> deployments = ImmutableMap.builder();
for (ZoneId zone : zones)
// Zones with no endpoints yet are simply left out of the map.
controller.applications().getDeploymentEndpoints(new DeploymentId(id, zone))
.filter(endpoints -> ! endpoints.isEmpty())
.ifPresent(endpoints -> deployments.put(zone, endpoints));
return deployments.build();
}
/** Returns all content clusters in all current deployments of the given real application, keyed by zone. */
private Map<ZoneId, List<String>> listClusters(ApplicationId id, Iterable<ZoneId> zones) {
ImmutableMap.Builder<ZoneId, List<String>> clusters = ImmutableMap.builder();
for (ZoneId zone : zones)
clusters.put(zone, ImmutableList.copyOf(controller.configServer().getContentClusters(new DeploymentId(id, zone))));
return clusters.build();
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(SystemName systemName, Optional<String> testerFlavor) {
String domain = systemName == SystemName.main ? "vespa.vespa" : "vespa.vespa.cd";
String flavor = testerFlavor.orElse("d-1-4-50");
// Flavor names are dash-separated; the third component is taken as memory in GB -- TODO confirm.
int memoryGb = Integer.parseInt(flavor.split("-")[2]);
int jdiscMemoryPercentage = (int) Math.ceil(200.0 / memoryGb);
int testMemoryMb = 768 * (memoryGb - 2);
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='default'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
// NOTE(review): the binding URI literal below was truncated by the export that produced this file.
" <binding>http:
" </handler>\n" +
"\n" +
" <http>\n" +
" <server id='default' port='4080'/>\n" +
" <filtering>\n" +
" <access-control domain='" + domain + "'>\n" +
" <exclude>\n" +
// NOTE(review): this binding URI literal was also truncated by the export.
" <binding>http:
" </exclude>\n" +
" </access-control>\n" +
" <request-chain id=\"testrunner-api\">\n" +
" <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
" <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
" <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
" </config>\n" +
" <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
" <resourceName>" + domain + ":tester-application</resourceName>\n" +
" <action>deploy</action>\n" +
" </config>\n" +
" </component>\n" +
" </filter>\n" +
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n" +
"\n" +
" <nodes count=\"1\" flavor=\"" + flavor + "\" allocated-memory=\"" + jdiscMemoryPercentage + "%\" />\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(StandardCharsets.UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
String deploymentSpec =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<deployment version=\"1.0\" " +
// Each attribute is emitted only when the corresponding value is present.
athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("")
+ "/>";
return deploymentSpec.getBytes(StandardCharsets.UTF_8);
}
/** Returns the config for the tests to run for the given job, serialized as JSON. */
private static byte[] testConfig(ApplicationId id, ZoneId testerZone, SystemName system,
Map<ZoneId, List<URI>> deployments, Map<ZoneId, List<String>> clusters) {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString("application", id.serializedForm());
root.setString("zone", testerZone.value());
root.setString("system", system.name());
// "endpoints": per-zone arrays of endpoint URI strings.
Cursor endpointsObject = root.setObject("endpoints");
deployments.forEach((zone, endpoints) -> {
Cursor endpointArray = endpointsObject.setArray(zone.value());
for (URI endpoint : endpoints)
endpointArray.addString(endpoint.toString());
});
// "clusters": per-zone arrays of content cluster names.
Cursor clustersObject = root.setObject("clusters");
clusters.forEach((zone, clusterList) -> {
Cursor clusterArray = clustersObject.setArray(zone.value());
for (String cluster : clusterList)
clusterArray.addString(cluster);
});
try {
return SlimeUtils.toJsonBytes(slime);
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
private final RunId id;
private final Step step;
private DualLogger(RunId id, Step step) {
this.id = id;
this.step = step;
}
// Logs the given messages to the job run log only, at DEBUG level.
private void log(String... messages) {
log(List.of(messages));
}
private void log(List<String> messages) {
controller.jobController().log(id, step, DEBUG, messages);
}
// Logs at the given level to both the class logger and the job run log.
private void log(Level level, String message) {
log(level, message, null);
}
private void log(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
if (thrown != null) {
// The stack trace is appended to the message stored in the job run log.
ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(traceBuffer));
message += "\n" + traceBuffer;
}
controller.jobController().log(id, step, level, message);
}
}
} | class InternalStepRunner implements StepRunner {
// NOTE(review): from here on, the dataset's context_after column repeats the class interior above
// verbatim; kept byte-identical, with only light annotation.
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName());
static final Duration endpointTimeout = Duration.ofMinutes(15);
static final Duration installationTimeout = Duration.ofMinutes(150);
private final Controller controller;
private final DeploymentFailureMails mails;
public InternalStepRunner(Controller controller) {
this.controller = controller;
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
// Runs the given step of the given run; empty means "not done yet, retry later".
@Override
public Optional<RunStatus> run(LockedStep step, RunId id) {
DualLogger logger = new DualLogger(id, step.get());
try {
switch (step.get()) {
case deployInitialReal: return deployInitialReal(id, logger);
case installInitialReal: return installInitialReal(id, logger);
case deployReal: return deployReal(id, logger);
case deployTester: return deployTester(id, logger);
case installReal: return installReal(id, logger);
case installTester: return installTester(id, logger);
case startTests: return startTests(id, logger);
case endTests: return endTests(id, logger);
case copyVespaLogs: return copyVespaLogs(id, logger);
case deactivateReal: return deactivateReal(id, logger);
case deactivateTester: return deactivateTester(id, logger);
case report: return report(id, logger);
default: throw new AssertionError("Unknown step '" + step + "'!");
}
}
catch (UncheckedIOException e) {
logger.log(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e));
return Optional.empty();
}
catch (RuntimeException e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
return Optional.of(error);
}
}
// Duplicate (context_after column) of the deploy* methods and deploy helper above; kept verbatim.
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " +
versions.sourcePlatform().orElse(versions.targetPlatform()) +
" and application version " +
versions.sourceApplication().orElse(versions.targetApplication()).id() + " ...");
return deployReal(id, true, logger);
}
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) {
Versions versions = controller.jobController().run(id).get().versions();
logger.log("Deploying platform version " + versions.targetPlatform() +
" and application version " + versions.targetApplication().id() + " ...");
return deployReal(id, false, logger);
}
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
return deploy(id.application(),
id.type(),
() -> controller.applications().deploy(id.application(),
id.type().zone(controller.system()),
Optional.empty(),
new DeployOptions(false,
Optional.empty(),
false,
setTheStage)),
logger);
}
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) {
Version platform = controller.jobController().run(id).get().versions().targetPlatform();
logger.log("Deploying the tester container on platform " + platform + " ...");
return deploy(id.tester().id(),
id.type(),
() -> controller.applications().deployTester(id.tester(),
testerPackage(id),
id.type().zone(controller.system()),
new DeployOptions(true,
Optional.of(platform),
false,
false)),
logger);
}
// Runs the deployment and maps its outcome to a run status; empty means retry later.
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment, DualLogger logger) {
try {
PrepareResponse prepareResponse = deployment.get().prepareResponse();
if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) {
List<String> messages = new ArrayList<>();
messages.add("Deploy failed due to non-compatible changes that require re-feed.");
messages.add("Your options are:");
messages.add("1. Revert the incompatible changes.");
messages.add("2. If you think it is safe in your case, you can override this validation, see");
// NOTE(review): URL literal truncated by the export.
messages.add(" http:
messages.add("3. Deploy as a new application under a different name.");
messages.add("Illegal actions:");
prepareResponse.configChangeActions.refeedActions.stream()
.filter(action -> ! action.allowed)
.flatMap(action -> action.messages.stream())
.forEach(messages::add);
messages.add("Details:");
prepareResponse.log.stream()
.map(entry -> entry.message)
.forEach(messages::add);
logger.log(messages);
return Optional.of(deploymentFailed);
}
if (prepareResponse.configChangeActions.restartActions.isEmpty())
logger.log("No services requiring restart.");
else
prepareResponse.configChangeActions.restartActions.stream()
.flatMap(action -> action.services.stream())
.map(service -> service.hostName)
.sorted().distinct()
.map(Hostname::new)
.forEach(hostname -> {
controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname));
logger.log("Restarting services on host " + hostname.id() + ".");
});
logger.log("Deployment successful.");
return Optional.of(running);
}
catch (ConfigServerException e) {
if ( e.getErrorCode() == OUT_OF_CAPACITY && type.isTest()
|| e.getErrorCode() == ACTIVATION_CONFLICT
|| e.getErrorCode() == APPLICATION_LOCK_FAILURE
|| e.getErrorCode() == PARENT_HOST_NOT_READY) {
logger.log("Will retry, because of '" + e.getErrorCode() + "' deploying:\n" + e.getMessage());
return Optional.empty();
}
if ( e.getErrorCode() == INVALID_APPLICATION_PACKAGE
|| e.getErrorCode() == BAD_REQUEST) {
logger.log("Deployment failed: " + e.getMessage());
return Optional.of(deploymentFailed);
}
throw e;
}
}
// Duplicate (context_after column) of the install* methods above; kept verbatim.
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
return installReal(id, true, logger);
}
private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
return installReal(id, false, logger);
}
private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if ( ! deployment.isPresent()) {
logger.log(INFO, "Deployment expired before installation was successful.");
return Optional.of(installationFailed);
}
Versions versions = controller.jobController().run(id).get().versions();
Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform();
ApplicationVersion application = setTheStage ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication();
logger.log("Checking installation of " + platform + " and " + application.id() + " ...");
if ( nodesConverged(id.application(), id.type(), platform, logger)
&& servicesConverged(id.application(), id.type(), logger)) {
logger.log("Installation succeeded!");
return Optional.of(running);
}
if (timedOut(deployment.get(), installationTimeout)) {
logger.log(INFO, "Installation failed to complete within " + installationTimeout.toMinutes() + " minutes!");
return Optional.of(installationFailed);
}
logger.log("Installation not yet complete.");
return Optional.empty();
}
private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if ( ! deployment.isPresent()) {
logger.log(WARNING, "Deployment expired before installation of tester was successful.");
return Optional.of(error);
}
Version platform = controller.jobController().run(id).get().versions().targetPlatform();
logger.log("Checking installation of tester container ...");
if ( nodesConverged(id.tester().id(), id.type(), platform, logger)
&& servicesConverged(id.tester().id(), id.type(), logger)) {
logger.log("Tester container successfully installed!");
return Optional.of(running);
}
if (timedOut(deployment.get(), installationTimeout)) {
logger.log(WARNING, "Installation of tester failed to complete within " + installationTimeout.toMinutes() + " minutes of real deployment!");
return Optional.of(error);
}
logger.log("Installation of tester not yet complete.");
return Optional.empty();
}
private boolean nodesConverged(ApplicationId id, JobType type, Version target, DualLogger logger) {
List<Node> nodes = controller.configServer().nodeRepository().list(type.zone(controller.system()), id, ImmutableSet.of(active, reserved));
List<String> statuses = nodes.stream()
.map(node -> String.format("%70s: %-16s%-25s%-32s%s",
node.hostname(),
node.serviceState(),
node.wantedVersion() + (node.currentVersion().equals(node.wantedVersion()) ? "" : " <-- " + node.currentVersion()),
node.restartGeneration() >= node.wantedRestartGeneration() ? ""
: "restart pending (" + node.wantedRestartGeneration() + " <-- " + node.restartGeneration() + ")",
node.rebootGeneration() >= node.wantedRebootGeneration() ? ""
: "reboot pending (" + node.wantedRebootGeneration() + " <-- " + node.rebootGeneration() + ")"))
.collect(Collectors.toList());
logger.log(statuses);
return nodes.stream().allMatch(node -> node.currentVersion().equals(target)
&& node.restartGeneration() >= node.wantedRestartGeneration()
&& node.rebootGeneration() >= node.wantedRebootGeneration());
}
private boolean servicesConverged(ApplicationId id, JobType type, DualLogger logger) {
Optional<ServiceConvergence> convergence = controller.configServer().serviceConvergence(new DeploymentId(id, type.zone(controller.system())));
if ( ! convergence.isPresent()) {
logger.log("Config status not currently available -- will retry.");
return false;
}
logger.log("Wanted config generation is " + convergence.get().wantedGeneration());
List<String> statuses = convergence.get().services().stream()
.filter(serviceStatus -> serviceStatus.currentGeneration() != convergence.get().wantedGeneration())
.map(serviceStatus -> String.format("%70s: %11s on port %4d has %s",
serviceStatus.host().value(),
serviceStatus.type(),
serviceStatus.port(),
serviceStatus.currentGeneration() == -1 ? "not started!" : Long.toString(serviceStatus.currentGeneration())))
.collect(Collectors.toList());
logger.log(statuses);
return convergence.get().converged();
}
private Optional<RunStatus> startTests(RunId id, DualLogger logger) {
Optional<Deployment> deployment = deployment(id.application(), id.type());
if ( ! deployment.isPresent()) {
logger.log(INFO, "Deployment expired before tests could start.");
return Optional.of(aborted);
}
Set<ZoneId> zones = testedZoneAndProductionZones(id);
logger.log("Attempting to find endpoints ...");
Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id.application(), zones);
List<String> messages = new ArrayList<>();
messages.add("Found endpoints");
endpoints.forEach((zone, uris) -> {
messages.add("- " + zone);
uris.forEach(uri -> messages.add(" |-- " + uri));
});
logger.log(messages);
if ( ! endpoints.containsKey(id.type().zone(controller.system()))) {
if (timedOut(deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
logger.log("Endpoints for the deployment to test are not yet ready.");
return Optional.empty();
}
Map<ZoneId, List<String>> clusters = listClusters(id.application(), zones);
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (testerEndpoint.isPresent() && controller.jobController().cloud().ready(testerEndpoint.get())) {
logger.log("Starting tests ...");
controller.jobController().cloud().startTests(testerEndpoint.get(),
TesterCloud.Suite.of(id.type()),
testConfig(id.application(), id.type().zone(controller.system()),
controller.system(), endpoints, clusters));
return Optional.of(running);
}
if (timedOut(deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoint for tester failed to show up within " + endpointTimeout.toMinutes() + " minutes of real deployment!");
return Optional.of(error);
}
logger.log("Endpoints of tester container not yet available.");
return Optional.empty();
}
private Optional<RunStatus> endTests(RunId id, DualLogger logger) {
if ( ! deployment(id.application(), id.type()).isPresent()) {
logger.log(INFO, "Deployment expired before tests could complete.");
return Optional.of(aborted);
}
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if ( ! testerEndpoint.isPresent()) {
logger.log("Endpoints for tester not found -- trying again later.");
return Optional.empty();
}
controller.jobController().updateTestLog(id);
TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get());
switch (testStatus) {
case NOT_STARTED:
throw new IllegalStateException("Tester reports tests not started, even though they should have!");
case RUNNING:
return Optional.empty();
case FAILURE:
logger.log("Tests failed.");
return Optional.of(testFailure);
case ERROR:
logger.log(INFO, "Tester failed running its tests!");
return Optional.of(error);
case SUCCESS:
logger.log("Tests completed successfully.");
return Optional.of(running);
default:
throw new IllegalStateException("Unknown status '" + testStatus + "'!");
}
}
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
return Optional.of(running);
}
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
controller.jobController().deactivateTester(id.tester(), id.type());
return Optional.of(running);
}
private Optional<RunStatus> report(RunId id, DualLogger logger) {
try {
controller.jobController().active(id).ifPresent(run -> {
JobReport report = JobReport.ofJob(run.id().application(),
run.id().type(),
run.id().number(),
run.hasFailed() ? Optional.of(DeploymentJobs.JobError.unknown) : Optional.empty());
controller.applications().deploymentTrigger().notifyOfCompletion(report);
if (run.hasFailed())
sendNotification(run, logger);
});
}
catch (IllegalStateException e) {
logger.log(INFO, "Job '" + id.type() + "'no longer supposed to run?:", e);
}
return Optional.of(running);
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
Application application = controller.applications().require(run.id().application());
Notifications notifications = application.deploymentSpec().notifications();
boolean newCommit = application.change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when));
if (notifications.emailRolesFor(when).contains(author))
run.versions().targetApplication().authorEmail().ifPresent(recipients::add);
if (recipients.isEmpty())
return;
try {
if (run.status() == outOfCapacity && run.id().type().isProduction())
controller.mailer().send(mails.outOfCapacity(run.id(), recipients));
if (run.status() == deploymentFailed)
controller.mailer().send(mails.deploymentFailure(run.id(), recipients));
if (run.status() == installationFailed)
controller.mailer().send(mails.installationFailure(run.id(), recipients));
if (run.status() == testFailure)
controller.mailer().send(mails.testFailure(run.id(), recipients));
if (run.status() == error)
controller.mailer().send(mails.systemError(run.id(), recipients));
}
catch (RuntimeException e) {
logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
}
}
/** Returns the deployment of the real application in the zone of the given job, if it exists. */
private Optional<Deployment> deployment(ApplicationId id, JobType type) {
return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system())));
}
/** Returns the real application with the given id. */
private Application application(ApplicationId id) {
return controller.applications().require(id);
}
/** Returns whether the time elapsed since the last real deployment in the given zone is more than the given timeout. */
private boolean timedOut(Deployment deployment, Duration timeout) {
return deployment.at().isBefore(controller.clock().instant().minus(timeout));
}
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
DeploymentSpec spec = controller.applications().require(id.application()).deploymentSpec();
byte[] servicesXml = servicesXml(controller.system(), testerFlavorFor(id, spec));
byte[] testPackage = controller.applications().applicationStore().get(id.tester(), version);
ZoneId zone = id.type().zone(controller.system());
byte[] deploymentXml = deploymentXml(spec.athenzDomain(), spec.athenzService(zone.environment(), zone.region()));
try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) {
zipBuilder.add(testPackage);
zipBuilder.add("services.xml", servicesXml);
zipBuilder.add("deployment.xml", deploymentXml);
zipBuilder.close();
return new ApplicationPackage(zipBuilder.toByteArray());
}
}
private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) {
for (DeploymentSpec.Step step : spec.steps())
if (step.deploysTo(id.type().environment()))
return step.zones().get(0).testerFlavor();
throw new IllegalStateException("No step deploys to the zone this run is for!");
}
/** Returns a stream containing the zone of the deployment tested in the given run, and all production zones for the application. */
private Set<ZoneId> testedZoneAndProductionZones(RunId id) {
return Stream.concat(Stream.of(id.type().zone(controller.system())),
application(id.application()).productionDeployments().keySet().stream())
.collect(Collectors.toSet());
}
/** Returns all endpoints for all current deployments of the given real application. */
private Map<ZoneId, List<URI>> deploymentEndpoints(ApplicationId id, Iterable<ZoneId> zones) {
ImmutableMap.Builder<ZoneId, List<URI>> deployments = ImmutableMap.builder();
for (ZoneId zone : zones)
controller.applications().getDeploymentEndpoints(new DeploymentId(id, zone))
.filter(endpoints -> ! endpoints.isEmpty())
.ifPresent(endpoints -> deployments.put(zone, endpoints));
return deployments.build();
}
/** Returns all content clusters in all current deployments of the given real application. */
private Map<ZoneId, List<String>> listClusters(ApplicationId id, Iterable<ZoneId> zones) {
ImmutableMap.Builder<ZoneId, List<String>> clusters = ImmutableMap.builder();
for (ZoneId zone : zones)
clusters.put(zone, ImmutableList.copyOf(controller.configServer().getContentClusters(new DeploymentId(id, zone))));
return clusters.build();
}
/** Returns the generated services.xml content for the tester application. */
static byte[] servicesXml(SystemName systemName, Optional<String> testerFlavor) {
String domain = systemName == SystemName.main ? "vespa.vespa" : "vespa.vespa.cd";
String flavor = testerFlavor.orElse("d-1-4-50");
int memoryGb = Integer.parseInt(flavor.split("-")[2]);
int jdiscMemoryPercentage = (int) Math.ceil(200.0 / memoryGb);
int testMemoryMb = 768 * (memoryGb - 2);
String servicesXml =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<services xmlns:deploy='vespa' version='1.0'>\n" +
" <container version='1.0' id='default'>\n" +
"\n" +
" <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" +
" <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" +
" <artifactsPath>artifacts</artifactsPath>\n" +
" <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" +
" </config>\n" +
" </component>\n" +
"\n" +
" <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" +
" <binding>http:
" </handler>\n" +
"\n" +
" <http>\n" +
" <server id='default' port='4080'/>\n" +
" <filtering>\n" +
" <access-control domain='" + domain + "'>\n" +
" <exclude>\n" +
" <binding>http:
" </exclude>\n" +
" </access-control>\n" +
" <request-chain id=\"testrunner-api\">\n" +
" <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" +
" <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" +
" <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" +
" </config>\n" +
" <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" +
" <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" +
" <resourceName>" + domain + ":tester-application</resourceName>\n" +
" <action>deploy</action>\n" +
" </config>\n" +
" </component>\n" +
" </filter>\n" +
" </request-chain>\n" +
" </filtering>\n" +
" </http>\n" +
"\n" +
" <nodes count=\"1\" flavor=\"" + flavor + "\" allocated-memory=\"" + jdiscMemoryPercentage + "%\" />\n" +
" </container>\n" +
"</services>\n";
return servicesXml.getBytes(StandardCharsets.UTF_8);
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
private static byte[] deploymentXml(Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
String deploymentSpec =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
"<deployment version=\"1.0\" " +
athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("")
+ "/>";
return deploymentSpec.getBytes(StandardCharsets.UTF_8);
}
/** Returns the config for the tests to run for the given job. */
private static byte[] testConfig(ApplicationId id, ZoneId testerZone, SystemName system,
Map<ZoneId, List<URI>> deployments, Map<ZoneId, List<String>> clusters) {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString("application", id.serializedForm());
root.setString("zone", testerZone.value());
root.setString("system", system.name());
Cursor endpointsObject = root.setObject("endpoints");
deployments.forEach((zone, endpoints) -> {
Cursor endpointArray = endpointsObject.setArray(zone.value());
for (URI endpoint : endpoints)
endpointArray.addString(endpoint.toString());
});
Cursor clustersObject = root.setObject("clusters");
clusters.forEach((zone, clusterList) -> {
Cursor clusterArray = clustersObject.setArray(zone.value());
for (String cluster : clusterList)
clusterArray.addString(cluster);
});
try {
return SlimeUtils.toJsonBytes(slime);
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
private final RunId id;
private final Step step;
private DualLogger(RunId id, Step step) {
this.id = id;
this.step = step;
}
private void log(String... messages) {
log(List.of(messages));
}
private void log(List<String> messages) {
controller.jobController().log(id, step, DEBUG, messages);
}
private void log(Level level, String message) {
log(level, message, null);
}
private void log(Level level, String message, Throwable thrown) {
logger.log(level, id + " at " + step + ": " + message, thrown);
if (thrown != null) {
ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream();
thrown.printStackTrace(new PrintStream(traceBuffer));
message += "\n" + traceBuffer;
}
controller.jobController().log(id, step, level, message);
}
}
} |
This looks equivalent to `Executors.newFixedThreadPool()`( https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/util/concurrent/Executors.html#newFixedThreadPool(int,java.util.concurrent.ThreadFactory) ) | SimpleFeeder run() throws Throwable {
ExecutorService executor = (numThreads > 1)
? new ThreadPoolExecutor(numThreads, numThreads, 0L, TimeUnit.SECONDS,
new SynchronousQueue<>(false),
ThreadFactoryFactory.getDaemonThreadFactory("perf-feeder"),
new ThreadPoolExecutor.CallerRunsPolicy())
: null;
VespaXMLFeedReader reader = new VespaXMLFeedReader(in, docTypeMgr);
printHeader();
long numMessages = 0;
while (failure.get() == null) {
VespaXMLFeedReader.Operation op = new VespaXMLFeedReader.Operation();
reader.read(op);
if (op.getType() == VespaXMLFeedReader.OperationType.INVALID) {
break;
}
if (executor != null) {
executor.execute(() -> sendOperation(op));
} else {
sendOperation(op);
}
++numMessages;
}
while (failure.get() == null && numReplies.get() < numMessages) {
Thread.sleep(100);
}
if (failure.get() != null) {
throw failure.get();
}
printReport();
return this;
} | ? new ThreadPoolExecutor(numThreads, numThreads, 0L, TimeUnit.SECONDS, | SimpleFeeder run() throws Throwable {
ExecutorService executor = (numThreads > 1)
? new ThreadPoolExecutor(numThreads, numThreads, 0L, TimeUnit.SECONDS,
new SynchronousQueue<>(false),
ThreadFactoryFactory.getDaemonThreadFactory("perf-feeder"),
new ThreadPoolExecutor.CallerRunsPolicy())
: null;
VespaXMLFeedReader reader = new VespaXMLFeedReader(in, docTypeMgr);
printHeader();
long numMessages = 0;
while (failure.get() == null) {
VespaXMLFeedReader.Operation op = new VespaXMLFeedReader.Operation();
reader.read(op);
if (op.getType() == VespaXMLFeedReader.OperationType.INVALID) {
break;
}
if (executor != null) {
executor.execute(() -> sendOperation(op));
} else {
sendOperation(op);
}
++numMessages;
}
while (failure.get() == null && numReplies.get() < numMessages) {
Thread.sleep(100);
}
if (failure.get() != null) {
throw failure.get();
}
printReport();
return this;
} | class SimpleFeeder implements ReplyHandler {
private final static long REPORT_INTERVAL = TimeUnit.SECONDS.toMillis(10);
private final static long HEADER_INTERVAL = REPORT_INTERVAL * 24;
private final DocumentTypeManager docTypeMgr = new DocumentTypeManager();
private final InputStream in;
private final PrintStream out;
private final PrintStream err;
private final RPCMessageBus mbus;
private final Route route;
private final SourceSession session;
private final long startTime = System.currentTimeMillis();
private final AtomicReference<Throwable> failure = new AtomicReference<>(null);
private final AtomicLong numReplies = new AtomicLong(0);
private long maxLatency = Long.MIN_VALUE;
private long minLatency = Long.MAX_VALUE;
private long nextHeader = startTime + HEADER_INTERVAL;
private long nextReport = startTime + REPORT_INTERVAL;
private long sumLatency = 0;
private final int numThreads;
public static void main(String[] args) throws Throwable {
new SimpleFeeder(new FeederParams().parseArgs(args)).run().close();
}
SimpleFeeder(FeederParams params) {
this.in = params.getStdIn();
this.out = params.getStdOut();
this.err = params.getStdErr();
this.route = params.getRoute();
this.numThreads = params.getNumDispatchThreads();
this.mbus = newMessageBus(docTypeMgr, params.getConfigId());
this.session = newSession(mbus, this, params.isSerialTransferEnabled());
this.docTypeMgr.configure(params.getConfigId());
}
private void sendOperation(VespaXMLFeedReader.Operation op) {
Message msg = newMessage(op);
if (msg == null) {
err.println("ignoring operation; " + op.getType());
return;
}
msg.setContext(System.currentTimeMillis());
msg.setRoute(route);
try {
Error err = session.sendBlocking(msg).getError();
if (err != null) {
failure.set(new IOException(err.toString()));
}
} catch (InterruptedException e) {}
}
void close() {
session.destroy();
mbus.destroy();
}
private Message newMessage(VespaXMLFeedReader.Operation op) {
switch (op.getType()) {
case DOCUMENT: {
PutDocumentMessage message = new PutDocumentMessage(new DocumentPut(op.getDocument()));
message.setCondition(op.getCondition());
return message;
}
case REMOVE: {
RemoveDocumentMessage message = new RemoveDocumentMessage(op.getRemove());
message.setCondition(op.getCondition());
return message;
}
case UPDATE: {
UpdateDocumentMessage message = new UpdateDocumentMessage(op.getDocumentUpdate());
message.setCondition(op.getCondition());
return message;
}
default:
return null;
}
}
@Override
public void handleReply(Reply reply) {
if (failure.get() != null) {
return;
}
if (reply.hasErrors()) {
failure.compareAndSet(null, new IOException(formatErrors(reply)));
return;
}
long now = System.currentTimeMillis();
long latency = now - (long) reply.getContext();
numReplies.incrementAndGet();
accumulateReplies(now, latency);
}
private synchronized void accumulateReplies(long now, long latency) {
minLatency = Math.min(minLatency, latency);
maxLatency = Math.max(maxLatency, latency);
sumLatency += latency;
if (now > nextHeader) {
printHeader();
nextHeader += HEADER_INTERVAL;
}
if (now > nextReport) {
printReport();
nextReport += REPORT_INTERVAL;
}
}
private void printHeader() {
out.println("total time, num messages, min latency, avg latency, max latency");
}
private void printReport() {
out.format("%10d, %12d, %11d, %11d, %11d\n", System.currentTimeMillis() - startTime,
numReplies.get(), minLatency, sumLatency / numReplies.get(), maxLatency);
}
private static String formatErrors(Reply reply) {
StringBuilder out = new StringBuilder();
out.append(reply.getMessage().toString()).append('\n');
for (int i = 0, len = reply.getNumErrors(); i < len; ++i) {
out.append(reply.getError(i).toString()).append('\n');
}
return out.toString();
}
private static RPCMessageBus newMessageBus(DocumentTypeManager docTypeMgr, String configId) {
return new RPCMessageBus(new MessageBusParams().addProtocol(new DocumentProtocol(docTypeMgr)),
new RPCNetworkParams().setSlobrokConfigId(configId),
configId);
}
private static SourceSession newSession(RPCMessageBus mbus, ReplyHandler replyHandler, boolean serial) {
SourceSessionParams params = new SourceSessionParams();
params.setReplyHandler(replyHandler);
if (serial) {
params.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(1));
}
return mbus.getMessageBus().createSourceSession(params);
}
} | class SimpleFeeder implements ReplyHandler {
private final static long REPORT_INTERVAL = TimeUnit.SECONDS.toMillis(10);
private final static long HEADER_INTERVAL = REPORT_INTERVAL * 24;
private final DocumentTypeManager docTypeMgr = new DocumentTypeManager();
private final InputStream in;
private final PrintStream out;
private final PrintStream err;
private final RPCMessageBus mbus;
private final Route route;
private final SourceSession session;
private final long startTime = System.currentTimeMillis();
private final AtomicReference<Throwable> failure = new AtomicReference<>(null);
private final AtomicLong numReplies = new AtomicLong(0);
private long maxLatency = Long.MIN_VALUE;
private long minLatency = Long.MAX_VALUE;
private long nextHeader = startTime + HEADER_INTERVAL;
private long nextReport = startTime + REPORT_INTERVAL;
private long sumLatency = 0;
private final int numThreads;
public static void main(String[] args) throws Throwable {
new SimpleFeeder(new FeederParams().parseArgs(args)).run().close();
}
SimpleFeeder(FeederParams params) {
this.in = params.getStdIn();
this.out = params.getStdOut();
this.err = params.getStdErr();
this.route = params.getRoute();
this.numThreads = params.getNumDispatchThreads();
this.mbus = newMessageBus(docTypeMgr, params.getConfigId());
this.session = newSession(mbus, this, params.isSerialTransferEnabled());
this.docTypeMgr.configure(params.getConfigId());
}
private void sendOperation(VespaXMLFeedReader.Operation op) {
Message msg = newMessage(op);
if (msg == null) {
err.println("ignoring operation; " + op.getType());
return;
}
msg.setContext(System.currentTimeMillis());
msg.setRoute(route);
try {
Error err = session.sendBlocking(msg).getError();
if (err != null) {
failure.set(new IOException(err.toString()));
}
} catch (InterruptedException e) {}
}
void close() {
session.destroy();
mbus.destroy();
}
private Message newMessage(VespaXMLFeedReader.Operation op) {
switch (op.getType()) {
case DOCUMENT: {
PutDocumentMessage message = new PutDocumentMessage(new DocumentPut(op.getDocument()));
message.setCondition(op.getCondition());
return message;
}
case REMOVE: {
RemoveDocumentMessage message = new RemoveDocumentMessage(op.getRemove());
message.setCondition(op.getCondition());
return message;
}
case UPDATE: {
UpdateDocumentMessage message = new UpdateDocumentMessage(op.getDocumentUpdate());
message.setCondition(op.getCondition());
return message;
}
default:
return null;
}
}
@Override
public void handleReply(Reply reply) {
if (failure.get() != null) {
return;
}
if (reply.hasErrors()) {
failure.compareAndSet(null, new IOException(formatErrors(reply)));
return;
}
long now = System.currentTimeMillis();
long latency = now - (long) reply.getContext();
numReplies.incrementAndGet();
accumulateReplies(now, latency);
}
private synchronized void accumulateReplies(long now, long latency) {
minLatency = Math.min(minLatency, latency);
maxLatency = Math.max(maxLatency, latency);
sumLatency += latency;
if (now > nextHeader) {
printHeader();
nextHeader += HEADER_INTERVAL;
}
if (now > nextReport) {
printReport();
nextReport += REPORT_INTERVAL;
}
}
private void printHeader() {
out.println("total time, num messages, min latency, avg latency, max latency");
}
private void printReport() {
out.format("%10d, %12d, %11d, %11d, %11d\n", System.currentTimeMillis() - startTime,
numReplies.get(), minLatency, sumLatency / numReplies.get(), maxLatency);
}
private static String formatErrors(Reply reply) {
StringBuilder out = new StringBuilder();
out.append(reply.getMessage().toString()).append('\n');
for (int i = 0, len = reply.getNumErrors(); i < len; ++i) {
out.append(reply.getError(i).toString()).append('\n');
}
return out.toString();
}
private static RPCMessageBus newMessageBus(DocumentTypeManager docTypeMgr, String configId) {
return new RPCMessageBus(new MessageBusParams().addProtocol(new DocumentProtocol(docTypeMgr)),
new RPCNetworkParams().setSlobrokConfigId(configId),
configId);
}
private static SourceSession newSession(RPCMessageBus mbus, ReplyHandler replyHandler, boolean serial) {
SourceSessionParams params = new SourceSessionParams();
params.setReplyHandler(replyHandler);
if (serial) {
params.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(1));
}
return mbus.getMessageBus().createSourceSession(params);
}
} |
You can not control the Queue and the Policy of failed execution there. | SimpleFeeder run() throws Throwable {
ExecutorService executor = (numThreads > 1)
? new ThreadPoolExecutor(numThreads, numThreads, 0L, TimeUnit.SECONDS,
new SynchronousQueue<>(false),
ThreadFactoryFactory.getDaemonThreadFactory("perf-feeder"),
new ThreadPoolExecutor.CallerRunsPolicy())
: null;
VespaXMLFeedReader reader = new VespaXMLFeedReader(in, docTypeMgr);
printHeader();
long numMessages = 0;
while (failure.get() == null) {
VespaXMLFeedReader.Operation op = new VespaXMLFeedReader.Operation();
reader.read(op);
if (op.getType() == VespaXMLFeedReader.OperationType.INVALID) {
break;
}
if (executor != null) {
executor.execute(() -> sendOperation(op));
} else {
sendOperation(op);
}
++numMessages;
}
while (failure.get() == null && numReplies.get() < numMessages) {
Thread.sleep(100);
}
if (failure.get() != null) {
throw failure.get();
}
printReport();
return this;
} | ? new ThreadPoolExecutor(numThreads, numThreads, 0L, TimeUnit.SECONDS, | SimpleFeeder run() throws Throwable {
ExecutorService executor = (numThreads > 1)
? new ThreadPoolExecutor(numThreads, numThreads, 0L, TimeUnit.SECONDS,
new SynchronousQueue<>(false),
ThreadFactoryFactory.getDaemonThreadFactory("perf-feeder"),
new ThreadPoolExecutor.CallerRunsPolicy())
: null;
VespaXMLFeedReader reader = new VespaXMLFeedReader(in, docTypeMgr);
printHeader();
long numMessages = 0;
while (failure.get() == null) {
VespaXMLFeedReader.Operation op = new VespaXMLFeedReader.Operation();
reader.read(op);
if (op.getType() == VespaXMLFeedReader.OperationType.INVALID) {
break;
}
if (executor != null) {
executor.execute(() -> sendOperation(op));
} else {
sendOperation(op);
}
++numMessages;
}
while (failure.get() == null && numReplies.get() < numMessages) {
Thread.sleep(100);
}
if (failure.get() != null) {
throw failure.get();
}
printReport();
return this;
} | class SimpleFeeder implements ReplyHandler {
private final static long REPORT_INTERVAL = TimeUnit.SECONDS.toMillis(10);
private final static long HEADER_INTERVAL = REPORT_INTERVAL * 24;
private final DocumentTypeManager docTypeMgr = new DocumentTypeManager();
private final InputStream in;
private final PrintStream out;
private final PrintStream err;
private final RPCMessageBus mbus;
private final Route route;
private final SourceSession session;
private final long startTime = System.currentTimeMillis();
private final AtomicReference<Throwable> failure = new AtomicReference<>(null);
private final AtomicLong numReplies = new AtomicLong(0);
private long maxLatency = Long.MIN_VALUE;
private long minLatency = Long.MAX_VALUE;
private long nextHeader = startTime + HEADER_INTERVAL;
private long nextReport = startTime + REPORT_INTERVAL;
private long sumLatency = 0;
private final int numThreads;
public static void main(String[] args) throws Throwable {
new SimpleFeeder(new FeederParams().parseArgs(args)).run().close();
}
SimpleFeeder(FeederParams params) {
this.in = params.getStdIn();
this.out = params.getStdOut();
this.err = params.getStdErr();
this.route = params.getRoute();
this.numThreads = params.getNumDispatchThreads();
this.mbus = newMessageBus(docTypeMgr, params.getConfigId());
this.session = newSession(mbus, this, params.isSerialTransferEnabled());
this.docTypeMgr.configure(params.getConfigId());
}
private void sendOperation(VespaXMLFeedReader.Operation op) {
Message msg = newMessage(op);
if (msg == null) {
err.println("ignoring operation; " + op.getType());
return;
}
msg.setContext(System.currentTimeMillis());
msg.setRoute(route);
try {
Error err = session.sendBlocking(msg).getError();
if (err != null) {
failure.set(new IOException(err.toString()));
}
} catch (InterruptedException e) {}
}
void close() {
session.destroy();
mbus.destroy();
}
private Message newMessage(VespaXMLFeedReader.Operation op) {
switch (op.getType()) {
case DOCUMENT: {
PutDocumentMessage message = new PutDocumentMessage(new DocumentPut(op.getDocument()));
message.setCondition(op.getCondition());
return message;
}
case REMOVE: {
RemoveDocumentMessage message = new RemoveDocumentMessage(op.getRemove());
message.setCondition(op.getCondition());
return message;
}
case UPDATE: {
UpdateDocumentMessage message = new UpdateDocumentMessage(op.getDocumentUpdate());
message.setCondition(op.getCondition());
return message;
}
default:
return null;
}
}
@Override
public void handleReply(Reply reply) {
if (failure.get() != null) {
return;
}
if (reply.hasErrors()) {
failure.compareAndSet(null, new IOException(formatErrors(reply)));
return;
}
long now = System.currentTimeMillis();
long latency = now - (long) reply.getContext();
numReplies.incrementAndGet();
accumulateReplies(now, latency);
}
private synchronized void accumulateReplies(long now, long latency) {
minLatency = Math.min(minLatency, latency);
maxLatency = Math.max(maxLatency, latency);
sumLatency += latency;
if (now > nextHeader) {
printHeader();
nextHeader += HEADER_INTERVAL;
}
if (now > nextReport) {
printReport();
nextReport += REPORT_INTERVAL;
}
}
private void printHeader() {
out.println("total time, num messages, min latency, avg latency, max latency");
}
private void printReport() {
out.format("%10d, %12d, %11d, %11d, %11d\n", System.currentTimeMillis() - startTime,
numReplies.get(), minLatency, sumLatency / numReplies.get(), maxLatency);
}
private static String formatErrors(Reply reply) {
StringBuilder out = new StringBuilder();
out.append(reply.getMessage().toString()).append('\n');
for (int i = 0, len = reply.getNumErrors(); i < len; ++i) {
out.append(reply.getError(i).toString()).append('\n');
}
return out.toString();
}
/** Creates an RPC message bus speaking the document protocol, configured from the given config id. */
private static RPCMessageBus newMessageBus(DocumentTypeManager docTypeMgr, String configId) {
return new RPCMessageBus(new MessageBusParams().addProtocol(new DocumentProtocol(docTypeMgr)),
new RPCNetworkParams().setSlobrokConfigId(configId),
configId);
}
/**
 * Creates a source session on the given bus, delivering replies to the given handler.
 * Serial transfer is enforced by allowing at most one pending message.
 */
private static SourceSession newSession(RPCMessageBus mbus, ReplyHandler replyHandler, boolean serial) {
    SourceSessionParams sessionParams = new SourceSessionParams();
    sessionParams.setReplyHandler(replyHandler);
    if (serial)
        sessionParams.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(1));
    return mbus.getMessageBus().createSourceSession(sessionParams);
}
} | class SimpleFeeder implements ReplyHandler {
private final static long REPORT_INTERVAL = TimeUnit.SECONDS.toMillis(10);
private final static long HEADER_INTERVAL = REPORT_INTERVAL * 24;
private final DocumentTypeManager docTypeMgr = new DocumentTypeManager();
private final InputStream in;
private final PrintStream out;
private final PrintStream err;
private final RPCMessageBus mbus;
private final Route route;
private final SourceSession session;
private final long startTime = System.currentTimeMillis();
private final AtomicReference<Throwable> failure = new AtomicReference<>(null);
private final AtomicLong numReplies = new AtomicLong(0);
private long maxLatency = Long.MIN_VALUE;
private long minLatency = Long.MAX_VALUE;
private long nextHeader = startTime + HEADER_INTERVAL;
private long nextReport = startTime + REPORT_INTERVAL;
private long sumLatency = 0;
private final int numThreads;
/** Parses command-line arguments, runs the feed, then releases messaging resources. */
public static void main(String[] args) throws Throwable {
new SimpleFeeder(new FeederParams().parseArgs(args)).run().close();
}
/**
 * Wires the feeder from parsed parameters: I/O streams, route, message bus and source session.
 *
 * NOTE(review): the document type manager is configured only after the message bus is
 * created from it — presumably safe because the bus holds a reference, but worth confirming.
 */
SimpleFeeder(FeederParams params) {
this.in = params.getStdIn();
this.out = params.getStdOut();
this.err = params.getStdErr();
this.route = params.getRoute();
this.numThreads = params.getNumDispatchThreads();
this.mbus = newMessageBus(docTypeMgr, params.getConfigId());
this.session = newSession(mbus, this, params.isSerialTransferEnabled());
this.docTypeMgr.configure(params.getConfigId());
}
/**
 * Converts the given feed operation to a message and sends it, blocking until a send
 * slot is available. A send error — or an interrupt while waiting — is recorded in
 * {@code failure} so the feed can abort; unsupported operation types are logged and skipped.
 */
private void sendOperation(VespaXMLFeedReader.Operation op) {
    Message msg = newMessage(op);
    if (msg == null) {
        err.println("ignoring operation; " + op.getType());
        return;
    }
    msg.setContext(System.currentTimeMillis()); // the reply handler uses this to compute latency
    msg.setRoute(route);
    try {
        // Renamed from 'err', which shadowed the PrintStream field of the same name.
        Error sendError = session.sendBlocking(msg).getError();
        if (sendError != null) {
            failure.set(new IOException(sendError.toString()));
        }
    } catch (InterruptedException e) {
        // Was silently swallowed: restore the interrupt flag and record the abort
        // (compareAndSet keeps any earlier failure).
        Thread.currentThread().interrupt();
        failure.compareAndSet(null, new IOException("Interrupted while sending " + op.getType(), e));
    }
}
/** Releases messaging resources; call once after feeding completes. */
void close() {
// Destroy the session before the bus it was created from.
session.destroy();
mbus.destroy();
}
/**
 * Translates a parsed feed operation into the corresponding message bus message.
 *
 * @param op the feed operation read from the input
 * @return a put, remove or update message carrying the operation's test-and-set
 *         condition, or null for operation types this feeder does not handle
 */
private Message newMessage(VespaXMLFeedReader.Operation op) {
switch (op.getType()) {
case DOCUMENT: {
PutDocumentMessage message = new PutDocumentMessage(new DocumentPut(op.getDocument()));
message.setCondition(op.getCondition());
return message;
}
case REMOVE: {
RemoveDocumentMessage message = new RemoveDocumentMessage(op.getRemove());
message.setCondition(op.getCondition());
return message;
}
case UPDATE: {
UpdateDocumentMessage message = new UpdateDocumentMessage(op.getDocumentUpdate());
message.setCondition(op.getCondition());
return message;
}
default:
// Caller (sendOperation) treats null as "skip this operation".
return null;
}
}
/**
 * Message bus callback for each reply. Records the first error seen as the feed
 * failure; otherwise computes round-trip latency from the send timestamp stored
 * in the message context and folds it into the aggregates.
 */
@Override
public void handleReply(Reply reply) {
// Once a failure is recorded, stop accounting — the feed is aborting.
if (failure.get() != null) {
return;
}
if (reply.hasErrors()) {
// compareAndSet so only the first failure is kept.
failure.compareAndSet(null, new IOException(formatErrors(reply)));
return;
}
long now = System.currentTimeMillis();
long latency = now - (long) reply.getContext();
numReplies.incrementAndGet();
accumulateReplies(now, latency);
}
/**
 * Folds one reply's latency into the running min/max/sum aggregates and emits
 * periodic report lines (header at a coarser cadence than the report).
 * Synchronized: the aggregate fields are otherwise unguarded.
 */
private synchronized void accumulateReplies(long now, long latency) {
minLatency = Math.min(minLatency, latency);
maxLatency = Math.max(maxLatency, latency);
sumLatency += latency;
if (now > nextHeader) {
printHeader();
nextHeader += HEADER_INTERVAL;
}
if (now > nextReport) {
printReport();
nextReport += REPORT_INTERVAL;
}
}
/** Prints the column legend for subsequent report lines (all values in milliseconds). */
private void printHeader() {
out.println("total time, num messages, min latency, avg latency, max latency");
}
/**
 * Prints one aggregate report line. The reply counter is read once so the
 * count and the derived average come from the same snapshot — it is updated
 * concurrently by handleReply. Only called from accumulateReplies, after at
 * least one reply has been counted, so the divisor is positive.
 */
private void printReport() {
    long replies = numReplies.get();
    out.format("%10d, %12d, %11d, %11d, %11d\n", System.currentTimeMillis() - startTime,
               replies, minLatency, sumLatency / replies, maxLatency);
}
/** Renders a failed reply as one line for the message followed by one line per error. */
private static String formatErrors(Reply reply) {
StringBuilder out = new StringBuilder();
out.append(reply.getMessage().toString()).append('\n');
for (int i = 0, len = reply.getNumErrors(); i < len; ++i) {
out.append(reply.getError(i).toString()).append('\n');
}
return out.toString();
}
/** Creates an RPC message bus speaking the document protocol, configured from the given config id. */
private static RPCMessageBus newMessageBus(DocumentTypeManager docTypeMgr, String configId) {
return new RPCMessageBus(new MessageBusParams().addProtocol(new DocumentProtocol(docTypeMgr)),
new RPCNetworkParams().setSlobrokConfigId(configId),
configId);
}
/**
 * Creates a source session on the given bus, delivering replies to the given handler.
 * Serial transfer is enforced by allowing at most one pending message.
 */
private static SourceSession newSession(RPCMessageBus mbus, ReplyHandler replyHandler, boolean serial) {
SourceSessionParams params = new SourceSessionParams();
params.setReplyHandler(replyHandler);
if (serial) {
params.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(1));
}
return mbus.getMessageBus().createSourceSession(params);
}
} |
Consider wrapping in unmodifiableSet. | Set<Role> roles(AthenzPrincipal principal, URI uri) {
Path path = new Path(uri);
path.matches("/application/v4/tenant/{tenant}/{*}");
Optional<Tenant> tenant = Optional.ofNullable(path.get("tenant")).map(TenantName::from).flatMap(tenants::get);
path.matches("/application/v4/tenant/{tenant}/application/{application}/{*}");
Optional<ApplicationName> application = Optional.ofNullable(path.get("application")).map(ApplicationName::from);
AthenzIdentity identity = principal.getIdentity();
if (athenz.hasHostedOperatorAccess(identity))
return Set.of(Role.hostedOperator());
Set<Role> roleMemberships = new HashSet<>();
if (tenant.isPresent() && isTenantAdmin(identity, tenant.get()))
roleMemberships.add(Role.athenzTenantAdmin(tenant.get().name()));
if (identity.getDomain().equals(SCREWDRIVER_DOMAIN) && application.isPresent() && tenant.isPresent())
if ( tenant.get().type() != Tenant.Type.athenz
|| hasDeployerAccess(identity, ((AthenzTenant) tenant.get()).domain(), application.get()))
roleMemberships.add(Role.tenantPipeline(tenant.get().name(), application.get()));
return roleMemberships.isEmpty()
? Set.of(Role.everyone())
: roleMemberships;
} | : roleMemberships; | Set<Role> roles(AthenzPrincipal principal, URI uri) {
Path path = new Path(uri);
path.matches("/application/v4/tenant/{tenant}/{*}");
Optional<Tenant> tenant = Optional.ofNullable(path.get("tenant")).map(TenantName::from).flatMap(tenants::get);
path.matches("/application/v4/tenant/{tenant}/application/{application}/{*}");
Optional<ApplicationName> application = Optional.ofNullable(path.get("application")).map(ApplicationName::from);
AthenzIdentity identity = principal.getIdentity();
if (athenz.hasHostedOperatorAccess(identity))
return Set.of(Role.hostedOperator());
Set<Role> roleMemberships = new HashSet<>();
if (tenant.isPresent() && isTenantAdmin(identity, tenant.get()))
roleMemberships.add(Role.athenzTenantAdmin(tenant.get().name()));
if (identity.getDomain().equals(SCREWDRIVER_DOMAIN) && application.isPresent() && tenant.isPresent())
if ( tenant.get().type() != Tenant.Type.athenz
|| hasDeployerAccess(identity, ((AthenzTenant) tenant.get()).domain(), application.get()))
roleMemberships.add(Role.tenantPipeline(tenant.get().name(), application.get()));
return roleMemberships.isEmpty()
? Set.of(Role.everyone())
: Set.copyOf(roleMemberships);
} | class AthenzRoleFilter extends CorsRequestFilterBase {
private static final Logger logger = Logger.getLogger(AthenzRoleFilter.class.getName());
private final AthenzFacade athenz;
private final TenantController tenants;
/**
 * Creates the filter.
 *
 * @param config CORS config; its allowed URLs are forwarded to the base class
 * @param athenzClientFactory used to build the Athenz facade for membership checks
 * @param controller source of the tenant controller used to look up tenants
 */
@Inject
public AthenzRoleFilter(CorsFilterConfig config, AthenzClientFactory athenzClientFactory, Controller controller) {
super(Set.copyOf(config.allowedUrls()));
this.athenz = new AthenzFacade(athenzClientFactory);
this.tenants = controller.tenants();
}
/**
 * Resolves the authenticated Athenz principal's roles for the requested URI and
 * stores them in the request's security context attribute.
 *
 * @return empty on success; a 401 response if role resolution fails for any reason
 */
@Override
protected Optional<ErrorResponse> filterRequest(DiscFilterRequest request) {
try {
AthenzPrincipal athenzPrincipal = (AthenzPrincipal) request.getUserPrincipal();
request.setAttribute(SecurityContext.ATTRIBUTE_NAME, new SecurityContext(athenzPrincipal,
roles(athenzPrincipal, request.getUri())));
return Optional.empty();
}
// Deliberately broad at this boundary: any failure (missing/wrong principal,
// Athenz errors) results in access being denied rather than a crash.
catch (Exception e) {
logger.log(LogLevel.DEBUG, () -> "Exception mapping Athenz principal to roles: " + Exceptions.toMessageString(e));
return Optional.of(new ErrorResponse(Response.Status.UNAUTHORIZED, "Access denied"));
}
}
/** Returns whether the given identity administrates the given tenant. */
private boolean isTenantAdmin(AthenzIdentity identity, Tenant tenant) {
    Tenant.Type type = tenant.type();
    if (type == Tenant.Type.athenz)
        return athenz.hasTenantAdminAccess(identity, ((AthenzTenant) tenant).domain());
    if (type == Tenant.Type.user)
        return ((UserTenant) tenant).is(identity.getName()) || athenz.hasHostedOperatorAccess(identity);
    throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
/**
 * Returns whether the given identity may deploy the given application in the
 * tenant's domain. Athenz client failures are wrapped (with cause) so callers
 * see them as authorization errors rather than checked client exceptions.
 */
private boolean hasDeployerAccess(AthenzIdentity identity, AthenzDomain tenantDomain, ApplicationName application) {
try {
return athenz.hasApplicationAccess(identity,
ApplicationAction.deploy,
tenantDomain,
application);
} catch (ZmsClientException e) {
throw new RuntimeException("Failed to authorize operation: (" + e.getMessage() + ")", e);
}
}
} | class AthenzRoleFilter extends CorsRequestFilterBase {
private static final Logger logger = Logger.getLogger(AthenzRoleFilter.class.getName());
private final AthenzFacade athenz;
private final TenantController tenants;
/**
 * Creates the filter.
 *
 * @param config CORS config; its allowed URLs are forwarded to the base class
 * @param athenzClientFactory used to build the Athenz facade for membership checks
 * @param controller source of the tenant controller used to look up tenants
 */
@Inject
public AthenzRoleFilter(CorsFilterConfig config, AthenzClientFactory athenzClientFactory, Controller controller) {
super(Set.copyOf(config.allowedUrls()));
this.athenz = new AthenzFacade(athenzClientFactory);
this.tenants = controller.tenants();
}
/**
 * Resolves the authenticated Athenz principal's roles for the requested URI and
 * stores them in the request's security context attribute.
 *
 * @return empty on success; a 401 response if role resolution fails for any reason
 */
@Override
protected Optional<ErrorResponse> filterRequest(DiscFilterRequest request) {
try {
AthenzPrincipal athenzPrincipal = (AthenzPrincipal) request.getUserPrincipal();
request.setAttribute(SecurityContext.ATTRIBUTE_NAME, new SecurityContext(athenzPrincipal,
roles(athenzPrincipal, request.getUri())));
return Optional.empty();
}
// Deliberately broad at this boundary: any failure (missing/wrong principal,
// Athenz errors) results in access being denied rather than a crash.
catch (Exception e) {
logger.log(LogLevel.DEBUG, () -> "Exception mapping Athenz principal to roles: " + Exceptions.toMessageString(e));
return Optional.of(new ErrorResponse(Response.Status.UNAUTHORIZED, "Access denied"));
}
}
/** Returns whether the given identity administrates the given tenant. */
private boolean isTenantAdmin(AthenzIdentity identity, Tenant tenant) {
switch (tenant.type()) {
case athenz: return athenz.hasTenantAdminAccess(identity, ((AthenzTenant) tenant).domain());
// A user tenant is administrated by its owning user, or by hosted operators.
case user: return ((UserTenant) tenant).is(identity.getName()) || athenz.hasHostedOperatorAccess(identity);
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
}
/**
 * Returns whether the given identity may deploy the given application in the
 * tenant's domain. Athenz client failures are wrapped (with cause) so callers
 * see them as authorization errors rather than checked client exceptions.
 */
private boolean hasDeployerAccess(AthenzIdentity identity, AthenzDomain tenantDomain, ApplicationName application) {
try {
return athenz.hasApplicationAccess(identity,
ApplicationAction.deploy,
tenantDomain,
application);
} catch (ZmsClientException e) {
throw new RuntimeException("Failed to authorize operation: (" + e.getMessage() + ")", e);
}
}
} |
Can cpuPeriod() and cpuQuota() be updated to return long? | private CreateContainerCmd createCreateContainerCmd() {
List<Bind> volumeBinds = volumeBindSpecs.stream().map(Bind::parse).collect(Collectors.toList());
final HostConfig hostConfig = new HostConfig()
.withSecurityOpts(new ArrayList<>(securityOpts))
.withBinds(volumeBinds)
.withUlimits(ulimits)
.withCapAdd(addCapabilities.toArray(new Capability[0]))
.withCapDrop(dropCapabilities.toArray(new Capability[0]))
.withPrivileged(privileged);
containerResources.ifPresent(cr -> hostConfig
.withCpuShares(cr.cpuShares())
.withMemory(cr.memoryBytes())
.withMemorySwap(cr.memoryBytes())
.withCpuPeriod(cr.cpuQuota() > 0 ? (long) cr.cpuPeriod() : null)
.withCpuQuota(cr.cpuQuota() > 0 ? (long) cr.cpuQuota() : null));
final CreateContainerCmd containerCmd = docker
.createContainerCmd(dockerImage.asString())
.withHostConfig(hostConfig)
.withName(containerName.asString())
.withLabels(labels)
.withEnv(environmentAssignments);
networkMode
.filter(mode -> ! mode.toLowerCase().equals("host"))
.ifPresent(mode -> containerCmd.withMacAddress(generateMACAddress(hostName, ipv4Address, ipv6Address)));
hostName.ifPresent(containerCmd::withHostName);
networkMode.ifPresent(hostConfig::withNetworkMode);
ipv4Address.ifPresent(containerCmd::withIpv4Address);
ipv6Address.ifPresent(containerCmd::withIpv6Address);
entrypoint.ifPresent(containerCmd::withEntrypoint);
return containerCmd;
} | .withCpuPeriod(cr.cpuQuota() > 0 ? (long) cr.cpuPeriod() : null) | private CreateContainerCmd createCreateContainerCmd() {
List<Bind> volumeBinds = volumeBindSpecs.stream().map(Bind::parse).collect(Collectors.toList());
final HostConfig hostConfig = new HostConfig()
.withSecurityOpts(new ArrayList<>(securityOpts))
.withBinds(volumeBinds)
.withUlimits(ulimits)
.withCapAdd(addCapabilities.toArray(new Capability[0]))
.withCapDrop(dropCapabilities.toArray(new Capability[0]))
.withPrivileged(privileged);
containerResources.ifPresent(cr -> hostConfig
.withCpuShares(cr.cpuShares())
.withMemory(cr.memoryBytes())
.withMemorySwap(cr.memoryBytes())
.withCpuPeriod(cr.cpuQuota() > 0 ? (long) cr.cpuPeriod() : null)
.withCpuQuota(cr.cpuQuota() > 0 ? (long) cr.cpuQuota() : null));
final CreateContainerCmd containerCmd = docker
.createContainerCmd(dockerImage.asString())
.withHostConfig(hostConfig)
.withName(containerName.asString())
.withLabels(labels)
.withEnv(environmentAssignments);
networkMode
.filter(mode -> ! mode.toLowerCase().equals("host"))
.ifPresent(mode -> containerCmd.withMacAddress(generateMACAddress(hostName, ipv4Address, ipv6Address)));
hostName.ifPresent(containerCmd::withHostName);
networkMode.ifPresent(hostConfig::withNetworkMode);
ipv4Address.ifPresent(containerCmd::withIpv4Address);
ipv6Address.ifPresent(containerCmd::withIpv6Address);
entrypoint.ifPresent(containerCmd::withEntrypoint);
return containerCmd;
} | class CreateContainerCommandImpl implements Docker.CreateContainerCommand {
private final DockerClient docker;
private final DockerImage dockerImage;
private final ContainerName containerName;
private final Map<String, String> labels = new HashMap<>();
private final List<String> environmentAssignments = new ArrayList<>();
private final List<String> volumeBindSpecs = new ArrayList<>();
private final List<Ulimit> ulimits = new ArrayList<>();
private final Set<Capability> addCapabilities = new HashSet<>();
private final Set<Capability> dropCapabilities = new HashSet<>();
private final Set<String> securityOpts = new HashSet<>();
private Optional<String> hostName = Optional.empty();
private Optional<ContainerResources> containerResources = Optional.empty();
private Optional<String> networkMode = Optional.empty();
private Optional<String> ipv4Address = Optional.empty();
private Optional<String> ipv6Address = Optional.empty();
private Optional<String[]> entrypoint = Optional.empty();
private boolean privileged = false;
/** Creates a command that will start a container from the given image under the given name. */
CreateContainerCommandImpl(DockerClient docker, DockerImage dockerImage, ContainerName containerName) {
this.docker = docker;
this.dockerImage = dockerImage;
this.containerName = containerName;
}
/** Sets the hostname the container will see. */
@Override
public Docker.CreateContainerCommand withHostName(String hostName) {
this.hostName = Optional.of(hostName);
return this;
}
/** Sets the CPU and memory limits applied to the container. */
@Override
public Docker.CreateContainerCommand withResources(ContainerResources containerResources) {
this.containerResources = Optional.of(containerResources);
return this;
}
/**
 * Adds a docker label to the container.
 *
 * @param name  label name; must not contain '=' (the separator used on the command line)
 * @param value label value
 * @throws IllegalArgumentException if the name contains '='
 */
@Override
public Docker.CreateContainerCommand withLabel(String name, String value) {
    // Was a bare 'assert', which is a no-op unless the JVM runs with -ea; validate unconditionally.
    if (name.contains("="))
        throw new IllegalArgumentException("Label name cannot contain '=': " + name);
    labels.put(name, value);
    return this;
}
/** Labels the container with the name of the system managing it. */
public Docker.CreateContainerCommand withManagedBy(String manager) {
return withLabel(LABEL_NAME_MANAGEDBY, manager);
}
/** Adds a Linux capability to grant the container (see docker --cap-add). */
@Override
public Docker.CreateContainerCommand withAddCapability(String capabilityName) {
addCapabilities.add(Capability.valueOf(capabilityName));
return this;
}
/** Adds a Linux capability to withhold from the container (see docker --cap-drop). */
@Override
public Docker.CreateContainerCommand withDropCapability(String capabilityName) {
dropCapabilities.add(Capability.valueOf(capabilityName));
return this;
}
/** Adds a security option passed to the container (see docker --security-opt). */
@Override
public Docker.CreateContainerCommand withSecurityOpts(String securityOpt) {
securityOpts.add(securityOpt);
return this;
}
/** Sets whether the container runs privileged. */
@Override
public Docker.CreateContainerCommand withPrivileged(boolean privileged) {
this.privileged = privileged;
return this;
}
/** Adds a ulimit (soft and hard) applied inside the container. */
@Override
public Docker.CreateContainerCommand withUlimit(String name, int softLimit, int hardLimit) {
ulimits.add(new Ulimit(name, softLimit, hardLimit));
return this;
}
/**
 * Sets the container entrypoint: first element is the executable, the rest its arguments.
 *
 * @throws IllegalArgumentException if no element is given
 */
@Override
public Docker.CreateContainerCommand withEntrypoint(String... entrypoint) {
if (entrypoint.length < 1) throw new IllegalArgumentException("Entrypoint must contain at least 1 element");
this.entrypoint = Optional.of(entrypoint);
return this;
}
/**
 * Adds an environment variable assignment ({@code name=value}) to the container.
 *
 * @throws IllegalArgumentException if the name contains '='
 */
@Override
public Docker.CreateContainerCommand withEnvironment(String name, String value) {
    // Was a bare 'assert', which is a no-op unless the JVM runs with -ea; validate unconditionally.
    if (name.indexOf('=') != -1)
        throw new IllegalArgumentException("Environment variable name cannot contain '=': " + name);
    environmentAssignments.add(name + "=" + value);
    return this;
}
/** Bind-mounts path at volumePath; ':Z' requests a private SELinux relabel (docker bind option). */
@Override
public Docker.CreateContainerCommand withVolume(Path path, Path volumePath) {
volumeBindSpecs.add(path + ":" + volumePath + ":Z");
return this;
}
/** Bind-mounts path at volumePath; ':z' requests a shared SELinux relabel (docker bind option). */
@Override
public Docker.CreateContainerCommand withSharedVolume(Path path, Path volumePath) {
volumeBindSpecs.add(path + ":" + volumePath + ":z");
return this;
}
/** Sets the docker network mode (e.g. a named network or "host"). */
@Override
public Docker.CreateContainerCommand withNetworkMode(String mode) {
networkMode = Optional.of(mode);
return this;
}
/** Assigns a static IP to the container; the address family decides which slot it fills. */
@Override
public Docker.CreateContainerCommand withIpAddress(InetAddress address) {
if (address instanceof Inet6Address) {
ipv6Address = Optional.of(address.getHostAddress());
} else {
ipv4Address = Optional.of(address.getHostAddress());
}
return this;
}
/**
 * Executes the accumulated create-container command.
 *
 * @throws DockerException wrapping any failure, with the equivalent
 *         'docker run' command line (toString) included for diagnosis
 */
@Override
public void create() {
try {
createCreateContainerCmd().exec();
} catch (RuntimeException e) {
throw new DockerException("Failed to create container " + toString(), e);
}
}
/** Maps ("--env", {"A", "B", "C"}) to "--env A --env B --env C". */
private static String toRepeatedOption(String option, Collection<String> optionValues) {
    StringBuilder joined = new StringBuilder();
    for (String optionValue : optionValues) {
        if (joined.length() > 0)
            joined.append(' ');
        joined.append(option).append(' ').append(optionValue);
    }
    return joined.toString();
}
/** Maps ("--net", Optional.of(x)) to "--net x", and an empty value to "". */
private static String toOptionalOption(String option, Optional<?> value) {
    if ( ! value.isPresent())
        return "";
    return option + " " + value.get();
}
/** Returns the option itself when the flag is set, and "" otherwise. */
private static String toFlagOption(String option, boolean value) {
    if ( ! value)
        return "";
    return option;
}
/**
 * Renders this command as the equivalent 'docker run' argument list — used in
 * error messages (see create()), not executed. Empty option groups are omitted.
 */
@Override
public String toString() {
List<String> labelList = labels.entrySet().stream()
.map(entry -> entry.getKey() + "=" + entry.getValue()).collect(Collectors.toList());
List<String> ulimitList = ulimits.stream()
.map(ulimit -> ulimit.getName() + "=" + ulimit.getSoft() + ":" + ulimit.getHard())
.collect(Collectors.toList());
// Capabilities are sorted so the rendering is stable regardless of set iteration order.
List<String> addCapabilitiesList = addCapabilities.stream().map(Enum<Capability>::toString).sorted().collect(Collectors.toList());
List<String> dropCapabilitiesList = dropCapabilities.stream().map(Enum<Capability>::toString).sorted().collect(Collectors.toList());
// Entrypoint splits into the executable (first element) and its arguments (the rest).
Optional<String> entrypointExecuteable = entrypoint.map(args -> args[0]);
String entrypointArgs = entrypoint.map(Stream::of).orElseGet(Stream::empty)
.skip(1)
.collect(Collectors.joining(" "));
return Stream.of(
"--name " + containerName.asString(),
toOptionalOption("--hostname", hostName),
toOptionalOption("--cpu-shares", containerResources.map(ContainerResources::cpuShares)),
toOptionalOption("--cpus", containerResources.map(ContainerResources::cpus)),
toOptionalOption("--memory", containerResources.map(ContainerResources::memoryBytes)),
toRepeatedOption("--label", labelList),
toRepeatedOption("--ulimit", ulimitList),
toRepeatedOption("--env", environmentAssignments),
toRepeatedOption("--volume", volumeBindSpecs),
toRepeatedOption("--cap-add", addCapabilitiesList),
toRepeatedOption("--cap-drop", dropCapabilitiesList),
toRepeatedOption("--security-opt", securityOpts),
toOptionalOption("--net", networkMode),
toOptionalOption("--ip", ipv4Address),
toOptionalOption("--ip6", ipv6Address),
toOptionalOption("--entrypoint", entrypointExecuteable),
toFlagOption("--privileged", privileged),
dockerImage.asString(),
entrypointArgs)
.filter(s -> !s.isEmpty())
.collect(Collectors.joining(" "));
}
/**
 * Deterministically derives a MAC address from the hostname and IP addresses:
 * the concatenated values seed a PRNG whose first six bytes become the address,
 * with the first byte forced to locally-administered (bit 1 set) unicast (bit 0 clear).
 */
static String generateMACAddress(Optional<String> hostname, Optional<String> ipv4Address, Optional<String> ipv6Address) {
    String seed = hostname.orElse("") + ipv4Address.orElse("") + ipv6Address.orElse("");
    byte[] macAddr = new byte[6];
    getPRNG(seed).nextBytes(macAddr);
    macAddr[0] = (byte) ((macAddr[0] | 2) & 254); // locally administered, unicast
    StringBuilder mac = new StringBuilder();
    for (byte octet : macAddr) {
        if (mac.length() > 0)
            mac.append(':');
        mac.append(String.format("%02x", octet));
    }
    return mac.toString();
}
/**
 * Returns a SHA1PRNG deterministically seeded from the given string, so equal
 * seeds yield equal byte streams (generateMACAddress relies on this).
 */
private static Random getPRNG(String seed) {
    try {
        SecureRandom rand = SecureRandom.getInstance("SHA1PRNG");
        // Pin the charset: the no-arg getBytes() uses the platform default, which would
        // make the derived MAC differ between hosts for non-ASCII seeds.
        rand.setSeed(seed.getBytes(java.nio.charset.StandardCharsets.UTF_8));
        return rand;
    } catch (NoSuchAlgorithmException e) {
        throw new RuntimeException("Failed to get pseudo-random number generator", e);
    }
}
} | class CreateContainerCommandImpl implements Docker.CreateContainerCommand {
private final DockerClient docker;
private final DockerImage dockerImage;
private final ContainerName containerName;
private final Map<String, String> labels = new HashMap<>();
private final List<String> environmentAssignments = new ArrayList<>();
private final List<String> volumeBindSpecs = new ArrayList<>();
private final List<Ulimit> ulimits = new ArrayList<>();
private final Set<Capability> addCapabilities = new HashSet<>();
private final Set<Capability> dropCapabilities = new HashSet<>();
private final Set<String> securityOpts = new HashSet<>();
private Optional<String> hostName = Optional.empty();
private Optional<ContainerResources> containerResources = Optional.empty();
private Optional<String> networkMode = Optional.empty();
private Optional<String> ipv4Address = Optional.empty();
private Optional<String> ipv6Address = Optional.empty();
private Optional<String[]> entrypoint = Optional.empty();
private boolean privileged = false;
/** Creates a command that will start a container from the given image under the given name. */
CreateContainerCommandImpl(DockerClient docker, DockerImage dockerImage, ContainerName containerName) {
this.docker = docker;
this.dockerImage = dockerImage;
this.containerName = containerName;
}
/** Sets the hostname the container will see. */
@Override
public Docker.CreateContainerCommand withHostName(String hostName) {
this.hostName = Optional.of(hostName);
return this;
}
/** Sets the CPU and memory limits applied to the container. */
@Override
public Docker.CreateContainerCommand withResources(ContainerResources containerResources) {
this.containerResources = Optional.of(containerResources);
return this;
}
/**
 * Adds a docker label to the container.
 *
 * @param name  label name; must not contain '=' (the separator used on the command line)
 * @param value label value
 * @throws IllegalArgumentException if the name contains '='
 */
@Override
public Docker.CreateContainerCommand withLabel(String name, String value) {
    // Was a bare 'assert', which is a no-op unless the JVM runs with -ea; validate unconditionally.
    if (name.contains("="))
        throw new IllegalArgumentException("Label name cannot contain '=': " + name);
    labels.put(name, value);
    return this;
}
/** Labels the container with the name of the system managing it. */
public Docker.CreateContainerCommand withManagedBy(String manager) {
return withLabel(LABEL_NAME_MANAGEDBY, manager);
}
/** Adds a Linux capability to grant the container (see docker --cap-add). */
@Override
public Docker.CreateContainerCommand withAddCapability(String capabilityName) {
addCapabilities.add(Capability.valueOf(capabilityName));
return this;
}
/** Adds a Linux capability to withhold from the container (see docker --cap-drop). */
@Override
public Docker.CreateContainerCommand withDropCapability(String capabilityName) {
dropCapabilities.add(Capability.valueOf(capabilityName));
return this;
}
/** Adds a security option passed to the container (see docker --security-opt). */
@Override
public Docker.CreateContainerCommand withSecurityOpts(String securityOpt) {
securityOpts.add(securityOpt);
return this;
}
/** Sets whether the container runs privileged. */
@Override
public Docker.CreateContainerCommand withPrivileged(boolean privileged) {
this.privileged = privileged;
return this;
}
/** Adds a ulimit (soft and hard) applied inside the container. */
@Override
public Docker.CreateContainerCommand withUlimit(String name, int softLimit, int hardLimit) {
ulimits.add(new Ulimit(name, softLimit, hardLimit));
return this;
}
/**
 * Sets the container entrypoint: first element is the executable, the rest its arguments.
 *
 * @throws IllegalArgumentException if no element is given
 */
@Override
public Docker.CreateContainerCommand withEntrypoint(String... entrypoint) {
if (entrypoint.length < 1) throw new IllegalArgumentException("Entrypoint must contain at least 1 element");
this.entrypoint = Optional.of(entrypoint);
return this;
}
/**
 * Adds an environment variable assignment ({@code name=value}) to the container.
 *
 * @throws IllegalArgumentException if the name contains '='
 */
@Override
public Docker.CreateContainerCommand withEnvironment(String name, String value) {
    // Was a bare 'assert', which is a no-op unless the JVM runs with -ea; validate unconditionally.
    if (name.indexOf('=') != -1)
        throw new IllegalArgumentException("Environment variable name cannot contain '=': " + name);
    environmentAssignments.add(name + "=" + value);
    return this;
}
/** Bind-mounts path at volumePath; ':Z' requests a private SELinux relabel (docker bind option). */
@Override
public Docker.CreateContainerCommand withVolume(Path path, Path volumePath) {
volumeBindSpecs.add(path + ":" + volumePath + ":Z");
return this;
}
/** Bind-mounts path at volumePath; ':z' requests a shared SELinux relabel (docker bind option). */
@Override
public Docker.CreateContainerCommand withSharedVolume(Path path, Path volumePath) {
volumeBindSpecs.add(path + ":" + volumePath + ":z");
return this;
}
/** Sets the docker network mode (e.g. a named network or "host"). */
@Override
public Docker.CreateContainerCommand withNetworkMode(String mode) {
networkMode = Optional.of(mode);
return this;
}
/** Assigns a static IP to the container; the address family decides which slot it fills. */
@Override
public Docker.CreateContainerCommand withIpAddress(InetAddress address) {
if (address instanceof Inet6Address) {
ipv6Address = Optional.of(address.getHostAddress());
} else {
ipv4Address = Optional.of(address.getHostAddress());
}
return this;
}
/**
 * Executes the accumulated create-container command.
 *
 * @throws DockerException wrapping any failure, with the equivalent
 *         'docker run' command line (toString) included for diagnosis
 */
@Override
public void create() {
try {
createCreateContainerCmd().exec();
} catch (RuntimeException e) {
throw new DockerException("Failed to create container " + toString(), e);
}
}
/** Maps ("--env", {"A", "B", "C"}) to "--env A --env B --env C" */
private static String toRepeatedOption(String option, Collection<String> optionValues) {
return optionValues.stream()
.map(optionValue -> option + " " + optionValue)
.collect(Collectors.joining(" "));
}
/** Maps ("--net", Optional.of(x)) to "--net x", and an empty value to "". */
private static String toOptionalOption(String option, Optional<?> value) {
return value.map(o -> option + " " + o).orElse("");
}
/** Returns the option itself when the flag is set, and "" otherwise. */
private static String toFlagOption(String option, boolean value) {
return value ? option : "";
}
/**
 * Renders this command as the equivalent 'docker run' argument list — used in
 * error messages (see create()), not executed. Empty option groups are omitted.
 */
@Override
public String toString() {
List<String> labelList = labels.entrySet().stream()
.map(entry -> entry.getKey() + "=" + entry.getValue()).collect(Collectors.toList());
List<String> ulimitList = ulimits.stream()
.map(ulimit -> ulimit.getName() + "=" + ulimit.getSoft() + ":" + ulimit.getHard())
.collect(Collectors.toList());
// Capabilities are sorted so the rendering is stable regardless of set iteration order.
List<String> addCapabilitiesList = addCapabilities.stream().map(Enum<Capability>::toString).sorted().collect(Collectors.toList());
List<String> dropCapabilitiesList = dropCapabilities.stream().map(Enum<Capability>::toString).sorted().collect(Collectors.toList());
// Entrypoint splits into the executable (first element) and its arguments (the rest).
Optional<String> entrypointExecuteable = entrypoint.map(args -> args[0]);
String entrypointArgs = entrypoint.map(Stream::of).orElseGet(Stream::empty)
.skip(1)
.collect(Collectors.joining(" "));
return Stream.of(
"--name " + containerName.asString(),
toOptionalOption("--hostname", hostName),
toOptionalOption("--cpu-shares", containerResources.map(ContainerResources::cpuShares)),
toOptionalOption("--cpus", containerResources.map(ContainerResources::cpus)),
toOptionalOption("--memory", containerResources.map(ContainerResources::memoryBytes)),
toRepeatedOption("--label", labelList),
toRepeatedOption("--ulimit", ulimitList),
toRepeatedOption("--env", environmentAssignments),
toRepeatedOption("--volume", volumeBindSpecs),
toRepeatedOption("--cap-add", addCapabilitiesList),
toRepeatedOption("--cap-drop", dropCapabilitiesList),
toRepeatedOption("--security-opt", securityOpts),
toOptionalOption("--net", networkMode),
toOptionalOption("--ip", ipv4Address),
toOptionalOption("--ip6", ipv6Address),
toOptionalOption("--entrypoint", entrypointExecuteable),
toFlagOption("--privileged", privileged),
dockerImage.asString(),
entrypointArgs)
.filter(s -> !s.isEmpty())
.collect(Collectors.joining(" "));
}
/**
 * Generates a pseudo-random MAC address based on the hostname, IPv4- and IPv6-address.
 * Equal inputs always yield the same address (the PRNG is seeded from them); the
 * first byte is forced to locally-administered (bit 1 set) unicast (bit 0 clear).
 */
static String generateMACAddress(Optional<String> hostname, Optional<String> ipv4Address, Optional<String> ipv6Address) {
final String seed = hostname.orElse("") + ipv4Address.orElse("") + ipv6Address.orElse("");
Random rand = getPRNG(seed);
byte[] macAddr = new byte[6];
rand.nextBytes(macAddr);
macAddr[0] = (byte) ((macAddr[0] | 2) & 254);
return IntStream.range(0, macAddr.length)
.mapToObj(i -> String.format("%02x", macAddr[i]))
.collect(Collectors.joining(":"));
}
/**
 * Returns a SHA1PRNG deterministically seeded from the given string, so equal
 * seeds yield equal byte streams (generateMACAddress relies on this).
 */
private static Random getPRNG(String seed) {
    try {
        SecureRandom rand = SecureRandom.getInstance("SHA1PRNG");
        // Pin the charset: the no-arg getBytes() uses the platform default, which would
        // make the derived MAC differ between hosts for non-ASCII seeds.
        rand.setSeed(seed.getBytes(java.nio.charset.StandardCharsets.UTF_8));
        return rand;
    } catch (NoSuchAlgorithmException e) {
        throw new RuntimeException("Failed to get pseudo-random number generator", e);
    }
}
} |
Not really, for some reason the update command requires `Integer`: https://github.com/docker-java/docker-java/blob/c5c89327108abdc50aa41d7711d4743b8bac75fe/src/main/java/com/github/dockerjava/api/command/UpdateContainerCmd.java#L25:L32 | private CreateContainerCmd createCreateContainerCmd() {
List<Bind> volumeBinds = volumeBindSpecs.stream().map(Bind::parse).collect(Collectors.toList());
final HostConfig hostConfig = new HostConfig()
.withSecurityOpts(new ArrayList<>(securityOpts))
.withBinds(volumeBinds)
.withUlimits(ulimits)
.withCapAdd(addCapabilities.toArray(new Capability[0]))
.withCapDrop(dropCapabilities.toArray(new Capability[0]))
.withPrivileged(privileged);
containerResources.ifPresent(cr -> hostConfig
.withCpuShares(cr.cpuShares())
.withMemory(cr.memoryBytes())
.withMemorySwap(cr.memoryBytes())
.withCpuPeriod(cr.cpuQuota() > 0 ? (long) cr.cpuPeriod() : null)
.withCpuQuota(cr.cpuQuota() > 0 ? (long) cr.cpuQuota() : null));
final CreateContainerCmd containerCmd = docker
.createContainerCmd(dockerImage.asString())
.withHostConfig(hostConfig)
.withName(containerName.asString())
.withLabels(labels)
.withEnv(environmentAssignments);
networkMode
.filter(mode -> ! mode.toLowerCase().equals("host"))
.ifPresent(mode -> containerCmd.withMacAddress(generateMACAddress(hostName, ipv4Address, ipv6Address)));
hostName.ifPresent(containerCmd::withHostName);
networkMode.ifPresent(hostConfig::withNetworkMode);
ipv4Address.ifPresent(containerCmd::withIpv4Address);
ipv6Address.ifPresent(containerCmd::withIpv6Address);
entrypoint.ifPresent(containerCmd::withEntrypoint);
return containerCmd;
} | .withCpuPeriod(cr.cpuQuota() > 0 ? (long) cr.cpuPeriod() : null) | private CreateContainerCmd createCreateContainerCmd() {
List<Bind> volumeBinds = volumeBindSpecs.stream().map(Bind::parse).collect(Collectors.toList());
final HostConfig hostConfig = new HostConfig()
.withSecurityOpts(new ArrayList<>(securityOpts))
.withBinds(volumeBinds)
.withUlimits(ulimits)
.withCapAdd(addCapabilities.toArray(new Capability[0]))
.withCapDrop(dropCapabilities.toArray(new Capability[0]))
.withPrivileged(privileged);
containerResources.ifPresent(cr -> hostConfig
.withCpuShares(cr.cpuShares())
.withMemory(cr.memoryBytes())
.withMemorySwap(cr.memoryBytes())
.withCpuPeriod(cr.cpuQuota() > 0 ? (long) cr.cpuPeriod() : null)
.withCpuQuota(cr.cpuQuota() > 0 ? (long) cr.cpuQuota() : null));
final CreateContainerCmd containerCmd = docker
.createContainerCmd(dockerImage.asString())
.withHostConfig(hostConfig)
.withName(containerName.asString())
.withLabels(labels)
.withEnv(environmentAssignments);
networkMode
.filter(mode -> ! mode.toLowerCase().equals("host"))
.ifPresent(mode -> containerCmd.withMacAddress(generateMACAddress(hostName, ipv4Address, ipv6Address)));
hostName.ifPresent(containerCmd::withHostName);
networkMode.ifPresent(hostConfig::withNetworkMode);
ipv4Address.ifPresent(containerCmd::withIpv4Address);
ipv6Address.ifPresent(containerCmd::withIpv6Address);
entrypoint.ifPresent(containerCmd::withEntrypoint);
return containerCmd;
} | class CreateContainerCommandImpl implements Docker.CreateContainerCommand {
private final DockerClient docker;
private final DockerImage dockerImage;
private final ContainerName containerName;
private final Map<String, String> labels = new HashMap<>();
private final List<String> environmentAssignments = new ArrayList<>();
private final List<String> volumeBindSpecs = new ArrayList<>();
private final List<Ulimit> ulimits = new ArrayList<>();
private final Set<Capability> addCapabilities = new HashSet<>();
private final Set<Capability> dropCapabilities = new HashSet<>();
private final Set<String> securityOpts = new HashSet<>();
private Optional<String> hostName = Optional.empty();
private Optional<ContainerResources> containerResources = Optional.empty();
private Optional<String> networkMode = Optional.empty();
private Optional<String> ipv4Address = Optional.empty();
private Optional<String> ipv6Address = Optional.empty();
private Optional<String[]> entrypoint = Optional.empty();
private boolean privileged = false;
CreateContainerCommandImpl(DockerClient docker, DockerImage dockerImage, ContainerName containerName) {
this.docker = docker;
this.dockerImage = dockerImage;
this.containerName = containerName;
}
@Override
public Docker.CreateContainerCommand withHostName(String hostName) {
this.hostName = Optional.of(hostName);
return this;
}
@Override
public Docker.CreateContainerCommand withResources(ContainerResources containerResources) {
this.containerResources = Optional.of(containerResources);
return this;
}
@Override
public Docker.CreateContainerCommand withLabel(String name, String value) {
assert !name.contains("=");
labels.put(name, value);
return this;
}
public Docker.CreateContainerCommand withManagedBy(String manager) {
return withLabel(LABEL_NAME_MANAGEDBY, manager);
}
@Override
public Docker.CreateContainerCommand withAddCapability(String capabilityName) {
addCapabilities.add(Capability.valueOf(capabilityName));
return this;
}
@Override
public Docker.CreateContainerCommand withDropCapability(String capabilityName) {
dropCapabilities.add(Capability.valueOf(capabilityName));
return this;
}
@Override
public Docker.CreateContainerCommand withSecurityOpts(String securityOpt) {
securityOpts.add(securityOpt);
return this;
}
@Override
public Docker.CreateContainerCommand withPrivileged(boolean privileged) {
this.privileged = privileged;
return this;
}
@Override
public Docker.CreateContainerCommand withUlimit(String name, int softLimit, int hardLimit) {
ulimits.add(new Ulimit(name, softLimit, hardLimit));
return this;
}
@Override
public Docker.CreateContainerCommand withEntrypoint(String... entrypoint) {
if (entrypoint.length < 1) throw new IllegalArgumentException("Entrypoint must contain at least 1 element");
this.entrypoint = Optional.of(entrypoint);
return this;
}
@Override
public Docker.CreateContainerCommand withEnvironment(String name, String value) {
assert name.indexOf('=') == -1;
environmentAssignments.add(name + "=" + value);
return this;
}
@Override
public Docker.CreateContainerCommand withVolume(Path path, Path volumePath) {
volumeBindSpecs.add(path + ":" + volumePath + ":Z");
return this;
}
@Override
public Docker.CreateContainerCommand withSharedVolume(Path path, Path volumePath) {
volumeBindSpecs.add(path + ":" + volumePath + ":z");
return this;
}
@Override
public Docker.CreateContainerCommand withNetworkMode(String mode) {
networkMode = Optional.of(mode);
return this;
}
@Override
public Docker.CreateContainerCommand withIpAddress(InetAddress address) {
if (address instanceof Inet6Address) {
ipv6Address = Optional.of(address.getHostAddress());
} else {
ipv4Address = Optional.of(address.getHostAddress());
}
return this;
}
@Override
public void create() {
try {
createCreateContainerCmd().exec();
} catch (RuntimeException e) {
throw new DockerException("Failed to create container " + toString(), e);
}
}
/** Maps ("--env", {"A", "B", "C"}) to "--env A --env B --env C" */
private static String toRepeatedOption(String option, Collection<String> optionValues) {
return optionValues.stream()
.map(optionValue -> option + " " + optionValue)
.collect(Collectors.joining(" "));
}
private static String toOptionalOption(String option, Optional<?> value) {
return value.map(o -> option + " " + o).orElse("");
}
private static String toFlagOption(String option, boolean value) {
return value ? option : "";
}
/** Make toString() print the equivalent arguments to 'docker run' */
@Override
public String toString() {
List<String> labelList = labels.entrySet().stream()
.map(entry -> entry.getKey() + "=" + entry.getValue()).collect(Collectors.toList());
List<String> ulimitList = ulimits.stream()
.map(ulimit -> ulimit.getName() + "=" + ulimit.getSoft() + ":" + ulimit.getHard())
.collect(Collectors.toList());
List<String> addCapabilitiesList = addCapabilities.stream().map(Enum<Capability>::toString).sorted().collect(Collectors.toList());
List<String> dropCapabilitiesList = dropCapabilities.stream().map(Enum<Capability>::toString).sorted().collect(Collectors.toList());
Optional<String> entrypointExecuteable = entrypoint.map(args -> args[0]);
String entrypointArgs = entrypoint.map(Stream::of).orElseGet(Stream::empty)
.skip(1)
.collect(Collectors.joining(" "));
return Stream.of(
"--name " + containerName.asString(),
toOptionalOption("--hostname", hostName),
toOptionalOption("--cpu-shares", containerResources.map(ContainerResources::cpuShares)),
toOptionalOption("--cpus", containerResources.map(ContainerResources::cpus)),
toOptionalOption("--memory", containerResources.map(ContainerResources::memoryBytes)),
toRepeatedOption("--label", labelList),
toRepeatedOption("--ulimit", ulimitList),
toRepeatedOption("--env", environmentAssignments),
toRepeatedOption("--volume", volumeBindSpecs),
toRepeatedOption("--cap-add", addCapabilitiesList),
toRepeatedOption("--cap-drop", dropCapabilitiesList),
toRepeatedOption("--security-opt", securityOpts),
toOptionalOption("--net", networkMode),
toOptionalOption("--ip", ipv4Address),
toOptionalOption("--ip6", ipv6Address),
toOptionalOption("--entrypoint", entrypointExecuteable),
toFlagOption("--privileged", privileged),
dockerImage.asString(),
entrypointArgs)
.filter(s -> !s.isEmpty())
.collect(Collectors.joining(" "));
}
/**
* Generates a pseudo-random MAC address based on the hostname, IPv4- and IPv6-address.
*/
static String generateMACAddress(Optional<String> hostname, Optional<String> ipv4Address, Optional<String> ipv6Address) {
final String seed = hostname.orElse("") + ipv4Address.orElse("") + ipv6Address.orElse("");
Random rand = getPRNG(seed);
byte[] macAddr = new byte[6];
rand.nextBytes(macAddr);
macAddr[0] = (byte) ((macAddr[0] | 2) & 254);
return IntStream.range(0, macAddr.length)
.mapToObj(i -> String.format("%02x", macAddr[i]))
.collect(Collectors.joining(":"));
}
private static Random getPRNG(String seed) {
try {
SecureRandom rand = SecureRandom.getInstance("SHA1PRNG");
rand.setSeed(seed.getBytes());
return rand;
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException("Failed to get pseudo-random number generator", e);
}
}
} | class CreateContainerCommandImpl implements Docker.CreateContainerCommand {
private final DockerClient docker;
private final DockerImage dockerImage;
private final ContainerName containerName;
private final Map<String, String> labels = new HashMap<>();
private final List<String> environmentAssignments = new ArrayList<>();
private final List<String> volumeBindSpecs = new ArrayList<>();
private final List<Ulimit> ulimits = new ArrayList<>();
private final Set<Capability> addCapabilities = new HashSet<>();
private final Set<Capability> dropCapabilities = new HashSet<>();
private final Set<String> securityOpts = new HashSet<>();
private Optional<String> hostName = Optional.empty();
private Optional<ContainerResources> containerResources = Optional.empty();
private Optional<String> networkMode = Optional.empty();
private Optional<String> ipv4Address = Optional.empty();
private Optional<String> ipv6Address = Optional.empty();
private Optional<String[]> entrypoint = Optional.empty();
private boolean privileged = false;
CreateContainerCommandImpl(DockerClient docker, DockerImage dockerImage, ContainerName containerName) {
this.docker = docker;
this.dockerImage = dockerImage;
this.containerName = containerName;
}
@Override
public Docker.CreateContainerCommand withHostName(String hostName) {
this.hostName = Optional.of(hostName);
return this;
}
@Override
public Docker.CreateContainerCommand withResources(ContainerResources containerResources) {
this.containerResources = Optional.of(containerResources);
return this;
}
@Override
public Docker.CreateContainerCommand withLabel(String name, String value) {
assert !name.contains("=");
labels.put(name, value);
return this;
}
public Docker.CreateContainerCommand withManagedBy(String manager) {
return withLabel(LABEL_NAME_MANAGEDBY, manager);
}
@Override
public Docker.CreateContainerCommand withAddCapability(String capabilityName) {
addCapabilities.add(Capability.valueOf(capabilityName));
return this;
}
@Override
public Docker.CreateContainerCommand withDropCapability(String capabilityName) {
dropCapabilities.add(Capability.valueOf(capabilityName));
return this;
}
@Override
public Docker.CreateContainerCommand withSecurityOpts(String securityOpt) {
securityOpts.add(securityOpt);
return this;
}
@Override
public Docker.CreateContainerCommand withPrivileged(boolean privileged) {
this.privileged = privileged;
return this;
}
@Override
public Docker.CreateContainerCommand withUlimit(String name, int softLimit, int hardLimit) {
ulimits.add(new Ulimit(name, softLimit, hardLimit));
return this;
}
@Override
public Docker.CreateContainerCommand withEntrypoint(String... entrypoint) {
if (entrypoint.length < 1) throw new IllegalArgumentException("Entrypoint must contain at least 1 element");
this.entrypoint = Optional.of(entrypoint);
return this;
}
@Override
public Docker.CreateContainerCommand withEnvironment(String name, String value) {
assert name.indexOf('=') == -1;
environmentAssignments.add(name + "=" + value);
return this;
}
@Override
public Docker.CreateContainerCommand withVolume(Path path, Path volumePath) {
volumeBindSpecs.add(path + ":" + volumePath + ":Z");
return this;
}
@Override
public Docker.CreateContainerCommand withSharedVolume(Path path, Path volumePath) {
volumeBindSpecs.add(path + ":" + volumePath + ":z");
return this;
}
@Override
public Docker.CreateContainerCommand withNetworkMode(String mode) {
networkMode = Optional.of(mode);
return this;
}
@Override
public Docker.CreateContainerCommand withIpAddress(InetAddress address) {
if (address instanceof Inet6Address) {
ipv6Address = Optional.of(address.getHostAddress());
} else {
ipv4Address = Optional.of(address.getHostAddress());
}
return this;
}
@Override
public void create() {
try {
createCreateContainerCmd().exec();
} catch (RuntimeException e) {
throw new DockerException("Failed to create container " + toString(), e);
}
}
/** Maps ("--env", {"A", "B", "C"}) to "--env A --env B --env C" */
private static String toRepeatedOption(String option, Collection<String> optionValues) {
return optionValues.stream()
.map(optionValue -> option + " " + optionValue)
.collect(Collectors.joining(" "));
}
private static String toOptionalOption(String option, Optional<?> value) {
return value.map(o -> option + " " + o).orElse("");
}
private static String toFlagOption(String option, boolean value) {
return value ? option : "";
}
/** Make toString() print the equivalent arguments to 'docker run' */
@Override
public String toString() {
List<String> labelList = labels.entrySet().stream()
.map(entry -> entry.getKey() + "=" + entry.getValue()).collect(Collectors.toList());
List<String> ulimitList = ulimits.stream()
.map(ulimit -> ulimit.getName() + "=" + ulimit.getSoft() + ":" + ulimit.getHard())
.collect(Collectors.toList());
List<String> addCapabilitiesList = addCapabilities.stream().map(Enum<Capability>::toString).sorted().collect(Collectors.toList());
List<String> dropCapabilitiesList = dropCapabilities.stream().map(Enum<Capability>::toString).sorted().collect(Collectors.toList());
Optional<String> entrypointExecuteable = entrypoint.map(args -> args[0]);
String entrypointArgs = entrypoint.map(Stream::of).orElseGet(Stream::empty)
.skip(1)
.collect(Collectors.joining(" "));
return Stream.of(
"--name " + containerName.asString(),
toOptionalOption("--hostname", hostName),
toOptionalOption("--cpu-shares", containerResources.map(ContainerResources::cpuShares)),
toOptionalOption("--cpus", containerResources.map(ContainerResources::cpus)),
toOptionalOption("--memory", containerResources.map(ContainerResources::memoryBytes)),
toRepeatedOption("--label", labelList),
toRepeatedOption("--ulimit", ulimitList),
toRepeatedOption("--env", environmentAssignments),
toRepeatedOption("--volume", volumeBindSpecs),
toRepeatedOption("--cap-add", addCapabilitiesList),
toRepeatedOption("--cap-drop", dropCapabilitiesList),
toRepeatedOption("--security-opt", securityOpts),
toOptionalOption("--net", networkMode),
toOptionalOption("--ip", ipv4Address),
toOptionalOption("--ip6", ipv6Address),
toOptionalOption("--entrypoint", entrypointExecuteable),
toFlagOption("--privileged", privileged),
dockerImage.asString(),
entrypointArgs)
.filter(s -> !s.isEmpty())
.collect(Collectors.joining(" "));
}
/**
* Generates a pseudo-random MAC address based on the hostname, IPv4- and IPv6-address.
*/
static String generateMACAddress(Optional<String> hostname, Optional<String> ipv4Address, Optional<String> ipv6Address) {
final String seed = hostname.orElse("") + ipv4Address.orElse("") + ipv6Address.orElse("");
Random rand = getPRNG(seed);
byte[] macAddr = new byte[6];
rand.nextBytes(macAddr);
macAddr[0] = (byte) ((macAddr[0] | 2) & 254);
return IntStream.range(0, macAddr.length)
.mapToObj(i -> String.format("%02x", macAddr[i]))
.collect(Collectors.joining(":"));
}
private static Random getPRNG(String seed) {
try {
SecureRandom rand = SecureRandom.getInstance("SHA1PRNG");
rand.setSeed(seed.getBytes());
return rand;
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException("Failed to get pseudo-random number generator", e);
}
}
} |
Consider updating error message to indicate appropriate command line option instead of referring to HTTP headers (or just remove it entirely) | public FeedResponse handle(HttpRequest request, RouteMetricSet.ProgressCallback callback, int numThreads) {
MessagePropertyProcessor.PropertySetter properties = getPropertyProcessor().buildPropertySetter(request);
String route = properties.getRoute().toString();
FeedResponse response = new FeedResponse(new RouteMetricSet(route, callback));
SingleSender sender = new SingleSender(response, getSharedSender(route));
sender.addMessageProcessor(properties);
ThreadedFeedAccess feedAccess = new ThreadedFeedAccess(numThreads, sender);
Feeder feeder = createFeeder(feedAccess, request);
feeder.setAbortOnDocumentError(properties.getAbortOnDocumentError());
feeder.setCreateIfNonExistent(properties.getCreateIfNonExistent());
response.setAbortOnFeedError(properties.getAbortOnFeedError());
List<String> errors = feeder.parse();
for (String s : errors) {
response.addXMLParseError(s);
}
if (errors.size() > 0 && feeder instanceof XMLFeeder) {
response.addXMLParseError("If you are trying to feed JSON, set the Content-Type header to application/json.");
}
sender.done();
feedAccess.close();
long millis = getTimeoutMillis(request);
boolean completed = sender.waitForPending(millis);
if (!completed) {
response.addError(Error.TIMEOUT, "Timed out after " + millis + " ms waiting for responses");
}
response.done();
return response;
} | response.addXMLParseError("If you are trying to feed JSON, set the Content-Type header to application/json."); | public FeedResponse handle(HttpRequest request, RouteMetricSet.ProgressCallback callback, int numThreads) {
MessagePropertyProcessor.PropertySetter properties = getPropertyProcessor().buildPropertySetter(request);
String route = properties.getRoute().toString();
FeedResponse response = new FeedResponse(new RouteMetricSet(route, callback));
SingleSender sender = new SingleSender(response, getSharedSender(route));
sender.addMessageProcessor(properties);
ThreadedFeedAccess feedAccess = new ThreadedFeedAccess(numThreads, sender);
Feeder feeder = createFeeder(feedAccess, request);
feeder.setAbortOnDocumentError(properties.getAbortOnDocumentError());
feeder.setCreateIfNonExistent(properties.getCreateIfNonExistent());
response.setAbortOnFeedError(properties.getAbortOnFeedError());
List<String> errors = feeder.parse();
for (String s : errors) {
response.addXMLParseError(s);
}
sender.done();
feedAccess.close();
long millis = getTimeoutMillis(request);
boolean completed = sender.waitForPending(millis);
if (!completed) {
response.addError(Error.TIMEOUT, "Timed out after " + millis + " ms waiting for responses");
}
response.done();
return response;
} | class VespaFeedHandler extends VespaFeedHandlerBase {
public static final String JSON_INPUT = "jsonInput";
private VespaFeedHandler(FeedContext context) {
super(context);
}
public static VespaFeedHandler createFromContext(FeedContext context) {
return new VespaFeedHandler(context);
}
private Feeder createFeeder(SimpleFeedAccess sender, HttpRequest request) {
if (Boolean.valueOf(request.getProperty(JSON_INPUT))) {
return new JsonFeeder(getDocumentTypeManager(), sender, getRequestInputStream(request));
} else {
return new XMLFeeder(getDocumentTypeManager(), sender, getRequestInputStream(request));
}
}
} | class VespaFeedHandler extends VespaFeedHandlerBase {
public static final String JSON_INPUT = "jsonInput";
private VespaFeedHandler(FeedContext context) {
super(context);
}
public static VespaFeedHandler createFromContext(FeedContext context) {
return new VespaFeedHandler(context);
}
private Feeder createFeeder(SimpleFeedAccess sender, HttpRequest request) {
if (Boolean.valueOf(request.getProperty(JSON_INPUT))) {
return new JsonFeeder(getDocumentTypeManager(), sender, getRequestInputStream(request));
} else {
return new XMLFeeder(getDocumentTypeManager(), sender, getRequestInputStream(request));
}
}
} |
Removed :) | public FeedResponse handle(HttpRequest request, RouteMetricSet.ProgressCallback callback, int numThreads) {
MessagePropertyProcessor.PropertySetter properties = getPropertyProcessor().buildPropertySetter(request);
String route = properties.getRoute().toString();
FeedResponse response = new FeedResponse(new RouteMetricSet(route, callback));
SingleSender sender = new SingleSender(response, getSharedSender(route));
sender.addMessageProcessor(properties);
ThreadedFeedAccess feedAccess = new ThreadedFeedAccess(numThreads, sender);
Feeder feeder = createFeeder(feedAccess, request);
feeder.setAbortOnDocumentError(properties.getAbortOnDocumentError());
feeder.setCreateIfNonExistent(properties.getCreateIfNonExistent());
response.setAbortOnFeedError(properties.getAbortOnFeedError());
List<String> errors = feeder.parse();
for (String s : errors) {
response.addXMLParseError(s);
}
if (errors.size() > 0 && feeder instanceof XMLFeeder) {
response.addXMLParseError("If you are trying to feed JSON, set the Content-Type header to application/json.");
}
sender.done();
feedAccess.close();
long millis = getTimeoutMillis(request);
boolean completed = sender.waitForPending(millis);
if (!completed) {
response.addError(Error.TIMEOUT, "Timed out after " + millis + " ms waiting for responses");
}
response.done();
return response;
} | response.addXMLParseError("If you are trying to feed JSON, set the Content-Type header to application/json."); | public FeedResponse handle(HttpRequest request, RouteMetricSet.ProgressCallback callback, int numThreads) {
MessagePropertyProcessor.PropertySetter properties = getPropertyProcessor().buildPropertySetter(request);
String route = properties.getRoute().toString();
FeedResponse response = new FeedResponse(new RouteMetricSet(route, callback));
SingleSender sender = new SingleSender(response, getSharedSender(route));
sender.addMessageProcessor(properties);
ThreadedFeedAccess feedAccess = new ThreadedFeedAccess(numThreads, sender);
Feeder feeder = createFeeder(feedAccess, request);
feeder.setAbortOnDocumentError(properties.getAbortOnDocumentError());
feeder.setCreateIfNonExistent(properties.getCreateIfNonExistent());
response.setAbortOnFeedError(properties.getAbortOnFeedError());
List<String> errors = feeder.parse();
for (String s : errors) {
response.addXMLParseError(s);
}
sender.done();
feedAccess.close();
long millis = getTimeoutMillis(request);
boolean completed = sender.waitForPending(millis);
if (!completed) {
response.addError(Error.TIMEOUT, "Timed out after " + millis + " ms waiting for responses");
}
response.done();
return response;
} | class VespaFeedHandler extends VespaFeedHandlerBase {
public static final String JSON_INPUT = "jsonInput";
private VespaFeedHandler(FeedContext context) {
super(context);
}
public static VespaFeedHandler createFromContext(FeedContext context) {
return new VespaFeedHandler(context);
}
private Feeder createFeeder(SimpleFeedAccess sender, HttpRequest request) {
if (Boolean.valueOf(request.getProperty(JSON_INPUT))) {
return new JsonFeeder(getDocumentTypeManager(), sender, getRequestInputStream(request));
} else {
return new XMLFeeder(getDocumentTypeManager(), sender, getRequestInputStream(request));
}
}
} | class VespaFeedHandler extends VespaFeedHandlerBase {
public static final String JSON_INPUT = "jsonInput";
private VespaFeedHandler(FeedContext context) {
super(context);
}
public static VespaFeedHandler createFromContext(FeedContext context) {
return new VespaFeedHandler(context);
}
private Feeder createFeeder(SimpleFeedAccess sender, HttpRequest request) {
if (Boolean.valueOf(request.getProperty(JSON_INPUT))) {
return new JsonFeeder(getDocumentTypeManager(), sender, getRequestInputStream(request));
} else {
return new XMLFeeder(getDocumentTypeManager(), sender, getRequestInputStream(request));
}
}
} |
Hash is not verified against actual contents, is this intentional? | public void read(VespaXMLFeedReader.Operation operation) throws Exception {
int read = in.read(prefix);
if (read != prefix.length) {
operation.setInvalid();
return;
}
ByteBuffer header = ByteBuffer.wrap(prefix);
int sz = header.getInt();
int type = header.getInt();
long hash = header.getLong();
byte [] blob = new byte[sz];
read = in.read(blob);
if (read != blob.length) {
throw new IllegalArgumentException("Underflow, failed reading " + blob.length + "bytes. Got " + read);
}
DocumentDeserializer deser = DocumentDeserializerFactory.createHead(mgr, GrowableByteBuffer.wrap(blob));
if (type == DOCUMENT) {
operation.setDocument(new Document(deser));
} else if (type == UPDATE) {
operation.setDocumentUpdate(new DocumentUpdate(deser));
} else if (type == REMOVE) {
operation.setRemove(new DocumentId(deser));
} else {
throw new IllegalArgumentException("Unknown operation " + type);
}
} | long hash = header.getLong(); | public void read(VespaXMLFeedReader.Operation operation) throws Exception {
int read = in.read(prefix);
if (read != prefix.length) {
operation.setInvalid();
return;
}
ByteBuffer header = ByteBuffer.wrap(prefix);
int sz = header.getInt();
int type = header.getInt();
long hash = header.getLong();
byte [] blob = new byte[sz];
read = in.read(blob);
if (read != blob.length) {
throw new IllegalArgumentException("Underflow, failed reading " + blob.length + "bytes. Got " + read);
}
long computedHash = VespaV1Destination.hash(blob, 0, blob.length);
if (computedHash != hash) {
throw new IllegalArgumentException("Hash mismatch, expected " + hash + ", got " + computedHash);
}
DocumentDeserializer deser = DocumentDeserializerFactory.createHead(mgr, GrowableByteBuffer.wrap(blob));
if (type == DOCUMENT) {
operation.setDocument(new Document(deser));
} else if (type == UPDATE) {
operation.setDocumentUpdate(new DocumentUpdate(deser));
} else if (type == REMOVE) {
operation.setRemove(new DocumentId(deser));
} else {
throw new IllegalArgumentException("Unknown operation " + type);
}
} | class VespaV1FeedReader implements FeedReader {
private final InputStream in;
private final DocumentTypeManager mgr;
private final byte[] prefix = new byte[16];
VespaV1FeedReader(InputStream in, DocumentTypeManager mgr) throws IOException {
this.in = in;
this.mgr = mgr;
byte [] header = new byte[2];
in.read(header);
if ((header[0] != 'V') && (header[1] != '1')) {
throw new IllegalArgumentException("Invalid Header " + Arrays.toString(header));
}
}
@Override
} | class VespaV1FeedReader implements FeedReader {
private final InputStream in;
private final DocumentTypeManager mgr;
private final byte[] prefix = new byte[16];
VespaV1FeedReader(InputStream in, DocumentTypeManager mgr) throws IOException {
this.in = in;
this.mgr = mgr;
byte [] header = new byte[2];
in.read(header);
if ((header[0] != 'V') && (header[1] != '1')) {
throw new IllegalArgumentException("Invalid Header " + Arrays.toString(header));
}
}
@Override
} |
Forgotten... :) | public void read(VespaXMLFeedReader.Operation operation) throws Exception {
int read = in.read(prefix);
if (read != prefix.length) {
operation.setInvalid();
return;
}
ByteBuffer header = ByteBuffer.wrap(prefix);
int sz = header.getInt();
int type = header.getInt();
long hash = header.getLong();
byte [] blob = new byte[sz];
read = in.read(blob);
if (read != blob.length) {
throw new IllegalArgumentException("Underflow, failed reading " + blob.length + "bytes. Got " + read);
}
DocumentDeserializer deser = DocumentDeserializerFactory.createHead(mgr, GrowableByteBuffer.wrap(blob));
if (type == DOCUMENT) {
operation.setDocument(new Document(deser));
} else if (type == UPDATE) {
operation.setDocumentUpdate(new DocumentUpdate(deser));
} else if (type == REMOVE) {
operation.setRemove(new DocumentId(deser));
} else {
throw new IllegalArgumentException("Unknown operation " + type);
}
} | long hash = header.getLong(); | public void read(VespaXMLFeedReader.Operation operation) throws Exception {
int read = in.read(prefix);
if (read != prefix.length) {
operation.setInvalid();
return;
}
ByteBuffer header = ByteBuffer.wrap(prefix);
int sz = header.getInt();
int type = header.getInt();
long hash = header.getLong();
byte [] blob = new byte[sz];
read = in.read(blob);
if (read != blob.length) {
throw new IllegalArgumentException("Underflow, failed reading " + blob.length + "bytes. Got " + read);
}
long computedHash = VespaV1Destination.hash(blob, 0, blob.length);
if (computedHash != hash) {
throw new IllegalArgumentException("Hash mismatch, expected " + hash + ", got " + computedHash);
}
DocumentDeserializer deser = DocumentDeserializerFactory.createHead(mgr, GrowableByteBuffer.wrap(blob));
if (type == DOCUMENT) {
operation.setDocument(new Document(deser));
} else if (type == UPDATE) {
operation.setDocumentUpdate(new DocumentUpdate(deser));
} else if (type == REMOVE) {
operation.setRemove(new DocumentId(deser));
} else {
throw new IllegalArgumentException("Unknown operation " + type);
}
} | class VespaV1FeedReader implements FeedReader {
private final InputStream in;
private final DocumentTypeManager mgr;
private final byte[] prefix = new byte[16];
VespaV1FeedReader(InputStream in, DocumentTypeManager mgr) throws IOException {
this.in = in;
this.mgr = mgr;
byte [] header = new byte[2];
in.read(header);
if ((header[0] != 'V') && (header[1] != '1')) {
throw new IllegalArgumentException("Invalid Header " + Arrays.toString(header));
}
}
@Override
} | class VespaV1FeedReader implements FeedReader {
private final InputStream in;
private final DocumentTypeManager mgr;
private final byte[] prefix = new byte[16];
VespaV1FeedReader(InputStream in, DocumentTypeManager mgr) throws IOException {
this.in = in;
this.mgr = mgr;
byte [] header = new byte[2];
in.read(header);
if ((header[0] != 'V') && (header[1] != '1')) {
throw new IllegalArgumentException("Invalid Header " + Arrays.toString(header));
}
}
@Override
} |
Same condition in if, should probably be ` == TensorType.Value.DOUBLE` here. | public static Builder of(TensorType type, DimensionSizes sizes) {
if (sizes.dimensions() != type.dimensions().size())
throw new IllegalArgumentException(sizes.dimensions() +
" is the wrong number of dimensions for " + type);
for (int i = 0; i < sizes.dimensions(); i++ ) {
Optional<Long> size = type.dimensions().get(i).size();
if (size.isPresent() && size.get() < sizes.size(i))
throw new IllegalArgumentException("Size of dimension " + type.dimensions().get(i).name() + " is " +
sizes.size(i) +
" but cannot be larger than " + size.get() + " in " + type);
}
if (type.valueType() == TensorType.Value.FLOAT)
return new IndexedFloatTensor.BoundFloatBuilder(type, sizes);
else if (type.valueType() == TensorType.Value.FLOAT)
return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes);
else
return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes);
} | else if (type.valueType() == TensorType.Value.FLOAT) | public static Builder of(TensorType type, DimensionSizes sizes) {
if (sizes.dimensions() != type.dimensions().size())
throw new IllegalArgumentException(sizes.dimensions() +
" is the wrong number of dimensions for " + type);
for (int i = 0; i < sizes.dimensions(); i++ ) {
Optional<Long> size = type.dimensions().get(i).size();
if (size.isPresent() && size.get() < sizes.size(i))
throw new IllegalArgumentException("Size of dimension " + type.dimensions().get(i).name() + " is " +
sizes.size(i) +
" but cannot be larger than " + size.get() + " in " + type);
}
if (type.valueType() == TensorType.Value.FLOAT)
return new IndexedFloatTensor.BoundFloatBuilder(type, sizes);
else if (type.valueType() == TensorType.Value.DOUBLE)
return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes);
else
return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes);
} | class Builder implements Tensor.Builder {
final TensorType type;
private Builder(TensorType type) {
this.type = type;
}
public static Builder of(TensorType type) {
if (type.dimensions().stream().allMatch(d -> d instanceof TensorType.IndexedBoundDimension))
return of(type, BoundBuilder.dimensionSizesOf(type));
else
return new UnboundBuilder(type);
}
/**
* Create a builder with dimension size information for this instance. Must be one size entry per dimension,
* and, agree with the type size information when specified in the type.
* If sizes are completely specified in the type this size information is redundant.
*/
public abstract Builder cell(double value, long ... indexes);
public abstract Builder cell(float value, long ... indexes);
@Override
public TensorType type() { return type; }
@Override
public abstract IndexedTensor build();
} | class Builder implements Tensor.Builder {
final TensorType type;
private Builder(TensorType type) {
this.type = type;
}
public static Builder of(TensorType type) {
if (type.dimensions().stream().allMatch(d -> d instanceof TensorType.IndexedBoundDimension))
return of(type, BoundBuilder.dimensionSizesOf(type));
else
return new UnboundBuilder(type);
}
/**
* Create a builder with dimension size information for this instance. Must be one size entry per dimension,
* and, agree with the type size information when specified in the type.
* If sizes are completely specified in the type this size information is redundant.
*/
public abstract Builder cell(double value, long ... indexes);
public abstract Builder cell(float value, long ... indexes);
@Override
public TensorType type() { return type; }
@Override
public abstract IndexedTensor build();
} |
Thanks! Functionally correct, but logically wrong ... | public static Builder of(TensorType type, DimensionSizes sizes) {
if (sizes.dimensions() != type.dimensions().size())
throw new IllegalArgumentException(sizes.dimensions() +
" is the wrong number of dimensions for " + type);
for (int i = 0; i < sizes.dimensions(); i++ ) {
Optional<Long> size = type.dimensions().get(i).size();
if (size.isPresent() && size.get() < sizes.size(i))
throw new IllegalArgumentException("Size of dimension " + type.dimensions().get(i).name() + " is " +
sizes.size(i) +
" but cannot be larger than " + size.get() + " in " + type);
}
if (type.valueType() == TensorType.Value.FLOAT)
return new IndexedFloatTensor.BoundFloatBuilder(type, sizes);
else if (type.valueType() == TensorType.Value.FLOAT)
return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes);
else
return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes);
} | else if (type.valueType() == TensorType.Value.FLOAT) | public static Builder of(TensorType type, DimensionSizes sizes) {
if (sizes.dimensions() != type.dimensions().size())
throw new IllegalArgumentException(sizes.dimensions() +
" is the wrong number of dimensions for " + type);
for (int i = 0; i < sizes.dimensions(); i++ ) {
Optional<Long> size = type.dimensions().get(i).size();
if (size.isPresent() && size.get() < sizes.size(i))
throw new IllegalArgumentException("Size of dimension " + type.dimensions().get(i).name() + " is " +
sizes.size(i) +
" but cannot be larger than " + size.get() + " in " + type);
}
if (type.valueType() == TensorType.Value.FLOAT)
return new IndexedFloatTensor.BoundFloatBuilder(type, sizes);
else if (type.valueType() == TensorType.Value.DOUBLE)
return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes);
else
return new IndexedDoubleTensor.BoundDoubleBuilder(type, sizes);
} | class Builder implements Tensor.Builder {
// Tensor type of the instance being built; fixed at construction time.
final TensorType type;
private Builder(TensorType type) {
this.type = type;
}
// Returns a bound (fixed-size) builder when every dimension declares a size,
// otherwise an unbound builder.
public static Builder of(TensorType type) {
if (type.dimensions().stream().allMatch(d -> d instanceof TensorType.IndexedBoundDimension))
return of(type, BoundBuilder.dimensionSizesOf(type));
else
return new UnboundBuilder(type);
}
/**
* Create a builder with dimension size information for this instance. Must be one size entry per dimension,
* and, agree with the type size information when specified in the type.
* If sizes are completely specified in the type this size information is redundant.
*/
// NOTE(review): the javadoc above appears to describe the of(type, sizes) factory
// rather than the cell() methods that follow — confirm its placement.
public abstract Builder cell(double value, long ... indexes);
public abstract Builder cell(float value, long ... indexes);
@Override
public TensorType type() { return type; }
@Override
public abstract IndexedTensor build();
} | class Builder implements Tensor.Builder {
// The type being built; set once in the constructor.
final TensorType type;
private Builder(TensorType type) {
this.type = type;
}
// Picks a bound builder if all dimensions are indexed-bound; unbound otherwise.
public static Builder of(TensorType type) {
if (type.dimensions().stream().allMatch(d -> d instanceof TensorType.IndexedBoundDimension))
return of(type, BoundBuilder.dimensionSizesOf(type));
else
return new UnboundBuilder(type);
}
/**
* Create a builder with dimension size information for this instance. Must be one size entry per dimension,
* and, agree with the type size information when specified in the type.
* If sizes are completely specified in the type this size information is redundant.
*/
// NOTE(review): this javadoc seems to belong to the of(type, sizes) factory,
// not to the cell() declarations below — verify.
public abstract Builder cell(double value, long ... indexes);
public abstract Builder cell(float value, long ... indexes);
@Override
public TensorType type() { return type; }
@Override
public abstract IndexedTensor build();
} |
typo: Falied -> Failed | private static void clean(MappedByteBuffer mmap) {
if ((mmap == null) || !mmap.isDirect()) return;
try {
Class unsafeClass;
try {
unsafeClass = Class.forName("sun.misc.Unsafe");
} catch (Exception ex) {
unsafeClass = Class.forName("jdk.internal.misc.Unsafe");
}
Method clean = unsafeClass.getMethod("invokeCleaner", ByteBuffer.class);
clean.setAccessible(true);
Field theUnsafeField = unsafeClass.getDeclaredField("theUnsafe");
theUnsafeField.setAccessible(true);
Object theUnsafe = theUnsafeField.get(null);
clean.invoke(theUnsafe, mmap);
} catch (Exception e) {
throw new IllegalArgumentException("Falied unmapping ", e);
}
} | throw new IllegalArgumentException("Falied unmapping ", e); | private static void clean(MappedByteBuffer mmap) {
// No-op for null or heap (non-direct) buffers.
if ((mmap == null) || !mmap.isDirect()) return;
try {
Class unsafeClass;
// Prefer sun.misc.Unsafe; fall back to jdk.internal.misc.Unsafe when unavailable.
try {
unsafeClass = Class.forName("sun.misc.Unsafe");
} catch (Exception ex) {
unsafeClass = Class.forName("jdk.internal.misc.Unsafe");
}
// Unsafe.invokeCleaner(ByteBuffer) unmaps the direct buffer immediately.
Method clean = unsafeClass.getMethod("invokeCleaner", ByteBuffer.class);
clean.setAccessible(true);
Field theUnsafeField = unsafeClass.getDeclaredField("theUnsafe");
theUnsafeField.setAccessible(true);
Object theUnsafe = theUnsafeField.get(null);
clean.invoke(theUnsafe, mmap);
} catch (Exception e) {
throw new IllegalArgumentException("Failed unmapping ", e);
}
} | class Maps implements Closeable {
// Memory-maps the sections of an FSA file (see the magic-number check below):
// a 256-byte header, then symbol table, state table, data section and an
// optional perfect-hash table. All sections are read little-endian.
Maps(FileInputStream file) throws IOException {
_header = file.getChannel().map(MapMode.READ_ONLY,0,256);
_header.order(ByteOrder.LITTLE_ENDIAN);
// Reject files without the FSA magic number.
if (h_magic()!=2038637673) {
throw new IOException("Stream does not contain an FSA: Wrong file magic number " + h_magic());
}
_symbol_tab = file.getChannel().map(MapMode.READ_ONLY, 256, h_size());
_symbol_tab.order(ByteOrder.LITTLE_ENDIAN);
// State entries are 4-byte ints, hence 4*h_size() bytes.
_state_tab = file.getChannel().map(MapMode.READ_ONLY, 256+h_size(), 4*h_size());
_state_tab.order(ByteOrder.LITTLE_ENDIAN);
_data = file.getChannel().map(MapMode.READ_ONLY, 256+5*h_size(), h_data_size());
_data.order(ByteOrder.LITTLE_ENDIAN);
// The perfect-hash table is only present when flagged in the header.
if (h_has_phash()>0){
_phash = file.getChannel().map(MapMode.READ_ONLY, 256+5*h_size()+h_data_size(), 4*h_size());
_phash.order(ByteOrder.LITTLE_ENDIAN);
} else {
_phash = null;
}
_ok = true;
}
// Header accessors: each reads a 4-byte little-endian int at a fixed offset.
private int h_magic(){
return _header.getInt(0);
}
private int h_version(){
return _header.getInt(4);
}
private int h_checksum(){
return _header.getInt(8);
}
private int h_size(){
return _header.getInt(12);
}
private int h_start(){
return _header.getInt(16);
}
private int h_data_size(){
return _header.getInt(20);
}
private int h_data_type(){
return _header.getInt(24);
}
private int h_fixed_data_size(){
return _header.getInt(28);
}
private int h_has_phash(){
return _header.getInt(32);
}
private int h_serial(){
return _header.getInt(36);
}
// Perfect-hash delta for taking 'symbol' from 'state'; 0 when the transition is
// missing or no perfect-hash table was mapped.
private int hashDelta(int state, byte symbol){
int s=symbol;
// Java bytes are signed; normalize into 0..255.
if(s<0){
s+=256;
}
if(_ok && h_has_phash()==1 && s>0 && s<255){
if(getSymbol(state+s)==s){
return _phash.getInt(4*(state+s));
}
}
return 0;
}
// Target state for taking 'symbol' from 'state'; 0 when there is no such transition.
private int delta(int state, byte symbol){
int s=symbol;
if(s<0){
s+=256;
}
if(_ok && s>0 && s<255){
if(getSymbol(state+s)==s){
return _state_tab.getInt(4*(state+s));
}
}
return 0;
}
// Reads a symbol-table byte as an unsigned value (0..255).
private int getSymbol(int index){
int symbol = _symbol_tab.get(index);
if(symbol<0){
symbol += 256;
}
return symbol;
}
// A state is final when its reserved slot 255 holds symbol 255.
private boolean isFinal(int state){
return _ok && (getSymbol(state+255)==255);
}
@Override
public void close() {
// Unmap all sections; clean() is null-tolerant, so the optional _phash is safe.
clean(_header);
clean(_data);
clean(_phash);
clean(_state_tab);
clean(_symbol_tab);
}
private final MappedByteBuffer _header;
private final MappedByteBuffer _symbol_tab;
private final MappedByteBuffer _state_tab;
private final MappedByteBuffer _data;
private final MappedByteBuffer _phash;
private final boolean _ok;
} | class Maps implements Closeable {
// Maps the file's sections read-only and little-endian: 256-byte header, symbol
// table, 4-byte-per-entry state table, data section, optional perfect-hash table.
Maps(FileInputStream file) throws IOException {
_header = file.getChannel().map(MapMode.READ_ONLY,0,256);
_header.order(ByteOrder.LITTLE_ENDIAN);
if (h_magic()!=2038637673) {
throw new IOException("Stream does not contain an FSA: Wrong file magic number " + h_magic());
}
_symbol_tab = file.getChannel().map(MapMode.READ_ONLY, 256, h_size());
_symbol_tab.order(ByteOrder.LITTLE_ENDIAN);
_state_tab = file.getChannel().map(MapMode.READ_ONLY, 256+h_size(), 4*h_size());
_state_tab.order(ByteOrder.LITTLE_ENDIAN);
_data = file.getChannel().map(MapMode.READ_ONLY, 256+5*h_size(), h_data_size());
_data.order(ByteOrder.LITTLE_ENDIAN);
if (h_has_phash()>0){
_phash = file.getChannel().map(MapMode.READ_ONLY, 256+5*h_size()+h_data_size(), 4*h_size());
_phash.order(ByteOrder.LITTLE_ENDIAN);
} else {
_phash = null;
}
_ok = true;
}
// Fixed-offset little-endian int accessors into the 256-byte header.
private int h_magic(){
return _header.getInt(0);
}
private int h_version(){
return _header.getInt(4);
}
private int h_checksum(){
return _header.getInt(8);
}
private int h_size(){
return _header.getInt(12);
}
private int h_start(){
return _header.getInt(16);
}
private int h_data_size(){
return _header.getInt(20);
}
private int h_data_type(){
return _header.getInt(24);
}
private int h_fixed_data_size(){
return _header.getInt(28);
}
private int h_has_phash(){
return _header.getInt(32);
}
private int h_serial(){
return _header.getInt(36);
}
// Hash delta for a transition, or 0 when absent / no perfect-hash table mapped.
private int hashDelta(int state, byte symbol){
int s=symbol;
if(s<0){
s+=256;
}
if(_ok && h_has_phash()==1 && s>0 && s<255){
if(getSymbol(state+s)==s){
return _phash.getInt(4*(state+s));
}
}
return 0;
}
// Transition function: next state for (state, symbol), or 0 when absent.
private int delta(int state, byte symbol){
int s=symbol;
if(s<0){
s+=256;
}
if(_ok && s>0 && s<255){
if(getSymbol(state+s)==s){
return _state_tab.getInt(4*(state+s));
}
}
return 0;
}
// Unsigned read (0..255) from the symbol table.
private int getSymbol(int index){
int symbol = _symbol_tab.get(index);
if(symbol<0){
symbol += 256;
}
return symbol;
}
private boolean isFinal(int state){
return _ok && (getSymbol(state+255)==255);
}
@Override
public void close() {
clean(_header);
clean(_data);
clean(_phash);
clean(_state_tab);
clean(_symbol_tab);
}
private final MappedByteBuffer _header;
private final MappedByteBuffer _symbol_tab;
private final MappedByteBuffer _state_tab;
private final MappedByteBuffer _data;
private final MappedByteBuffer _phash;
private final boolean _ok;
} |
Fixed | private static void clean(MappedByteBuffer mmap) {
if ((mmap == null) || !mmap.isDirect()) return;
try {
Class unsafeClass;
try {
unsafeClass = Class.forName("sun.misc.Unsafe");
} catch (Exception ex) {
unsafeClass = Class.forName("jdk.internal.misc.Unsafe");
}
Method clean = unsafeClass.getMethod("invokeCleaner", ByteBuffer.class);
clean.setAccessible(true);
Field theUnsafeField = unsafeClass.getDeclaredField("theUnsafe");
theUnsafeField.setAccessible(true);
Object theUnsafe = theUnsafeField.get(null);
clean.invoke(theUnsafe, mmap);
} catch (Exception e) {
throw new IllegalArgumentException("Falied unmapping ", e);
}
} | throw new IllegalArgumentException("Falied unmapping ", e); | private static void clean(MappedByteBuffer mmap) {
// Only direct (mapped) buffers need explicit cleaning; ignore null/heap buffers.
if ((mmap == null) || !mmap.isDirect()) return;
try {
Class unsafeClass;
// Try the historic sun.misc.Unsafe first, then the jdk.internal variant.
try {
unsafeClass = Class.forName("sun.misc.Unsafe");
} catch (Exception ex) {
unsafeClass = Class.forName("jdk.internal.misc.Unsafe");
}
Method clean = unsafeClass.getMethod("invokeCleaner", ByteBuffer.class);
clean.setAccessible(true);
// The singleton Unsafe instance lives in the static 'theUnsafe' field.
Field theUnsafeField = unsafeClass.getDeclaredField("theUnsafe");
theUnsafeField.setAccessible(true);
Object theUnsafe = theUnsafeField.get(null);
clean.invoke(theUnsafe, mmap);
} catch (Exception e) {
throw new IllegalArgumentException("Failed unmapping ", e);
}
} | class Maps implements Closeable {
// Opens read-only little-endian mappings over the FSA file sections; section
// lengths and flags are taken from the 256-byte header mapped first.
Maps(FileInputStream file) throws IOException {
_header = file.getChannel().map(MapMode.READ_ONLY,0,256);
_header.order(ByteOrder.LITTLE_ENDIAN);
if (h_magic()!=2038637673) {
throw new IOException("Stream does not contain an FSA: Wrong file magic number " + h_magic());
}
_symbol_tab = file.getChannel().map(MapMode.READ_ONLY, 256, h_size());
_symbol_tab.order(ByteOrder.LITTLE_ENDIAN);
_state_tab = file.getChannel().map(MapMode.READ_ONLY, 256+h_size(), 4*h_size());
_state_tab.order(ByteOrder.LITTLE_ENDIAN);
_data = file.getChannel().map(MapMode.READ_ONLY, 256+5*h_size(), h_data_size());
_data.order(ByteOrder.LITTLE_ENDIAN);
// Optional section: only mapped when the header advertises a perfect hash.
if (h_has_phash()>0){
_phash = file.getChannel().map(MapMode.READ_ONLY, 256+5*h_size()+h_data_size(), 4*h_size());
_phash.order(ByteOrder.LITTLE_ENDIAN);
} else {
_phash = null;
}
_ok = true;
}
// Header layout: consecutive 4-byte ints at offsets 0, 4, 8, ... 36.
private int h_magic(){
return _header.getInt(0);
}
private int h_version(){
return _header.getInt(4);
}
private int h_checksum(){
return _header.getInt(8);
}
private int h_size(){
return _header.getInt(12);
}
private int h_start(){
return _header.getInt(16);
}
private int h_data_size(){
return _header.getInt(20);
}
private int h_data_type(){
return _header.getInt(24);
}
private int h_fixed_data_size(){
return _header.getInt(28);
}
private int h_has_phash(){
return _header.getInt(32);
}
private int h_serial(){
return _header.getInt(36);
}
// Perfect-hash delta lookup; returns 0 for missing transitions.
private int hashDelta(int state, byte symbol){
int s=symbol;
if(s<0){
s+=256;
}
if(_ok && h_has_phash()==1 && s>0 && s<255){
if(getSymbol(state+s)==s){
return _phash.getInt(4*(state+s));
}
}
return 0;
}
// State-table transition lookup; returns 0 for missing transitions.
private int delta(int state, byte symbol){
int s=symbol;
if(s<0){
s+=256;
}
if(_ok && s>0 && s<255){
if(getSymbol(state+s)==s){
return _state_tab.getInt(4*(state+s));
}
}
return 0;
}
// Symbol-table byte widened to unsigned int.
private int getSymbol(int index){
int symbol = _symbol_tab.get(index);
if(symbol<0){
symbol += 256;
}
return symbol;
}
private boolean isFinal(int state){
return _ok && (getSymbol(state+255)==255);
}
@Override
public void close() {
clean(_header);
clean(_data);
clean(_phash);
clean(_state_tab);
clean(_symbol_tab);
}
private final MappedByteBuffer _header;
private final MappedByteBuffer _symbol_tab;
private final MappedByteBuffer _state_tab;
private final MappedByteBuffer _data;
private final MappedByteBuffer _phash;
private final boolean _ok;
} | class Maps implements Closeable {
// Constructor maps header (256 bytes) plus symbol/state/data (+ optional phash)
// sections of an FSA file, all READ_ONLY and LITTLE_ENDIAN.
Maps(FileInputStream file) throws IOException {
_header = file.getChannel().map(MapMode.READ_ONLY,0,256);
_header.order(ByteOrder.LITTLE_ENDIAN);
if (h_magic()!=2038637673) {
throw new IOException("Stream does not contain an FSA: Wrong file magic number " + h_magic());
}
_symbol_tab = file.getChannel().map(MapMode.READ_ONLY, 256, h_size());
_symbol_tab.order(ByteOrder.LITTLE_ENDIAN);
_state_tab = file.getChannel().map(MapMode.READ_ONLY, 256+h_size(), 4*h_size());
_state_tab.order(ByteOrder.LITTLE_ENDIAN);
_data = file.getChannel().map(MapMode.READ_ONLY, 256+5*h_size(), h_data_size());
_data.order(ByteOrder.LITTLE_ENDIAN);
if (h_has_phash()>0){
_phash = file.getChannel().map(MapMode.READ_ONLY, 256+5*h_size()+h_data_size(), 4*h_size());
_phash.order(ByteOrder.LITTLE_ENDIAN);
} else {
_phash = null;
}
_ok = true;
}
// Typed views of the header's fixed-offset int fields.
private int h_magic(){
return _header.getInt(0);
}
private int h_version(){
return _header.getInt(4);
}
private int h_checksum(){
return _header.getInt(8);
}
private int h_size(){
return _header.getInt(12);
}
private int h_start(){
return _header.getInt(16);
}
private int h_data_size(){
return _header.getInt(20);
}
private int h_data_type(){
return _header.getInt(24);
}
private int h_fixed_data_size(){
return _header.getInt(28);
}
private int h_has_phash(){
return _header.getInt(32);
}
private int h_serial(){
return _header.getInt(36);
}
// Delta from the perfect-hash table, 0 when the edge does not exist.
private int hashDelta(int state, byte symbol){
int s=symbol;
// normalize the signed byte to 0..255
if(s<0){
s+=256;
}
if(_ok && h_has_phash()==1 && s>0 && s<255){
if(getSymbol(state+s)==s){
return _phash.getInt(4*(state+s));
}
}
return 0;
}
// Next-state lookup from the state table, 0 when the edge does not exist.
private int delta(int state, byte symbol){
int s=symbol;
if(s<0){
s+=256;
}
if(_ok && s>0 && s<255){
if(getSymbol(state+s)==s){
return _state_tab.getInt(4*(state+s));
}
}
return 0;
}
private int getSymbol(int index){
int symbol = _symbol_tab.get(index);
if(symbol<0){
symbol += 256;
}
return symbol;
}
// Symbol 255 in slot state+255 marks the state as accepting.
private boolean isFinal(int state){
return _ok && (getSymbol(state+255)==255);
}
@Override
public void close() {
clean(_header);
clean(_data);
clean(_phash);
clean(_state_tab);
clean(_symbol_tab);
}
private final MappedByteBuffer _header;
private final MappedByteBuffer _symbol_tab;
private final MappedByteBuffer _state_tab;
private final MappedByteBuffer _data;
private final MappedByteBuffer _phash;
private final boolean _ok;
} |
I have better experience of just mocking `DiscFilterRequest` directly with Mockito. Wrapping multiple request types is brittle, and `ApplicationRequestToDiscFilterRequestWrapper` is more of an ad-hoc hack. | public void testFilter() {
// NOTE(review): a non-empty Optional from filter.filter(...) appears to be a
// rejection response here — confirm against SignatureFilter's contract.
HttpRequest.Builder request = HttpRequest.newBuilder(URI.create("https:
byte[] emptyBody = new byte[0];
// Unsigned request: expected to be rejected.
DiscFilterRequest unsigned = requestOf(request.method("GET", HttpRequest.BodyPublishers.ofByteArray(emptyBody)).build(), emptyBody);
assertFalse(filter.filter(unsigned).isEmpty());
// Signed, but no deploy key registered yet: still rejected.
DiscFilterRequest signed = requestOf(signer.signed(request, Method.GET), emptyBody);
assertFalse(filter.filter(signed).isEmpty());
// Wrong key registered: rejected.
applications.lockOrThrow(id, application -> applications.store(application.withPemDeployKey(otherPublicKey)));
assertFalse(filter.filter(signed).isEmpty());
// Matching key registered: accepted, and a SecurityContext attribute is set.
applications.lockOrThrow(id, application -> applications.store(application.withPemDeployKey(publicKey)));
assertTrue(filter.filter(signed).isEmpty());
SecurityContext securityContext = (SecurityContext) signed.getAttribute(SecurityContext.ATTRIBUTE_NAME);
assertEquals("buildService@my-tenant.my-app", securityContext.principal().getName());
assertEquals(Set.of(Role.buildService(id.tenant(), id.application())), securityContext.roles());
// Signed POST with a body is accepted; the unsigned request is still rejected.
byte[] hiBytes = new byte[]{0x48, 0x69};
signed = requestOf(signer.signed(request, Method.POST), hiBytes);
assertTrue(filter.filter(signed).isEmpty());
assertFalse(filter.filter(unsigned).isEmpty());
} | DiscFilterRequest unsigned = requestOf(request.method("GET", HttpRequest.BodyPublishers.ofByteArray(emptyBody)).build(), emptyBody); | public void testFilter() {
// NOTE(review): this variant checks rejection via the absence of the
// SecurityContext request attribute rather than the filter's return value.
HttpRequest.Builder request = HttpRequest.newBuilder(URI.create("https:
byte[] emptyBody = new byte[0];
// Unsigned: no security context must be attached.
DiscFilterRequest unsigned = requestOf(request.method("GET", HttpRequest.BodyPublishers.ofByteArray(emptyBody)).build(), emptyBody);
filter.filter(unsigned);
assertNull(unsigned.getAttribute(SecurityContext.ATTRIBUTE_NAME));
// Signed but no deploy key registered: still no security context.
DiscFilterRequest signed = requestOf(signer.signed(request, Method.GET), emptyBody);
filter.filter(signed);
assertNull(signed.getAttribute(SecurityContext.ATTRIBUTE_NAME));
// Non-matching key: still no security context.
applications.lockOrThrow(id, application -> applications.store(application.withPemDeployKey(otherPublicKey)));
filter.filter(signed);
assertNull(signed.getAttribute(SecurityContext.ATTRIBUTE_NAME));
// Matching key: request accepted and the build-service principal/roles attached.
applications.lockOrThrow(id, application -> applications.store(application.withPemDeployKey(publicKey)));
assertTrue(filter.filter(signed).isEmpty());
SecurityContext securityContext = (SecurityContext) signed.getAttribute(SecurityContext.ATTRIBUTE_NAME);
assertEquals("buildService@my-tenant.my-app", securityContext.principal().getName());
assertEquals(Set.of(Role.buildService(id.tenant(), id.application())), securityContext.roles());
// Signed POST with a body also gets the security context.
byte[] hiBytes = new byte[]{0x48, 0x69};
signed = requestOf(signer.signed(request, Method.POST), hiBytes);
filter.filter(signed);
securityContext = (SecurityContext) signed.getAttribute(SecurityContext.ATTRIBUTE_NAME);
assertEquals("buildService@my-tenant.my-app", securityContext.principal().getName());
assertEquals(Set.of(Role.buildService(id.tenant(), id.application())), securityContext.roles());
filter.filter(unsigned);
assertNull(unsigned.getAttribute(SecurityContext.ATTRIBUTE_NAME));
} | class SignatureFilterTest {
// Test fixtures: EC key material in PEM form. 'otherPublicKey' intentionally does
// not correspond to 'privateKey', to exercise the signature-mismatch path.
private static final String publicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String otherPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFELzPyinTfQ/sZnTmRp5E4Ve/sbE\n" +
"pDhJeqczkyFcT2PysJ5sZwm7rKPEeXDOhzTPCyRvbUqc2SGdWbKUGGa/Yw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String privateKey = "-----BEGIN EC PRIVATE KEY-----\n" +
"MHcCAQEEIJUmbIX8YFLHtpRgkwqDDE3igU9RG6JD9cYHWAZii9j7oAoGCCqGSM49\n" +
"AwEHoUQDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9z/4jKSTHwbYR8wdsOSrJGVEU\n" +
"PbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END EC PRIVATE KEY-----\n";
private static final ApplicationId id = ApplicationId.from("my-tenant", "my-app", "default");
private ControllerTester tester;
private ApplicationController applications;
private SignatureFilter filter;
private RequestSigner signer;
@Before
public void setup() {
tester = new ControllerTester();
applications = tester.controller().applications();
filter = new SignatureFilter(tester.controller());
signer = new RequestSigner(privateKey, id.serializedForm(), tester.clock());
tester.createApplication(tester.createTenant(id.tenant().value(), "unused", 496L),
id.application().value(),
id.instance().value(),
28L);
}
// NOTE(review): this @Test annotation is not followed by a test method here —
// the method body appears to have been elided; confirm.
@Test
// Adapts a java.net.http request (plus raw body) into a DiscFilterRequest for the filter.
private static DiscFilterRequest requestOf(HttpRequest request, byte[] body) {
Request converted = new Request(request.uri().toString(), body, Request.Method.valueOf(request.method()));
converted.getHeaders().addAll(request.headers().map());
return new ApplicationRequestToDiscFilterRequestWrapper(converted);
}
} | class SignatureFilterTest {
// PEM fixtures; the private key pairs with 'publicKey' only.
private static final String publicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String otherPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFELzPyinTfQ/sZnTmRp5E4Ve/sbE\n" +
"pDhJeqczkyFcT2PysJ5sZwm7rKPEeXDOhzTPCyRvbUqc2SGdWbKUGGa/Yw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String privateKey = "-----BEGIN EC PRIVATE KEY-----\n" +
"MHcCAQEEIJUmbIX8YFLHtpRgkwqDDE3igU9RG6JD9cYHWAZii9j7oAoGCCqGSM49\n" +
"AwEHoUQDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9z/4jKSTHwbYR8wdsOSrJGVEU\n" +
"PbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END EC PRIVATE KEY-----\n";
private static final ApplicationId id = ApplicationId.from("my-tenant", "my-app", "default");
private ControllerTester tester;
private ApplicationController applications;
private SignatureFilter filter;
private RequestSigner signer;
// Fresh controller, filter and signer per test; registers the application under test.
@Before
public void setup() {
tester = new ControllerTester();
applications = tester.controller().applications();
filter = new SignatureFilter(tester.controller());
signer = new RequestSigner(privateKey, id.serializedForm(), tester.clock());
tester.createApplication(tester.createTenant(id.tenant().value(), "unused", 496L),
id.application().value(),
id.instance().value(),
28L);
}
// NOTE(review): dangling @Test — its method body seems elided here; verify.
@Test
private static DiscFilterRequest requestOf(HttpRequest request, byte[] body) {
Request converted = new Request(request.uri().toString(), body, Request.Method.valueOf(request.method()));
converted.getHeaders().addAll(request.headers().map());
return new ApplicationRequestToDiscFilterRequestWrapper(converted);
}
} |
// Review asked for the one-liner headers.putIfAbsent(name, new ArrayList<>()).add(value),
// but that would NPE on the first insert: Map.putIfAbsent returns the PREVIOUS mapping,
// i.e. null when the key was absent. Map.computeIfAbsent returns the (new) current value,
// so it is the correct one-liner — and it also avoids allocating a list when one exists.
/** Appends {@code value} to the list of values for header {@code name}. */
private RequestBuilder header(String name, String value) {
    this.headers.computeIfAbsent(name, key -> new ArrayList<>()).add(value);
    return this;
} | this.headers.putIfAbsent(name, new ArrayList<>()); | private RequestBuilder header(String name, String value) {
// Two-step insert: ensure a value list exists, then append.
// NOTE(review): the suggested one-liner putIfAbsent(...).add(...) would NPE on the
// first insert (putIfAbsent returns null when no previous mapping existed);
// headers.computeIfAbsent(name, k -> new ArrayList<>()).add(value) is the safe form.
this.headers.putIfAbsent(name, new ArrayList<>());
this.headers.get(name).add(value);
return this;
} | class RequestBuilder implements Supplier<Request> {
// Request under construction: path/method are mandatory, the rest have defaults.
private final String path;
private final Request.Method method;
private byte[] data = new byte[0];
private AthenzIdentity identity;
private OktaAccessToken oktaAccessToken;
private String contentType = "application/json";
private Map<String, List<String>> headers = new HashMap<>();
private String recursive;
private RequestBuilder(String path, Request.Method method) {
this.path = path;
this.method = method;
}
// Payload setters; the String/streamer/entity overloads funnel into data(byte[]).
private RequestBuilder data(byte[] data) { this.data = data; return this; }
private RequestBuilder data(String data) { return data(data.getBytes(StandardCharsets.UTF_8)); }
private RequestBuilder data(MultiPartStreamer streamer) {
// Also adopts the streamer's multipart content type.
return Exceptions.uncheck(() -> data(streamer.data().readAllBytes()).contentType(streamer.contentType()));
}
private RequestBuilder data(HttpEntity data) {
// Buffer the entity in memory, then reuse the byte[] path and its content type.
ByteArrayOutputStream out = new ByteArrayOutputStream();
try {
data.writeTo(out);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return data(out.toByteArray()).contentType(data.getContentType().getValue());
}
// Identity/option setters, all returning this for chaining.
private RequestBuilder userIdentity(UserId userId) { this.identity = HostedAthenzIdentities.from(userId); return this; }
private RequestBuilder screwdriverIdentity(ScrewdriverId screwdriverId) { this.identity = HostedAthenzIdentities.from(screwdriverId); return this; }
private RequestBuilder oktaAccessToken(OktaAccessToken oktaAccessToken) { this.oktaAccessToken = oktaAccessToken; return this; }
private RequestBuilder contentType(String contentType) { this.contentType = contentType; return this; }
private RequestBuilder recursive(String recursive) { this.recursive = recursive; return this; }
// Assembles the final Request, attaching headers and any identity/token.
@Override
public Request get() {
Request request = new Request("http:
(recursive == null ? "" : "?recursive=" + recursive),
data, method);
request.getHeaders().addAll(headers);
request.getHeaders().put("Content-Type", contentType);
if (identity != null) {
addIdentityToRequest(request, identity);
}
if (oktaAccessToken != null) {
addOktaAccessToken(request, oktaAccessToken);
}
return request;
}
} | class RequestBuilder implements Supplier<Request> {
// Builder state; path and method are fixed, other fields default as shown.
private final String path;
private final Request.Method method;
private byte[] data = new byte[0];
private AthenzIdentity identity;
private OktaAccessToken oktaAccessToken;
private String contentType = "application/json";
private Map<String, List<String>> headers = new HashMap<>();
private String recursive;
private RequestBuilder(String path, Request.Method method) {
this.path = path;
this.method = method;
}
// Body setters; String and multipart variants delegate to the byte[] form.
private RequestBuilder data(byte[] data) { this.data = data; return this; }
private RequestBuilder data(String data) { return data(data.getBytes(StandardCharsets.UTF_8)); }
private RequestBuilder data(MultiPartStreamer streamer) {
return Exceptions.uncheck(() -> data(streamer.data().readAllBytes()).contentType(streamer.contentType()));
}
// Fluent setters for caller identity, token, content type and recursion flag.
private RequestBuilder userIdentity(UserId userId) { this.identity = HostedAthenzIdentities.from(userId); return this; }
private RequestBuilder screwdriverIdentity(ScrewdriverId screwdriverId) { this.identity = HostedAthenzIdentities.from(screwdriverId); return this; }
private RequestBuilder oktaAccessToken(OktaAccessToken oktaAccessToken) { this.oktaAccessToken = oktaAccessToken; return this; }
private RequestBuilder contentType(String contentType) { this.contentType = contentType; return this; }
private RequestBuilder recursive(String recursive) { this.recursive = recursive; return this; }
// Builds the Request, copying headers and optionally attaching identity/token.
@Override
public Request get() {
Request request = new Request("http:
(recursive == null ? "" : "?recursive=" + recursive),
data, method);
request.getHeaders().addAll(headers);
request.getHeaders().put("Content-Type", contentType);
if (identity != null) {
addIdentityToRequest(request, identity);
}
if (oktaAccessToken != null) {
addOktaAccessToken(request, oktaAccessToken);
}
return request;
}
} |
Will consider that in the future. | public void testFilter() {
// NOTE(review): rejection is inferred from a non-empty filter result here —
// confirm this matches SignatureFilter's return contract.
HttpRequest.Builder request = HttpRequest.newBuilder(URI.create("https:
byte[] emptyBody = new byte[0];
// No signature at all: rejected.
DiscFilterRequest unsigned = requestOf(request.method("GET", HttpRequest.BodyPublishers.ofByteArray(emptyBody)).build(), emptyBody);
assertFalse(filter.filter(unsigned).isEmpty());
// Signed before any deploy key is stored: rejected.
DiscFilterRequest signed = requestOf(signer.signed(request, Method.GET), emptyBody);
assertFalse(filter.filter(signed).isEmpty());
// Mismatching deploy key: rejected.
applications.lockOrThrow(id, application -> applications.store(application.withPemDeployKey(otherPublicKey)));
assertFalse(filter.filter(signed).isEmpty());
// Matching deploy key: accepted, principal and roles set on the request.
applications.lockOrThrow(id, application -> applications.store(application.withPemDeployKey(publicKey)));
assertTrue(filter.filter(signed).isEmpty());
SecurityContext securityContext = (SecurityContext) signed.getAttribute(SecurityContext.ATTRIBUTE_NAME);
assertEquals("buildService@my-tenant.my-app", securityContext.principal().getName());
assertEquals(Set.of(Role.buildService(id.tenant(), id.application())), securityContext.roles());
// Signed POST with body accepted; the original unsigned request stays rejected.
byte[] hiBytes = new byte[]{0x48, 0x69};
signed = requestOf(signer.signed(request, Method.POST), hiBytes);
assertTrue(filter.filter(signed).isEmpty());
assertFalse(filter.filter(unsigned).isEmpty());
} | DiscFilterRequest unsigned = requestOf(request.method("GET", HttpRequest.BodyPublishers.ofByteArray(emptyBody)).build(), emptyBody); | public void testFilter() {
// Variant asserting on the SecurityContext attribute instead of the filter result.
HttpRequest.Builder request = HttpRequest.newBuilder(URI.create("https:
byte[] emptyBody = new byte[0];
// Unsigned request: no security context attribute must appear.
DiscFilterRequest unsigned = requestOf(request.method("GET", HttpRequest.BodyPublishers.ofByteArray(emptyBody)).build(), emptyBody);
filter.filter(unsigned);
assertNull(unsigned.getAttribute(SecurityContext.ATTRIBUTE_NAME));
// Signed, no key registered yet: no security context.
DiscFilterRequest signed = requestOf(signer.signed(request, Method.GET), emptyBody);
filter.filter(signed);
assertNull(signed.getAttribute(SecurityContext.ATTRIBUTE_NAME));
// Wrong key registered: no security context.
applications.lockOrThrow(id, application -> applications.store(application.withPemDeployKey(otherPublicKey)));
filter.filter(signed);
assertNull(signed.getAttribute(SecurityContext.ATTRIBUTE_NAME));
// Correct key: accepted; principal and build-service role attached.
applications.lockOrThrow(id, application -> applications.store(application.withPemDeployKey(publicKey)));
assertTrue(filter.filter(signed).isEmpty());
SecurityContext securityContext = (SecurityContext) signed.getAttribute(SecurityContext.ATTRIBUTE_NAME);
assertEquals("buildService@my-tenant.my-app", securityContext.principal().getName());
assertEquals(Set.of(Role.buildService(id.tenant(), id.application())), securityContext.roles());
// Signed POST carrying a body is likewise accepted.
byte[] hiBytes = new byte[]{0x48, 0x69};
signed = requestOf(signer.signed(request, Method.POST), hiBytes);
filter.filter(signed);
securityContext = (SecurityContext) signed.getAttribute(SecurityContext.ATTRIBUTE_NAME);
assertEquals("buildService@my-tenant.my-app", securityContext.principal().getName());
assertEquals(Set.of(Role.buildService(id.tenant(), id.application())), securityContext.roles());
filter.filter(unsigned);
assertNull(unsigned.getAttribute(SecurityContext.ATTRIBUTE_NAME));
} | class SignatureFilterTest {
// Key fixtures in PEM; only 'publicKey' matches 'privateKey'.
private static final String publicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String otherPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFELzPyinTfQ/sZnTmRp5E4Ve/sbE\n" +
"pDhJeqczkyFcT2PysJ5sZwm7rKPEeXDOhzTPCyRvbUqc2SGdWbKUGGa/Yw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String privateKey = "-----BEGIN EC PRIVATE KEY-----\n" +
"MHcCAQEEIJUmbIX8YFLHtpRgkwqDDE3igU9RG6JD9cYHWAZii9j7oAoGCCqGSM49\n" +
"AwEHoUQDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9z/4jKSTHwbYR8wdsOSrJGVEU\n" +
"PbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END EC PRIVATE KEY-----\n";
private static final ApplicationId id = ApplicationId.from("my-tenant", "my-app", "default");
private ControllerTester tester;
private ApplicationController applications;
private SignatureFilter filter;
private RequestSigner signer;
// Per-test setup: controller stack, filter, signer, and the application under test.
@Before
public void setup() {
tester = new ControllerTester();
applications = tester.controller().applications();
filter = new SignatureFilter(tester.controller());
signer = new RequestSigner(privateKey, id.serializedForm(), tester.clock());
tester.createApplication(tester.createTenant(id.tenant().value(), "unused", 496L),
id.application().value(),
id.instance().value(),
28L);
}
// NOTE(review): @Test with no following test method — body appears elided; confirm.
@Test
// Converts a java.net.http request plus raw body into a DiscFilterRequest.
private static DiscFilterRequest requestOf(HttpRequest request, byte[] body) {
Request converted = new Request(request.uri().toString(), body, Request.Method.valueOf(request.method()));
converted.getHeaders().addAll(request.headers().map());
return new ApplicationRequestToDiscFilterRequestWrapper(converted);
}
} | class SignatureFilterTest {
// Fixture key material; 'otherPublicKey' is the deliberately non-matching key.
private static final String publicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String otherPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFELzPyinTfQ/sZnTmRp5E4Ve/sbE\n" +
"pDhJeqczkyFcT2PysJ5sZwm7rKPEeXDOhzTPCyRvbUqc2SGdWbKUGGa/Yw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String privateKey = "-----BEGIN EC PRIVATE KEY-----\n" +
"MHcCAQEEIJUmbIX8YFLHtpRgkwqDDE3igU9RG6JD9cYHWAZii9j7oAoGCCqGSM49\n" +
"AwEHoUQDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9z/4jKSTHwbYR8wdsOSrJGVEU\n" +
"PbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END EC PRIVATE KEY-----\n";
private static final ApplicationId id = ApplicationId.from("my-tenant", "my-app", "default");
private ControllerTester tester;
private ApplicationController applications;
private SignatureFilter filter;
private RequestSigner signer;
@Before
public void setup() {
tester = new ControllerTester();
applications = tester.controller().applications();
filter = new SignatureFilter(tester.controller());
signer = new RequestSigner(privateKey, id.serializedForm(), tester.clock());
tester.createApplication(tester.createTenant(id.tenant().value(), "unused", 496L),
id.application().value(),
id.instance().value(),
28L);
}
// NOTE(review): detached @Test annotation — the test body seems elided; verify.
@Test
private static DiscFilterRequest requestOf(HttpRequest request, byte[] body) {
Request converted = new Request(request.uri().toString(), body, Request.Method.valueOf(request.method()));
converted.getHeaders().addAll(request.headers().map());
return new ApplicationRequestToDiscFilterRequestWrapper(converted);
}
} |
I prefer _container_ to _jdisc_ wherever, as the _jdisc_ name will go away some time ... | public void verify_jvm_tag_with_attributes() throws IOException, SAXException {
String servicesXml =
"<jdisc version='1.0'>" +
" <search/>" +
" <nodes>" +
" <jvm options='-XX:SoftRefLRUPolicyMSPerMB=2500' gc-options='-XX:+UseParNewGC' allocated-memory='45%'/>" +
" <node hostalias='mockhost'/>" +
" </nodes>" +
"</jdisc>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
final MyLogger logger = new MyLogger();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.deployLogger(logger)
.properties(new TestProperties().setHostedVespa(true))
.build());
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
model.getConfig(qrStartBuilder, "jdisc/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals("-XX:+UseParNewGC", qrStartConfig.jvm().gcopts());
assertEquals(45, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory());
assertEquals("-XX:SoftRefLRUPolicyMSPerMB=2500", model.getContainerClusters().values().iterator().next().getContainers().get(0).getJvmOptions());
} | "<jdisc version='1.0'>" + | public void verify_jvm_tag_with_attributes() throws IOException, SAXException {
String servicesXml =
"<container version='1.0'>" +
" <search/>" +
" <nodes>" +
" <jvm options='-XX:SoftRefLRUPolicyMSPerMB=2500' gc-options='-XX:+UseParNewGC' allocated-memory='45%'/>" +
" <node hostalias='mockhost'/>" +
" </nodes>" +
"</container>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
final MyLogger logger = new MyLogger();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.deployLogger(logger)
.properties(new TestProperties().setHostedVespa(true))
.build());
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
model.getConfig(qrStartBuilder, "container/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals("-XX:+UseParNewGC", qrStartConfig.jvm().gcopts());
assertEquals(45, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory());
assertEquals("-XX:SoftRefLRUPolicyMSPerMB=2500", model.getContainerClusters().values().iterator().next().getContainers().get(0).getJvmOptions());
} | class ContainerModelBuilderTest extends ContainerModelBuilderTestBase {
@Test
@Test
public void detect_conflicting_jvmgcoptions_in_jvmargs() {
assertFalse(ContainerModelBuilder.incompatibleGCOptions(""));
assertFalse(ContainerModelBuilder.incompatibleGCOptions("UseG1GC"));
assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:+UseG1GC"));
assertTrue(ContainerModelBuilder.incompatibleGCOptions("abc -XX:+UseParNewGC xyz"));
assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:CMSInitiatingOccupancyFraction=19"));
}
@Test
public void honours_jvm_gc_options() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc version='1.0'>",
" <search/>",
" <nodes jvm-gc-options='-XX:+UseG1GC'>",
" <node hostalias='mockhost'/>",
" </nodes>",
"</jdisc>" );
createModel(root, clusterElem);
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
root.getConfig(qrStartBuilder, "jdisc/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals("-XX:+UseG1GC", qrStartConfig.jvm().gcopts());
}
private static void verifyIgnoreJvmGCOptions(boolean isHosted) throws IOException, SAXException {
verifyIgnoreJvmGCOptionsIfJvmArgs(isHosted, "jvmargs", ContainerCluster.G1GC);
verifyIgnoreJvmGCOptionsIfJvmArgs(isHosted, "jvm-options", "-XX:+UseG1GC");
}
private static void verifyIgnoreJvmGCOptionsIfJvmArgs(boolean isHosted, String jvmOptionsName, String expectedGC) throws IOException, SAXException {
String servicesXml =
"<jdisc version='1.0'>" +
" <nodes jvm-gc-options='-XX:+UseG1GC' " + jvmOptionsName + "='-XX:+UseParNewGC'>" +
" <node hostalias='mockhost'/>" +
" </nodes>" +
"</jdisc>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
final MyLogger logger = new MyLogger();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.deployLogger(logger)
.zone(new Zone(SystemName.cd, Environment.dev, RegionName.from("here")))
.properties(new TestProperties().setHostedVespa(isHosted))
.build());
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
model.getConfig(qrStartBuilder, "jdisc/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals(expectedGC, qrStartConfig.jvm().gcopts());
}
@Test
public void ignores_jvmgcoptions_on_conflicting_jvmargs() throws IOException, SAXException {
verifyIgnoreJvmGCOptions(false);
verifyIgnoreJvmGCOptions(true);
}
private void verifyJvmGCOptions(boolean isHosted, String override, Zone zone, String expected) throws IOException, SAXException {
String servicesXml =
"<jdisc version='1.0'>" +
" <nodes " + ((override == null) ? ">" : ("jvm-gc-options='" + override + "'>")) +
" <node hostalias='mockhost'/>" +
" </nodes>" +
"</jdisc>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
final MyLogger logger = new MyLogger();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.deployLogger(logger)
.zone(zone)
.properties(new TestProperties().setHostedVespa(isHosted))
.build());
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
model.getConfig(qrStartBuilder, "jdisc/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals(expected, qrStartConfig.jvm().gcopts());
}
private void verifyJvmGCOptions(boolean isHosted, Zone zone, String expected) throws IOException, SAXException {
verifyJvmGCOptions(isHosted, null, zone, expected);
verifyJvmGCOptions(isHosted, "-XX:+UseG1GC", zone, "-XX:+UseG1GC");
Zone DEV = new Zone(SystemName.dev, zone.environment(), zone.region());
verifyJvmGCOptions(isHosted, null, DEV, ContainerCluster.G1GC);
verifyJvmGCOptions(isHosted, "-XX:+UseConcMarkSweepGC", DEV, "-XX:+UseConcMarkSweepGC");
}
@Test
public void requireThatJvmGCOptionsIsHonoured() throws IOException, SAXException {
verifyJvmGCOptions(false, Zone.defaultZone(),ContainerCluster.G1GC);
verifyJvmGCOptions(true, Zone.defaultZone(), ContainerCluster.G1GC);
}
@Test
public void default_port_is_4080() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc version='1.0'>",
nodesXml,
"</jdisc>" );
createModel(root, clusterElem);
AbstractService container = (AbstractService)root.getProducer("jdisc/container.0");
assertThat(container.getRelativePort(0), is(getDefaults().vespaWebServicePort()));
}
@Test
public void http_server_port_is_configurable_and_does_not_affect_other_ports() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc version='1.0'>",
" <http>",
" <server port='9000' id='foo' />",
" </http>",
nodesXml,
"</jdisc>" );
createModel(root, clusterElem);
AbstractService container = (AbstractService)root.getProducer("jdisc/container.0");
assertThat(container.getRelativePort(0), is(9000));
assertThat(container.getRelativePort(1), is(not(9001)));
}
@Test
public void fail_if_http_port_is_not_4080_in_hosted_vespa() throws Exception {
String servicesXml =
"<services>" +
"<admin version='3.0'>" +
" <nodes count='1'/>" +
"</admin>" +
"<jdisc version='1.0'>" +
" <http>" +
" <server port='9000' id='foo' />" +
" </http>" +
nodesXml +
"</jdisc>" +
"</services>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
final MyLogger logger = new MyLogger();
new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.deployLogger(logger)
.properties(new TestProperties().setHostedVespa(true))
.build());
assertFalse(logger.msgs.isEmpty());
assertThat(logger.msgs.get(0).getSecond(), containsString(String.format("You cannot set port to anything else than %d", Container.BASEPORT)));
}
private static class MyLogger implements DeployLogger {
List<Pair<Level, String>> msgs = new ArrayList<>();
@Override
public void log(Level level, String message) {
msgs.add(new Pair<>(level, message));
}
}
@Test
public void one_cluster_with_explicit_port_and_one_without_is_ok() {
Element cluster1Elem = DomBuilderTest.parse(
"<jdisc id='cluster1' version='1.0' />");
Element cluster2Elem = DomBuilderTest.parse(
"<jdisc id='cluster2' version='1.0'>",
" <http>",
" <server port='8000' id='foo' />",
" </http>",
"</jdisc>");
createModel(root, cluster1Elem, cluster2Elem);
}
@Test
public void two_clusters_without_explicit_port_throws_exception() {
Element cluster1Elem = DomBuilderTest.parse(
"<jdisc id='cluster1' version='1.0'>",
nodesXml,
"</jdisc>" );
Element cluster2Elem = DomBuilderTest.parse(
"<jdisc id='cluster2' version='1.0'>",
nodesXml,
"</jdisc>" );
try {
createModel(root, cluster1Elem, cluster2Elem);
fail("Expected exception");
} catch (RuntimeException e) {
assertThat(e.getMessage(), containsString("cannot reserve port"));
}
}
@Test
public void verify_bindings_for_builtin_handlers() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0' />"
);
createModel(root, clusterElem);
JdiscBindingsConfig config = root.getConfig(JdiscBindingsConfig.class, "default/container.0");
JdiscBindingsConfig.Handlers defaultRootHandler = config.handlers(BindingsOverviewHandler.class.getName());
assertThat(defaultRootHandler.serverBindings(), contains("*:
JdiscBindingsConfig.Handlers applicationStatusHandler = config.handlers(ApplicationStatusHandler.class.getName());
assertThat(applicationStatusHandler.serverBindings(),
contains("http:
JdiscBindingsConfig.Handlers fileRequestHandler = config.handlers(VipStatusHandler.class.getName());
assertThat(fileRequestHandler.serverBindings(),
contains("http:
}
@Test
public void default_root_handler_is_disabled_when_user_adds_a_handler_with_same_binding() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>" +
" <handler id='userRootHandler'>" +
" <binding>" + ContainerCluster.ROOT_HANDLER_BINDING + "</binding>" +
" </handler>" +
"</jdisc>");
createModel(root, clusterElem);
ComponentsConfig.Components userRootHandler = getComponent(componentsConfig(), BindingsOverviewHandler.class.getName());
assertThat(userRootHandler, nullValue());
}
@Test
public void handler_bindings_are_included_in_discBindings_config() {
createClusterWithJDiscHandler();
String discBindingsConfig = root.getConfig(JdiscBindingsConfig.class, "default").toString();
assertThat(discBindingsConfig, containsString("{discHandler}"));
assertThat(discBindingsConfig, containsString(".serverBindings[0] \"binding0\""));
assertThat(discBindingsConfig, containsString(".serverBindings[1] \"binding1\""));
assertThat(discBindingsConfig, containsString(".clientBindings[0] \"clientBinding\""));
}
@Test
public void handlers_are_included_in_components_config() {
createClusterWithJDiscHandler();
assertThat(componentsConfig().toString(), containsString(".id \"discHandler\""));
}
private void createClusterWithJDiscHandler() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <handler id='discHandler'>",
" <binding>binding0</binding>",
" <binding>binding1</binding>",
" <clientBinding>clientBinding</clientBinding>",
" </handler>",
"</jdisc>");
createModel(root, clusterElem);
}
@Test
public void servlets_are_included_in_ServletPathConfig() {
createClusterWithServlet();
ServletPathsConfig servletPathsConfig = root.getConfig(ServletPathsConfig.class, "default");
assertThat(servletPathsConfig.servlets().values().iterator().next().path(), is("p/a/t/h"));
}
@Test
public void servletconfig_is_produced() {
createClusterWithServlet();
String configId = getContainerCluster("default").getServletMap().
values().iterator().next().getConfigId();
ServletConfigConfig servletConfig = root.getConfig(ServletConfigConfig.class, configId);
assertThat(servletConfig.map().get("myKey"), is("myValue"));
}
private void createClusterWithServlet() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <servlet id='myServlet' class='myClass' bundle='myBundle'>",
" <path>p/a/t/h</path>",
" <servlet-config>",
" <myKey>myValue</myKey>",
" </servlet-config>",
" </servlet>",
"</jdisc>");
createModel(root, clusterElem);
}
@Test
public void processing_handler_bindings_can_be_overridden() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <processing>",
" <binding>binding0</binding>",
" <binding>binding1</binding>",
" </processing>",
"</jdisc>");
createModel(root, clusterElem);
String discBindingsConfig = root.getConfig(JdiscBindingsConfig.class, "default").toString();
assertThat(discBindingsConfig, containsString(".serverBindings[0] \"binding0\""));
assertThat(discBindingsConfig, containsString(".serverBindings[1] \"binding1\""));
assertThat(discBindingsConfig, not(containsString("/processing/*")));
}
@Test
public void clientProvider_bindings_are_included_in_discBindings_config() {
createModelWithClientProvider();
String discBindingsConfig = root.getConfig(JdiscBindingsConfig.class, "default").toString();
assertThat(discBindingsConfig, containsString("{discClient}"));
assertThat(discBindingsConfig, containsString(".clientBindings[0] \"binding0\""));
assertThat(discBindingsConfig, containsString(".clientBindings[1] \"binding1\""));
assertThat(discBindingsConfig, containsString(".serverBindings[0] \"serverBinding\""));
}
@Test
public void clientProviders_are_included_in_components_config() {
createModelWithClientProvider();
assertThat(componentsConfig().toString(), containsString(".id \"discClient\""));
}
private void createModelWithClientProvider() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>" +
" <client id='discClient'>" +
" <binding>binding0</binding>" +
" <binding>binding1</binding>" +
" <serverBinding>serverBinding</serverBinding>" +
" </client>" +
"</jdisc>" );
createModel(root, clusterElem);
}
@Test
public void serverProviders_are_included_in_components_config() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>" +
" <server id='discServer' />" +
"</jdisc>" );
createModel(root, clusterElem);
String componentsConfig = componentsConfig().toString();
assertThat(componentsConfig, containsString(".id \"discServer\""));
}
private String getChainsConfig(String configId) {
return root.getConfig(ChainsConfig.class, configId).toString();
}
@Test
public void searchHandler_gets_only_search_chains_in_chains_config() {
createClusterWithProcessingAndSearchChains();
String searchHandlerConfigId = "default/component/com.yahoo.search.handler.SearchHandler";
String chainsConfig = getChainsConfig(searchHandlerConfigId);
assertThat(chainsConfig, containsLineWithPattern(".*\\.id \"testSearcher@default\"$"));
assertThat(chainsConfig, not(containsLineWithPattern(".*\\.id \"testProcessor@default\"$")));
}
@Test
public void processingHandler_gets_only_processing_chains_in_chains_config() {
createClusterWithProcessingAndSearchChains();
String processingHandlerConfigId = "default/component/com.yahoo.processing.handler.ProcessingHandler";
String chainsConfig = getChainsConfig(processingHandlerConfigId);
assertThat(chainsConfig, containsLineWithPattern(".*\\.id \"testProcessor@default\"$"));
assertThat(chainsConfig, not(containsLineWithPattern(".*\\.id \"testSearcher@default\"$")));
}
private void createClusterWithProcessingAndSearchChains() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>" +
" <search>" +
" <chain id='default'>" +
" <searcher id='testSearcher' />" +
" </chain>" +
" </search>" +
" <processing>" +
" <chain id='default'>" +
" <processor id='testProcessor'/>" +
" </chain>" +
" </processing>" +
nodesXml +
" </jdisc>");
createModel(root, clusterElem);
}
@Test
public void user_config_can_be_overridden_on_node() {
Element containerElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <config name=\"prelude.cluster.qr-monitor\">" +
" <requesttimeout>111</requesttimeout>",
" </config> " +
" <nodes>",
" <node hostalias='host1' />",
" <node hostalias='host2'>",
" <config name=\"prelude.cluster.qr-monitor\">",
" <requesttimeout>222</requesttimeout>",
" </config> ",
" </node>",
" </nodes>",
"</jdisc>");
root = ContentClusterUtils.createMockRoot(new String[]{"host1", "host2"});
createModel(root, containerElem);
ContainerCluster cluster = (ContainerCluster)root.getChildren().get("default");
assertThat(cluster.getContainers().size(), is(2));
assertEquals(root.getConfig(QrMonitorConfig.class, "default/container.0").requesttimeout(), 111);
assertEquals(root.getConfig(QrMonitorConfig.class, "default/container.1").requesttimeout(), 222);
}
@Test
public void nested_components_are_injected_to_handlers() throws Exception {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <handler id='myHandler'>",
" <component id='injected' />",
" </handler>",
" <client id='myClient'>",
" <component id='injected' />",
" </client>",
"</jdisc>");
createModel(root, clusterElem);
Component<?,?> handler = getContainerComponent("default", "myHandler");
assertThat(handler.getInjectedComponentIds(), hasItem("injected@myHandler"));
Component<?,?> client = getContainerComponent("default", "myClient");
assertThat(client.getInjectedComponentIds(), hasItem("injected@myClient"));
}
@Test
public void component_includes_are_added() {
VespaModelCreatorWithFilePkg creator = new VespaModelCreatorWithFilePkg("src/test/cfg/application/include_dirs");
VespaModel model = creator.create(true);
ContainerCluster cluster = model.getContainerClusters().get("default");
Map<ComponentId, Component<?, ?>> componentsMap = cluster.getComponentsMap();
Component<?,?> example = componentsMap.get(
ComponentId.fromString("test.Exampledocproc"));
assertThat(example.getComponentId().getName(), is("test.Exampledocproc"));
}
@Test
public void affinity_is_set() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <http>",
" <server port='" + getDefaults().vespaWebServicePort() + "' id='main' />",
" </http>",
" <nodes cpu-socket-affinity='true'>",
" <node hostalias='node1' />",
" </nodes>" +
"</jdisc>");
createModel(root, clusterElem);
assertTrue(getContainerCluster("default").getContainers().get(0).getAffinity().isPresent());
assertThat(getContainerCluster("default").getContainers().get(0).getAffinity().get().cpuSocket(), is(0));
}
@Test
public void singlenode_servicespec_is_used_with_hosts_xml() throws IOException, SAXException {
String servicesXml = "<jdisc id='default' version='1.0' />";
String hostsXml = "<hosts>\n" +
" <host name=\"test1.yahoo.com\">\n" +
" <alias>node1</alias>\n" +
" </host>\n" +
"</hosts>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder()
.withHosts(hostsXml)
.withServices(servicesXml)
.build();
VespaModel model = new VespaModel(applicationPackage);
assertThat(model.getHostSystem().getHosts().size(), is(1));
}
@Test
public void http_aliases_are_stored_on_cluster_and_on_service_properties() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <aliases>",
" <service-alias>service1</service-alias>",
" <service-alias>service2</service-alias>",
" <endpoint-alias>foo1.bar1.com</endpoint-alias>",
" <endpoint-alias>foo2.bar2.com</endpoint-alias>",
" </aliases>",
" <nodes>",
" <node hostalias='host1' />",
" </nodes>",
"</jdisc>");
createModel(root, clusterElem);
assertEquals(getContainerCluster("default").serviceAliases().get(0), "service1");
assertEquals(getContainerCluster("default").endpointAliases().get(0), "foo1.bar1.com");
assertEquals(getContainerCluster("default").serviceAliases().get(1), "service2");
assertEquals(getContainerCluster("default").endpointAliases().get(1), "foo2.bar2.com");
assertEquals(getContainerCluster("default").getContainers().get(0).getServicePropertyString("servicealiases"), "service1,service2");
assertEquals(getContainerCluster("default").getContainers().get(0).getServicePropertyString("endpointaliases"), "foo1.bar1.com,foo2.bar2.com");
}
@Test
public void http_aliases_are_only_honored_in_prod_environment() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <aliases>",
" <service-alias>service1</service-alias>",
" <endpoint-alias>foo1.bar1.com</endpoint-alias>",
" </aliases>",
" <nodes>",
" <node hostalias='host1' />",
" </nodes>",
"</jdisc>");
DeployState deployState = new DeployState.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east-1"))).build();
createModel(root, deployState, null, clusterElem);
assertEquals(0, getContainerCluster("default").serviceAliases().size());
assertEquals(0, getContainerCluster("default").endpointAliases().size());
assertNull(getContainerCluster("default").getContainers().get(0).getServicePropertyString("servicealiases"));
assertNull(getContainerCluster("default").getContainers().get(0).getServicePropertyString("endpointaliases"));
}
@Test
public void singlenode_servicespec_is_used_with_hosted_vespa() throws IOException, SAXException {
String servicesXml = "<jdisc id='default' version='1.0' />";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.modelHostProvisioner(new InMemoryProvisioner(true, "host1.yahoo.com", "host2.yahoo.com"))
.applicationPackage(applicationPackage)
.properties(new TestProperties()
.setMultitenant(true)
.setHostedVespa(true))
.build());
assertEquals(1, model.getHostSystem().getHosts().size());
}
@Test(expected = IllegalArgumentException.class)
public void renderers_named_JsonRenderer_are_not_allowed() {
createModel(root, generateContainerElementWithRenderer("JsonRenderer"));
}
@Test(expected = IllegalArgumentException.class)
public void renderers_named_DefaultRenderer_are_not_allowed() {
createModel(root, generateContainerElementWithRenderer("XmlRenderer"));
}
@Test
public void renderers_named_something_else_are_allowed() {
createModel(root, generateContainerElementWithRenderer("my-little-renderer"));
}
@Test
public void vip_status_handler_uses_file_for_hosted_vespa() throws Exception {
String servicesXml = "<services>" +
"<jdisc version='1.0'>" +
nodesXml +
"</jdisc>" +
"</services>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.properties(new TestProperties().setHostedVespa(true))
.build());
AbstractConfigProducerRoot modelRoot = model.getRoot();
VipStatusConfig vipStatusConfig = modelRoot.getConfig(VipStatusConfig.class, "jdisc/component/status.html-status-handler");
assertTrue(vipStatusConfig.accessdisk());
assertEquals(ContainerModelBuilder.HOSTED_VESPA_STATUS_FILE, vipStatusConfig.statusfile());
}
@Test
public void qrconfig_is_produced() throws IOException, SAXException {
String servicesXml =
"<services>" +
"<admin version='3.0'>" +
" <nodes count='1'/>" +
"</admin>" +
"<jdisc id ='default' version='1.0'>" +
" <nodes>" +
" <node hostalias='node1' />" +
" </nodes>" +
"</jdisc>" +
"</services>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder()
.withServices(servicesXml)
.build();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.properties(new TestProperties())
.build());
String hostname = HostName.getLocalhost();
QrConfig config = model.getConfig(QrConfig.class, "default/container.0");
assertEquals("default.container.0", config.discriminator());
assertEquals(19102, config.rpc().port());
assertEquals("vespa/service/default/container.0", config.rpc().slobrokId());
assertTrue(config.rpc().enabled());
assertEquals("", config.rpc().host());
assertFalse(config.restartOnDeploy());
assertEquals("filedistribution/" + hostname, config.filedistributor().configid());
}
@Test
public void secret_store_can_be_set_up() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc version='1.0'>",
" <secret-store>",
" <group name='group1' environment='env1'/>",
" </secret-store>",
"</jdisc>");
createModel(root, clusterElem);
SecretStore secretStore = getContainerCluster("jdisc").getSecretStore().get();
assertEquals("group1", secretStore.getGroups().get(0).name);
assertEquals("env1", secretStore.getGroups().get(0).environment);
}
@Test
public void honours_environment_vars() {
Element clusterElem = DomBuilderTest.parse(
"<container version='1.0'>",
" <nodes>",
" <environment-variables>",
" <KMP_SETTING>1</KMP_SETTING>",
" <KMP_AFFINITY>granularity=fine,verbose,compact,1,0</KMP_AFFINITY>",
" </environment-variables>",
" <node hostalias='mockhost'/>",
" </nodes>",
"</container>" );
createModel(root, clusterElem);
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
root.getConfig(qrStartBuilder, "container/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals("KMP_SETTING=1 KMP_AFFINITY=granularity=fine,verbose,compact,1,0 ", qrStartConfig.qrs().env());
}
private Element generateContainerElementWithRenderer(String rendererId) {
return DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <search>",
String.format(" <renderer id='%s'/>", rendererId),
" </search>",
"</jdisc>");
}
} | class ContainerModelBuilderTest extends ContainerModelBuilderTestBase {
@Test
@Test
public void detect_conflicting_jvmgcoptions_in_jvmargs() {
assertFalse(ContainerModelBuilder.incompatibleGCOptions(""));
assertFalse(ContainerModelBuilder.incompatibleGCOptions("UseG1GC"));
assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:+UseG1GC"));
assertTrue(ContainerModelBuilder.incompatibleGCOptions("abc -XX:+UseParNewGC xyz"));
assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:CMSInitiatingOccupancyFraction=19"));
}
@Test
public void honours_jvm_gc_options() {
Element clusterElem = DomBuilderTest.parse(
"<container version='1.0'>",
" <search/>",
" <nodes jvm-gc-options='-XX:+UseG1GC'>",
" <node hostalias='mockhost'/>",
" </nodes>",
"</container>" );
createModel(root, clusterElem);
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
root.getConfig(qrStartBuilder, "container/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals("-XX:+UseG1GC", qrStartConfig.jvm().gcopts());
}
private static void verifyIgnoreJvmGCOptions(boolean isHosted) throws IOException, SAXException {
verifyIgnoreJvmGCOptionsIfJvmArgs(isHosted, "jvmargs", ContainerCluster.G1GC);
verifyIgnoreJvmGCOptionsIfJvmArgs(isHosted, "jvm-options", "-XX:+UseG1GC");
}
private static void verifyIgnoreJvmGCOptionsIfJvmArgs(boolean isHosted, String jvmOptionsName, String expectedGC) throws IOException, SAXException {
String servicesXml =
"<container version='1.0'>" +
" <nodes jvm-gc-options='-XX:+UseG1GC' " + jvmOptionsName + "='-XX:+UseParNewGC'>" +
" <node hostalias='mockhost'/>" +
" </nodes>" +
"</container>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
final MyLogger logger = new MyLogger();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.deployLogger(logger)
.zone(new Zone(SystemName.cd, Environment.dev, RegionName.from("here")))
.properties(new TestProperties().setHostedVespa(isHosted))
.build());
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
model.getConfig(qrStartBuilder, "container/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals(expectedGC, qrStartConfig.jvm().gcopts());
}
@Test
public void ignores_jvmgcoptions_on_conflicting_jvmargs() throws IOException, SAXException {
verifyIgnoreJvmGCOptions(false);
verifyIgnoreJvmGCOptions(true);
}
private void verifyJvmGCOptions(boolean isHosted, String override, Zone zone, String expected) throws IOException, SAXException {
String servicesXml =
"<container version='1.0'>" +
" <nodes " + ((override == null) ? ">" : ("jvm-gc-options='" + override + "'>")) +
" <node hostalias='mockhost'/>" +
" </nodes>" +
"</container>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
final MyLogger logger = new MyLogger();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.deployLogger(logger)
.zone(zone)
.properties(new TestProperties().setHostedVespa(isHosted))
.build());
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
model.getConfig(qrStartBuilder, "container/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals(expected, qrStartConfig.jvm().gcopts());
}
private void verifyJvmGCOptions(boolean isHosted, Zone zone, String expected) throws IOException, SAXException {
verifyJvmGCOptions(isHosted, null, zone, expected);
verifyJvmGCOptions(isHosted, "-XX:+UseG1GC", zone, "-XX:+UseG1GC");
Zone DEV = new Zone(SystemName.dev, zone.environment(), zone.region());
verifyJvmGCOptions(isHosted, null, DEV, ContainerCluster.G1GC);
verifyJvmGCOptions(isHosted, "-XX:+UseConcMarkSweepGC", DEV, "-XX:+UseConcMarkSweepGC");
}
@Test
public void requireThatJvmGCOptionsIsHonoured() throws IOException, SAXException {
verifyJvmGCOptions(false, Zone.defaultZone(),ContainerCluster.G1GC);
verifyJvmGCOptions(true, Zone.defaultZone(), ContainerCluster.G1GC);
}
@Test
public void default_port_is_4080() {
Element clusterElem = DomBuilderTest.parse(
"<container version='1.0'>",
nodesXml,
"</container>" );
createModel(root, clusterElem);
AbstractService container = (AbstractService)root.getProducer("container/container.0");
assertThat(container.getRelativePort(0), is(getDefaults().vespaWebServicePort()));
}
@Test
public void http_server_port_is_configurable_and_does_not_affect_other_ports() {
Element clusterElem = DomBuilderTest.parse(
"<container version='1.0'>",
" <http>",
" <server port='9000' id='foo' />",
" </http>",
nodesXml,
"</container>" );
createModel(root, clusterElem);
AbstractService container = (AbstractService)root.getProducer("container/container.0");
assertThat(container.getRelativePort(0), is(9000));
assertThat(container.getRelativePort(1), is(not(9001)));
}
@Test
public void fail_if_http_port_is_not_4080_in_hosted_vespa() throws Exception {
String servicesXml =
"<services>" +
"<admin version='3.0'>" +
" <nodes count='1'/>" +
"</admin>" +
"<container version='1.0'>" +
" <http>" +
" <server port='9000' id='foo' />" +
" </http>" +
nodesXml +
"</container>" +
"</services>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
final MyLogger logger = new MyLogger();
new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.deployLogger(logger)
.properties(new TestProperties().setHostedVespa(true))
.build());
assertFalse(logger.msgs.isEmpty());
assertThat(logger.msgs.get(0).getSecond(), containsString(String.format("You cannot set port to anything else than %d", Container.BASEPORT)));
}
/** DeployLogger test double that records every logged (level, message) pair for later inspection. */
private static class MyLogger implements DeployLogger {
    // final: the list is only mutated, never reassigned.
    final List<Pair<Level, String>> msgs = new ArrayList<>();
    @Override
    public void log(Level level, String message) {
        msgs.add(new Pair<>(level, message));
    }
}
@Test
public void one_cluster_with_explicit_port_and_one_without_is_ok() {
    // Passes when model building does not throw: only one cluster competes
    // for the default port, the other declares its own.
    Element cluster1Elem = DomBuilderTest.parse(
            "<container id='cluster1' version='1.0' />");
    Element cluster2Elem = DomBuilderTest.parse(
            "<container id='cluster2' version='1.0'>",
            " <http>",
            " <server port='8000' id='foo' />",
            " </http>",
            "</container>");
    createModel(root, cluster1Elem, cluster2Elem);
}
@Test
public void two_clusters_without_explicit_port_throws_exception() {
    // Two clusters both defaulting to the same port must make model building
    // fail with a port reservation error.
    Element cluster1Elem = DomBuilderTest.parse(
            "<container id='cluster1' version='1.0'>",
            nodesXml,
            "</container>" );
    Element cluster2Elem = DomBuilderTest.parse(
            "<container id='cluster2' version='1.0'>",
            nodesXml,
            "</container>" );
    try {
        createModel(root, cluster1Elem, cluster2Elem);
        fail("Expected exception");
    } catch (RuntimeException e) {
        assertThat(e.getMessage(), containsString("cannot reserve port"));
    }
}
@Test
public void verify_bindings_for_builtin_handlers() {
    // The built-in handlers (bindings overview, application status, VIP
    // status) must get server bindings into JdiscBindingsConfig.
    Element clusterElem = DomBuilderTest.parse(
            "<container id='default' version='1.0' />"
    );
    createModel(root, clusterElem);
    JdiscBindingsConfig config = root.getConfig(JdiscBindingsConfig.class, "default/container.0");
    JdiscBindingsConfig.Handlers defaultRootHandler = config.handlers(BindingsOverviewHandler.class.getName());
    // NOTE(review): the binding string literals below are cut off mid-literal
    // (they end at "*:" / "http:") - this looks like an extraction artifact
    // where everything after "//" was dropped. Restore the full binding URIs
    // from version control before compiling.
    assertThat(defaultRootHandler.serverBindings(), contains("*:
    JdiscBindingsConfig.Handlers applicationStatusHandler = config.handlers(ApplicationStatusHandler.class.getName());
    assertThat(applicationStatusHandler.serverBindings(),
            contains("http:
    JdiscBindingsConfig.Handlers fileRequestHandler = config.handlers(VipStatusHandler.class.getName());
    assertThat(fileRequestHandler.serverBindings(),
            contains("http:
}
@Test
public void default_root_handler_is_disabled_when_user_adds_a_handler_with_same_binding() {
    // When a user handler takes the root binding, the built-in
    // BindingsOverviewHandler must not be registered as a component.
    Element clusterElem = DomBuilderTest.parse(
            "<container id='default' version='1.0'>" +
            " <handler id='userRootHandler'>" +
            " <binding>" + ContainerCluster.ROOT_HANDLER_BINDING + "</binding>" +
            " </handler>" +
            "</container>");
    createModel(root, clusterElem);
    ComponentsConfig.Components userRootHandler = getComponent(componentsConfig(), BindingsOverviewHandler.class.getName());
    assertThat(userRootHandler, nullValue());
}
@Test
public void handler_bindings_are_included_in_discBindings_config() {
    // Server and client bindings declared on a <handler> must appear in
    // JdiscBindingsConfig under that handler's entry.
    createClusterWithJDiscHandler();
    String discBindingsConfig = root.getConfig(JdiscBindingsConfig.class, "default").toString();
    assertThat(discBindingsConfig, containsString("{discHandler}"));
    assertThat(discBindingsConfig, containsString(".serverBindings[0] \"binding0\""));
    assertThat(discBindingsConfig, containsString(".serverBindings[1] \"binding1\""));
    assertThat(discBindingsConfig, containsString(".clientBindings[0] \"clientBinding\""));
}
@Test
public void handlers_are_included_in_components_config() {
    // A declared <handler> must also be registered as a component.
    createClusterWithJDiscHandler();
    assertThat(componentsConfig().toString(), containsString(".id \"discHandler\""));
}
// Builds a cluster with one handler carrying two server bindings and one client binding.
private void createClusterWithJDiscHandler() {
    Element clusterElem = DomBuilderTest.parse(
            "<container id='default' version='1.0'>",
            " <handler id='discHandler'>",
            " <binding>binding0</binding>",
            " <binding>binding1</binding>",
            " <clientBinding>clientBinding</clientBinding>",
            " </handler>",
            "</container>");
    createModel(root, clusterElem);
}
@Test
public void servlets_are_included_in_ServletPathConfig() {
    // The servlet's <path> must surface in ServletPathsConfig.
    createClusterWithServlet();
    ServletPathsConfig servletPathsConfig = root.getConfig(ServletPathsConfig.class, "default");
    assertThat(servletPathsConfig.servlets().values().iterator().next().path(), is("p/a/t/h"));
}
@Test
public void servletconfig_is_produced() {
    // Key/value pairs from <servlet-config> must surface in ServletConfigConfig,
    // resolved via the servlet's own config id.
    createClusterWithServlet();
    String configId = getContainerCluster("default").getServletMap().
            values().iterator().next().getConfigId();
    ServletConfigConfig servletConfig = root.getConfig(ServletConfigConfig.class, configId);
    assertThat(servletConfig.map().get("myKey"), is("myValue"));
}
// Builds a cluster with one servlet carrying a path and a single servlet-config entry.
private void createClusterWithServlet() {
    Element clusterElem = DomBuilderTest.parse(
            "<container id='default' version='1.0'>",
            " <servlet id='myServlet' class='myClass' bundle='myBundle'>",
            " <path>p/a/t/h</path>",
            " <servlet-config>",
            " <myKey>myValue</myKey>",
            " </servlet-config>",
            " </servlet>",
            "</container>");
    createModel(root, clusterElem);
}
@Test
public void processing_handler_bindings_can_be_overridden() {
    // Explicit <binding> elements under <processing> must replace the default
    // /processing/* binding entirely.
    Element clusterElem = DomBuilderTest.parse(
            "<container id='default' version='1.0'>",
            " <processing>",
            " <binding>binding0</binding>",
            " <binding>binding1</binding>",
            " </processing>",
            "</container>");
    createModel(root, clusterElem);
    String discBindingsConfig = root.getConfig(JdiscBindingsConfig.class, "default").toString();
    assertThat(discBindingsConfig, containsString(".serverBindings[0] \"binding0\""));
    assertThat(discBindingsConfig, containsString(".serverBindings[1] \"binding1\""));
    assertThat(discBindingsConfig, not(containsString("/processing/*")));
}
@Test
public void clientProvider_bindings_are_included_in_discBindings_config() {
    // Client and server bindings declared on a <client> provider must appear
    // in JdiscBindingsConfig under that client's entry.
    createModelWithClientProvider();
    String discBindingsConfig = root.getConfig(JdiscBindingsConfig.class, "default").toString();
    assertThat(discBindingsConfig, containsString("{discClient}"));
    assertThat(discBindingsConfig, containsString(".clientBindings[0] \"binding0\""));
    assertThat(discBindingsConfig, containsString(".clientBindings[1] \"binding1\""));
    assertThat(discBindingsConfig, containsString(".serverBindings[0] \"serverBinding\""));
}
@Test
public void clientProviders_are_included_in_components_config() {
    // A declared <client> provider must also be registered as a component.
    createModelWithClientProvider();
    assertThat(componentsConfig().toString(), containsString(".id \"discClient\""));
}
// Builds a cluster with one client provider carrying two client bindings and one server binding.
private void createModelWithClientProvider() {
    Element clusterElem = DomBuilderTest.parse(
            "<container id='default' version='1.0'>" +
            " <client id='discClient'>" +
            " <binding>binding0</binding>" +
            " <binding>binding1</binding>" +
            " <serverBinding>serverBinding</serverBinding>" +
            " </client>" +
            "</container>" );
    createModel(root, clusterElem);
}
@Test
public void serverProviders_are_included_in_components_config() {
    // A declared <server> provider must be registered as a component.
    Element clusterElem = DomBuilderTest.parse(
            "<container id='default' version='1.0'>" +
            " <server id='discServer' />" +
            "</container>");
    createModel(root, clusterElem);
    assertThat(componentsConfig().toString(), containsString(".id \"discServer\""));
}
/** Returns the ChainsConfig for the given config id, rendered as a string for substring matching. */
private String getChainsConfig(String configId) {
    return root.getConfig(ChainsConfig.class, configId).toString();
}
@Test
public void searchHandler_gets_only_search_chains_in_chains_config() {
    // The search handler's chains config must contain the searcher component
    // but not the processor component.
    createClusterWithProcessingAndSearchChains();
    String searchHandlerConfigId = "default/component/com.yahoo.search.handler.SearchHandler";
    String chainsConfig = getChainsConfig(searchHandlerConfigId);
    assertThat(chainsConfig, containsLineWithPattern(".*\\.id \"testSearcher@default\"$"));
    assertThat(chainsConfig, not(containsLineWithPattern(".*\\.id \"testProcessor@default\"$")));
}
@Test
public void processingHandler_gets_only_processing_chains_in_chains_config() {
    // The processing handler's chains config must contain the processor
    // component but not the searcher component.
    createClusterWithProcessingAndSearchChains();
    String processingHandlerConfigId = "default/component/com.yahoo.processing.handler.ProcessingHandler";
    String chainsConfig = getChainsConfig(processingHandlerConfigId);
    assertThat(chainsConfig, containsLineWithPattern(".*\\.id \"testProcessor@default\"$"));
    assertThat(chainsConfig, not(containsLineWithPattern(".*\\.id \"testSearcher@default\"$")));
}
// Builds a cluster declaring both a search chain (testSearcher) and a
// processing chain (testProcessor), each under a chain id 'default'.
private void createClusterWithProcessingAndSearchChains() {
    Element clusterElem = DomBuilderTest.parse(
            "<container id='default' version='1.0'>" +
            " <search>" +
            " <chain id='default'>" +
            " <searcher id='testSearcher' />" +
            " </chain>" +
            " </search>" +
            " <processing>" +
            " <chain id='default'>" +
            " <processor id='testProcessor'/>" +
            " </chain>" +
            " </processing>" +
            nodesXml +
            " </container>");
    createModel(root, clusterElem);
}
@Test
public void user_config_can_be_overridden_on_node() {
    // A <config> on an individual <node> must override the cluster-level
    // config of the same name, for that node only: container.0 keeps the
    // cluster value (111), container.1 gets the node value (222).
    Element containerElem = DomBuilderTest.parse(
            "<container id='default' version='1.0'>",
            " <config name=\"prelude.cluster.qr-monitor\">" +
            " <requesttimeout>111</requesttimeout>",
            " </config> " +
            " <nodes>",
            " <node hostalias='host1' />",
            " <node hostalias='host2'>",
            " <config name=\"prelude.cluster.qr-monitor\">",
            " <requesttimeout>222</requesttimeout>",
            " </config> ",
            " </node>",
            " </nodes>",
            "</container>");
    root = ContentClusterUtils.createMockRoot(new String[]{"host1", "host2"});
    createModel(root, containerElem);
    ContainerCluster cluster = (ContainerCluster)root.getChildren().get("default");
    assertThat(cluster.getContainers().size(), is(2));
    // assertEquals takes (expected, actual) - expected value listed first so
    // failure messages report the right direction.
    assertEquals(111, root.getConfig(QrMonitorConfig.class, "default/container.0").requesttimeout());
    assertEquals(222, root.getConfig(QrMonitorConfig.class, "default/container.1").requesttimeout());
}
@Test
public void nested_components_are_injected_to_handlers() throws Exception {
    // A <component> nested in a handler or client must be recorded as an
    // injected component of its enclosing component, id-scoped to it
    // (e.g. "injected@myHandler").
    Element clusterElem = DomBuilderTest.parse(
            "<container id='default' version='1.0'>",
            " <handler id='myHandler'>",
            " <component id='injected' />",
            " </handler>",
            " <client id='myClient'>",
            " <component id='injected' />",
            " </client>",
            "</container>");
    createModel(root, clusterElem);
    Component<?,?> handler = getContainerComponent("default", "myHandler");
    assertThat(handler.getInjectedComponentIds(), hasItem("injected@myHandler"));
    Component<?,?> client = getContainerComponent("default", "myClient");
    assertThat(client.getInjectedComponentIds(), hasItem("injected@myClient"));
}
@Test
public void component_includes_are_added() {
    // Components pulled in via the include-dirs application package must end
    // up in the cluster's component map under their own id.
    VespaModelCreatorWithFilePkg creator = new VespaModelCreatorWithFilePkg("src/test/cfg/application/include_dirs");
    VespaModel model = creator.create(true);
    ContainerCluster cluster = model.getContainerClusters().get("default");
    Map<ComponentId, Component<?, ?>> componentsMap = cluster.getComponentsMap();
    Component<?,?> example = componentsMap.get(
            ComponentId.fromString("test.Exampledocproc"));
    assertThat(example.getComponentId().getName(), is("test.Exampledocproc"));
}
@Test
public void affinity_is_set() {
    // With cpu-socket-affinity='true' the container must get an affinity set;
    // the first container is assigned cpu socket 0.
    Element clusterElem = DomBuilderTest.parse(
            "<container id='default' version='1.0'>",
            " <http>",
            " <server port='" + getDefaults().vespaWebServicePort() + "' id='main' />",
            " </http>",
            " <nodes cpu-socket-affinity='true'>",
            " <node hostalias='node1' />",
            " </nodes>" +
            "</container>");
    createModel(root, clusterElem);
    assertTrue(getContainerCluster("default").getContainers().get(0).getAffinity().isPresent());
    assertThat(getContainerCluster("default").getContainers().get(0).getAffinity().get().cpuSocket(), is(0));
}
@Test
public void singlenode_servicespec_is_used_with_hosts_xml() throws IOException, SAXException {
    // A single-node <container> spec combined with a one-host hosts.xml must
    // yield a model containing exactly one host.
    String servicesXml = "<container id='default' version='1.0' />";
    String hostsXml = "<hosts>\n" +
            " <host name=\"test1.yahoo.com\">\n" +
            " <alias>node1</alias>\n" +
            " </host>\n" +
            "</hosts>";
    ApplicationPackage applicationPackage = new MockApplicationPackage.Builder()
            .withHosts(hostsXml)
            .withServices(servicesXml)
            .build();
    VespaModel model = new VespaModel(applicationPackage);
    assertThat(model.getHostSystem().getHosts().size(), is(1));
}
@Test
public void http_aliases_are_stored_on_cluster_and_on_service_properties() {
    // Declared service/endpoint aliases must be stored on the cluster in
    // declaration order, and also joined into comma-separated service
    // properties on each container.
    Element clusterElem = DomBuilderTest.parse(
            "<container id='default' version='1.0'>",
            " <aliases>",
            " <service-alias>service1</service-alias>",
            " <service-alias>service2</service-alias>",
            " <endpoint-alias>foo1.bar1.com</endpoint-alias>",
            " <endpoint-alias>foo2.bar2.com</endpoint-alias>",
            " </aliases>",
            " <nodes>",
            " <node hostalias='host1' />",
            " </nodes>",
            "</container>");
    createModel(root, clusterElem);
    // assertEquals takes (expected, actual) - expected literal first so
    // failure messages report the right direction.
    assertEquals("service1", getContainerCluster("default").serviceAliases().get(0));
    assertEquals("foo1.bar1.com", getContainerCluster("default").endpointAliases().get(0));
    assertEquals("service2", getContainerCluster("default").serviceAliases().get(1));
    assertEquals("foo2.bar2.com", getContainerCluster("default").endpointAliases().get(1));
    assertEquals("service1,service2", getContainerCluster("default").getContainers().get(0).getServicePropertyString("servicealiases"));
    assertEquals("foo1.bar1.com,foo2.bar2.com", getContainerCluster("default").getContainers().get(0).getServicePropertyString("endpointaliases"));
}
@Test
public void http_aliases_are_only_honored_in_prod_environment() {
    // In a non-prod zone (dev here) declared aliases must be dropped: empty on
    // the cluster and absent from container service properties.
    Element clusterElem = DomBuilderTest.parse(
            "<container id='default' version='1.0'>",
            " <aliases>",
            " <service-alias>service1</service-alias>",
            " <endpoint-alias>foo1.bar1.com</endpoint-alias>",
            " </aliases>",
            " <nodes>",
            " <node hostalias='host1' />",
            " </nodes>",
            "</container>");
    DeployState deployState = new DeployState.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east-1"))).build();
    createModel(root, deployState, null, clusterElem);
    assertEquals(0, getContainerCluster("default").serviceAliases().size());
    assertEquals(0, getContainerCluster("default").endpointAliases().size());
    assertNull(getContainerCluster("default").getContainers().get(0).getServicePropertyString("servicealiases"));
    assertNull(getContainerCluster("default").getContainers().get(0).getServicePropertyString("endpointaliases"));
}
@Test
public void singlenode_servicespec_is_used_with_hosted_vespa() throws IOException, SAXException {
    // In hosted, multitenant mode a single-node spec must provision exactly
    // one host even though the provisioner offers two.
    String servicesXml = "<container id='default' version='1.0' />";
    ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
    VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
            .modelHostProvisioner(new InMemoryProvisioner(true, "host1.yahoo.com", "host2.yahoo.com"))
            .applicationPackage(applicationPackage)
            .properties(new TestProperties()
                    .setMultitenant(true)
                    .setHostedVespa(true))
            .build());
    assertEquals(1, model.getHostSystem().getHosts().size());
}
@Test(expected = IllegalArgumentException.class)
public void renderers_named_JsonRenderer_are_not_allowed() {
    // Declaring a renderer with the id 'JsonRenderer' must be rejected.
    createModel(root, generateContainerElementWithRenderer("JsonRenderer"));
}
@Test(expected = IllegalArgumentException.class)
public void renderers_named_DefaultRenderer_are_not_allowed() {
    // Declaring a renderer with this reserved id must be rejected.
    // NOTE(review): the method name says 'DefaultRenderer' but the id used is
    // 'XmlRenderer' - confirm which id is intended and align the test name.
    createModel(root, generateContainerElementWithRenderer("XmlRenderer"));
}
@Test
public void renderers_named_something_else_are_allowed() {
    // A non-reserved renderer id must be accepted (no exception expected).
    createModel(root, generateContainerElementWithRenderer("my-little-renderer"));
}
@Test
public void vip_status_handler_uses_file_for_hosted_vespa() throws Exception {
    // In hosted Vespa the VIP status handler must read status from disk,
    // using the hosted status file constant.
    String servicesXml = "<services>" +
            "<container version='1.0'>" +
            nodesXml +
            "</container>" +
            "</services>";
    ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
    VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
            .applicationPackage(applicationPackage)
            .properties(new TestProperties().setHostedVespa(true))
            .build());
    AbstractConfigProducerRoot modelRoot = model.getRoot();
    VipStatusConfig vipStatusConfig = modelRoot.getConfig(VipStatusConfig.class, "container/component/status.html-status-handler");
    assertTrue(vipStatusConfig.accessdisk());
    assertEquals(ContainerModelBuilder.HOSTED_VESPA_STATUS_FILE, vipStatusConfig.statusfile());
}
@Test
public void qrconfig_is_produced() throws IOException, SAXException {
    // Sanity-checks the generated QrConfig for a single container: the
    // discriminator, rpc port/slobrok id, restartOnDeploy default, and a
    // filedistribution config id derived from the local hostname.
    String servicesXml =
            "<services>" +
            "<admin version='3.0'>" +
            " <nodes count='1'/>" +
            "</admin>" +
            "<container id ='default' version='1.0'>" +
            " <nodes>" +
            " <node hostalias='node1' />" +
            " </nodes>" +
            "</container>" +
            "</services>";
    ApplicationPackage applicationPackage = new MockApplicationPackage.Builder()
            .withServices(servicesXml)
            .build();
    VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
            .applicationPackage(applicationPackage)
            .properties(new TestProperties())
            .build());
    String hostname = HostName.getLocalhost();
    QrConfig config = model.getConfig(QrConfig.class, "default/container.0");
    assertEquals("default.container.0", config.discriminator());
    assertEquals(19102, config.rpc().port());
    assertEquals("vespa/service/default/container.0", config.rpc().slobrokId());
    assertTrue(config.rpc().enabled());
    assertEquals("", config.rpc().host());
    assertFalse(config.restartOnDeploy());
    assertEquals("filedistribution/" + hostname, config.filedistributor().configid());
}
@Test
public void secret_store_can_be_set_up() {
    // A <secret-store> group must be parsed into the cluster's SecretStore
    // model with its name and environment preserved.
    Element clusterElem = DomBuilderTest.parse(
            "<container version='1.0'>",
            " <secret-store>",
            " <group name='group1' environment='env1'/>",
            " </secret-store>",
            "</container>");
    createModel(root, clusterElem);
    SecretStore secretStore = getContainerCluster("container").getSecretStore().get();
    assertEquals("group1", secretStore.getGroups().get(0).name);
    assertEquals("env1", secretStore.getGroups().get(0).environment);
}
@Test
public void honours_environment_vars() {
    // <environment-variables> under <nodes> must be serialized into the qrs
    // env string in declaration order, space-separated (with a trailing space).
    Element clusterElem = DomBuilderTest.parse(
            "<container version='1.0'>",
            " <nodes>",
            " <environment-variables>",
            " <KMP_SETTING>1</KMP_SETTING>",
            " <KMP_AFFINITY>granularity=fine,verbose,compact,1,0</KMP_AFFINITY>",
            " </environment-variables>",
            " <node hostalias='mockhost'/>",
            " </nodes>",
            "</container>" );
    createModel(root, clusterElem);
    QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
    root.getConfig(qrStartBuilder, "container/container.0");
    QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
    assertEquals("KMP_SETTING=1 KMP_AFFINITY=granularity=fine,verbose,compact,1,0 ", qrStartConfig.qrs().env());
}
// Builds a minimal container cluster declaring one search renderer with the given id.
private Element generateContainerElementWithRenderer(String rendererId) {
    return DomBuilderTest.parse(
            "<container id='default' version='1.0'>",
            " <search>",
            " <renderer id='" + rendererId + "'/>",
            " </search>",
            "</container>");
}
} |
Unified on jdisc => container | public void verify_jvm_tag_with_attributes() throws IOException, SAXException {
String servicesXml =
"<jdisc version='1.0'>" +
" <search/>" +
" <nodes>" +
" <jvm options='-XX:SoftRefLRUPolicyMSPerMB=2500' gc-options='-XX:+UseParNewGC' allocated-memory='45%'/>" +
" <node hostalias='mockhost'/>" +
" </nodes>" +
"</jdisc>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
final MyLogger logger = new MyLogger();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.deployLogger(logger)
.properties(new TestProperties().setHostedVespa(true))
.build());
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
model.getConfig(qrStartBuilder, "jdisc/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals("-XX:+UseParNewGC", qrStartConfig.jvm().gcopts());
assertEquals(45, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory());
assertEquals("-XX:SoftRefLRUPolicyMSPerMB=2500", model.getContainerClusters().values().iterator().next().getContainers().get(0).getJvmOptions());
} | "<jdisc version='1.0'>" + | public void verify_jvm_tag_with_attributes() throws IOException, SAXException {
String servicesXml =
"<container version='1.0'>" +
" <search/>" +
" <nodes>" +
" <jvm options='-XX:SoftRefLRUPolicyMSPerMB=2500' gc-options='-XX:+UseParNewGC' allocated-memory='45%'/>" +
" <node hostalias='mockhost'/>" +
" </nodes>" +
"</container>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
final MyLogger logger = new MyLogger();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.deployLogger(logger)
.properties(new TestProperties().setHostedVespa(true))
.build());
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
model.getConfig(qrStartBuilder, "container/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals("-XX:+UseParNewGC", qrStartConfig.jvm().gcopts());
assertEquals(45, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory());
assertEquals("-XX:SoftRefLRUPolicyMSPerMB=2500", model.getContainerClusters().values().iterator().next().getContainers().get(0).getJvmOptions());
} | class ContainerModelBuilderTest extends ContainerModelBuilderTestBase {
@Test
@Test
public void detect_conflicting_jvmgcoptions_in_jvmargs() {
assertFalse(ContainerModelBuilder.incompatibleGCOptions(""));
assertFalse(ContainerModelBuilder.incompatibleGCOptions("UseG1GC"));
assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:+UseG1GC"));
assertTrue(ContainerModelBuilder.incompatibleGCOptions("abc -XX:+UseParNewGC xyz"));
assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:CMSInitiatingOccupancyFraction=19"));
}
@Test
public void honours_jvm_gc_options() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc version='1.0'>",
" <search/>",
" <nodes jvm-gc-options='-XX:+UseG1GC'>",
" <node hostalias='mockhost'/>",
" </nodes>",
"</jdisc>" );
createModel(root, clusterElem);
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
root.getConfig(qrStartBuilder, "jdisc/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals("-XX:+UseG1GC", qrStartConfig.jvm().gcopts());
}
private static void verifyIgnoreJvmGCOptions(boolean isHosted) throws IOException, SAXException {
verifyIgnoreJvmGCOptionsIfJvmArgs(isHosted, "jvmargs", ContainerCluster.G1GC);
verifyIgnoreJvmGCOptionsIfJvmArgs(isHosted, "jvm-options", "-XX:+UseG1GC");
}
private static void verifyIgnoreJvmGCOptionsIfJvmArgs(boolean isHosted, String jvmOptionsName, String expectedGC) throws IOException, SAXException {
String servicesXml =
"<jdisc version='1.0'>" +
" <nodes jvm-gc-options='-XX:+UseG1GC' " + jvmOptionsName + "='-XX:+UseParNewGC'>" +
" <node hostalias='mockhost'/>" +
" </nodes>" +
"</jdisc>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
final MyLogger logger = new MyLogger();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.deployLogger(logger)
.zone(new Zone(SystemName.cd, Environment.dev, RegionName.from("here")))
.properties(new TestProperties().setHostedVespa(isHosted))
.build());
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
model.getConfig(qrStartBuilder, "jdisc/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals(expectedGC, qrStartConfig.jvm().gcopts());
}
@Test
public void ignores_jvmgcoptions_on_conflicting_jvmargs() throws IOException, SAXException {
verifyIgnoreJvmGCOptions(false);
verifyIgnoreJvmGCOptions(true);
}
private void verifyJvmGCOptions(boolean isHosted, String override, Zone zone, String expected) throws IOException, SAXException {
String servicesXml =
"<jdisc version='1.0'>" +
" <nodes " + ((override == null) ? ">" : ("jvm-gc-options='" + override + "'>")) +
" <node hostalias='mockhost'/>" +
" </nodes>" +
"</jdisc>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
final MyLogger logger = new MyLogger();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.deployLogger(logger)
.zone(zone)
.properties(new TestProperties().setHostedVespa(isHosted))
.build());
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
model.getConfig(qrStartBuilder, "jdisc/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals(expected, qrStartConfig.jvm().gcopts());
}
private void verifyJvmGCOptions(boolean isHosted, Zone zone, String expected) throws IOException, SAXException {
verifyJvmGCOptions(isHosted, null, zone, expected);
verifyJvmGCOptions(isHosted, "-XX:+UseG1GC", zone, "-XX:+UseG1GC");
Zone DEV = new Zone(SystemName.dev, zone.environment(), zone.region());
verifyJvmGCOptions(isHosted, null, DEV, ContainerCluster.G1GC);
verifyJvmGCOptions(isHosted, "-XX:+UseConcMarkSweepGC", DEV, "-XX:+UseConcMarkSweepGC");
}
@Test
public void requireThatJvmGCOptionsIsHonoured() throws IOException, SAXException {
verifyJvmGCOptions(false, Zone.defaultZone(),ContainerCluster.G1GC);
verifyJvmGCOptions(true, Zone.defaultZone(), ContainerCluster.G1GC);
}
@Test
public void default_port_is_4080() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc version='1.0'>",
nodesXml,
"</jdisc>" );
createModel(root, clusterElem);
AbstractService container = (AbstractService)root.getProducer("jdisc/container.0");
assertThat(container.getRelativePort(0), is(getDefaults().vespaWebServicePort()));
}
@Test
public void http_server_port_is_configurable_and_does_not_affect_other_ports() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc version='1.0'>",
" <http>",
" <server port='9000' id='foo' />",
" </http>",
nodesXml,
"</jdisc>" );
createModel(root, clusterElem);
AbstractService container = (AbstractService)root.getProducer("jdisc/container.0");
assertThat(container.getRelativePort(0), is(9000));
assertThat(container.getRelativePort(1), is(not(9001)));
}
@Test
public void fail_if_http_port_is_not_4080_in_hosted_vespa() throws Exception {
String servicesXml =
"<services>" +
"<admin version='3.0'>" +
" <nodes count='1'/>" +
"</admin>" +
"<jdisc version='1.0'>" +
" <http>" +
" <server port='9000' id='foo' />" +
" </http>" +
nodesXml +
"</jdisc>" +
"</services>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
final MyLogger logger = new MyLogger();
new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.deployLogger(logger)
.properties(new TestProperties().setHostedVespa(true))
.build());
assertFalse(logger.msgs.isEmpty());
assertThat(logger.msgs.get(0).getSecond(), containsString(String.format("You cannot set port to anything else than %d", Container.BASEPORT)));
}
private static class MyLogger implements DeployLogger {
List<Pair<Level, String>> msgs = new ArrayList<>();
@Override
public void log(Level level, String message) {
msgs.add(new Pair<>(level, message));
}
}
@Test
public void one_cluster_with_explicit_port_and_one_without_is_ok() {
Element cluster1Elem = DomBuilderTest.parse(
"<jdisc id='cluster1' version='1.0' />");
Element cluster2Elem = DomBuilderTest.parse(
"<jdisc id='cluster2' version='1.0'>",
" <http>",
" <server port='8000' id='foo' />",
" </http>",
"</jdisc>");
createModel(root, cluster1Elem, cluster2Elem);
}
@Test
public void two_clusters_without_explicit_port_throws_exception() {
Element cluster1Elem = DomBuilderTest.parse(
"<jdisc id='cluster1' version='1.0'>",
nodesXml,
"</jdisc>" );
Element cluster2Elem = DomBuilderTest.parse(
"<jdisc id='cluster2' version='1.0'>",
nodesXml,
"</jdisc>" );
try {
createModel(root, cluster1Elem, cluster2Elem);
fail("Expected exception");
} catch (RuntimeException e) {
assertThat(e.getMessage(), containsString("cannot reserve port"));
}
}
@Test
public void verify_bindings_for_builtin_handlers() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0' />"
);
createModel(root, clusterElem);
JdiscBindingsConfig config = root.getConfig(JdiscBindingsConfig.class, "default/container.0");
JdiscBindingsConfig.Handlers defaultRootHandler = config.handlers(BindingsOverviewHandler.class.getName());
assertThat(defaultRootHandler.serverBindings(), contains("*:
JdiscBindingsConfig.Handlers applicationStatusHandler = config.handlers(ApplicationStatusHandler.class.getName());
assertThat(applicationStatusHandler.serverBindings(),
contains("http:
JdiscBindingsConfig.Handlers fileRequestHandler = config.handlers(VipStatusHandler.class.getName());
assertThat(fileRequestHandler.serverBindings(),
contains("http:
}
@Test
public void default_root_handler_is_disabled_when_user_adds_a_handler_with_same_binding() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>" +
" <handler id='userRootHandler'>" +
" <binding>" + ContainerCluster.ROOT_HANDLER_BINDING + "</binding>" +
" </handler>" +
"</jdisc>");
createModel(root, clusterElem);
ComponentsConfig.Components userRootHandler = getComponent(componentsConfig(), BindingsOverviewHandler.class.getName());
assertThat(userRootHandler, nullValue());
}
@Test
public void handler_bindings_are_included_in_discBindings_config() {
createClusterWithJDiscHandler();
String discBindingsConfig = root.getConfig(JdiscBindingsConfig.class, "default").toString();
assertThat(discBindingsConfig, containsString("{discHandler}"));
assertThat(discBindingsConfig, containsString(".serverBindings[0] \"binding0\""));
assertThat(discBindingsConfig, containsString(".serverBindings[1] \"binding1\""));
assertThat(discBindingsConfig, containsString(".clientBindings[0] \"clientBinding\""));
}
@Test
public void handlers_are_included_in_components_config() {
createClusterWithJDiscHandler();
assertThat(componentsConfig().toString(), containsString(".id \"discHandler\""));
}
private void createClusterWithJDiscHandler() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <handler id='discHandler'>",
" <binding>binding0</binding>",
" <binding>binding1</binding>",
" <clientBinding>clientBinding</clientBinding>",
" </handler>",
"</jdisc>");
createModel(root, clusterElem);
}
@Test
public void servlets_are_included_in_ServletPathConfig() {
createClusterWithServlet();
ServletPathsConfig servletPathsConfig = root.getConfig(ServletPathsConfig.class, "default");
assertThat(servletPathsConfig.servlets().values().iterator().next().path(), is("p/a/t/h"));
}
@Test
public void servletconfig_is_produced() {
createClusterWithServlet();
String configId = getContainerCluster("default").getServletMap().
values().iterator().next().getConfigId();
ServletConfigConfig servletConfig = root.getConfig(ServletConfigConfig.class, configId);
assertThat(servletConfig.map().get("myKey"), is("myValue"));
}
private void createClusterWithServlet() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <servlet id='myServlet' class='myClass' bundle='myBundle'>",
" <path>p/a/t/h</path>",
" <servlet-config>",
" <myKey>myValue</myKey>",
" </servlet-config>",
" </servlet>",
"</jdisc>");
createModel(root, clusterElem);
}
@Test
public void processing_handler_bindings_can_be_overridden() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <processing>",
" <binding>binding0</binding>",
" <binding>binding1</binding>",
" </processing>",
"</jdisc>");
createModel(root, clusterElem);
String discBindingsConfig = root.getConfig(JdiscBindingsConfig.class, "default").toString();
assertThat(discBindingsConfig, containsString(".serverBindings[0] \"binding0\""));
assertThat(discBindingsConfig, containsString(".serverBindings[1] \"binding1\""));
assertThat(discBindingsConfig, not(containsString("/processing/*")));
}
@Test
public void clientProvider_bindings_are_included_in_discBindings_config() {
createModelWithClientProvider();
String discBindingsConfig = root.getConfig(JdiscBindingsConfig.class, "default").toString();
assertThat(discBindingsConfig, containsString("{discClient}"));
assertThat(discBindingsConfig, containsString(".clientBindings[0] \"binding0\""));
assertThat(discBindingsConfig, containsString(".clientBindings[1] \"binding1\""));
assertThat(discBindingsConfig, containsString(".serverBindings[0] \"serverBinding\""));
}
@Test
public void clientProviders_are_included_in_components_config() {
createModelWithClientProvider();
assertThat(componentsConfig().toString(), containsString(".id \"discClient\""));
}
private void createModelWithClientProvider() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>" +
" <client id='discClient'>" +
" <binding>binding0</binding>" +
" <binding>binding1</binding>" +
" <serverBinding>serverBinding</serverBinding>" +
" </client>" +
"</jdisc>" );
createModel(root, clusterElem);
}
@Test
public void serverProviders_are_included_in_components_config() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>" +
" <server id='discServer' />" +
"</jdisc>" );
createModel(root, clusterElem);
String componentsConfig = componentsConfig().toString();
assertThat(componentsConfig, containsString(".id \"discServer\""));
}
// Renders the ChainsConfig for the given config id as a string, for substring/regex assertions.
private String getChainsConfig(String configId) {
return root.getConfig(ChainsConfig.class, configId).toString();
}
@Test
public void searchHandler_gets_only_search_chains_in_chains_config() {
createClusterWithProcessingAndSearchChains();
String searchHandlerConfigId = "default/component/com.yahoo.search.handler.SearchHandler";
String chainsConfig = getChainsConfig(searchHandlerConfigId);
assertThat(chainsConfig, containsLineWithPattern(".*\\.id \"testSearcher@default\"$"));
assertThat(chainsConfig, not(containsLineWithPattern(".*\\.id \"testProcessor@default\"$")));
}
@Test
public void processingHandler_gets_only_processing_chains_in_chains_config() {
createClusterWithProcessingAndSearchChains();
String processingHandlerConfigId = "default/component/com.yahoo.processing.handler.ProcessingHandler";
String chainsConfig = getChainsConfig(processingHandlerConfigId);
assertThat(chainsConfig, containsLineWithPattern(".*\\.id \"testProcessor@default\"$"));
assertThat(chainsConfig, not(containsLineWithPattern(".*\\.id \"testSearcher@default\"$")));
}
private void createClusterWithProcessingAndSearchChains() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>" +
" <search>" +
" <chain id='default'>" +
" <searcher id='testSearcher' />" +
" </chain>" +
" </search>" +
" <processing>" +
" <chain id='default'>" +
" <processor id='testProcessor'/>" +
" </chain>" +
" </processing>" +
nodesXml +
" </jdisc>");
createModel(root, clusterElem);
}
@Test
public void user_config_can_be_overridden_on_node() {
// Cluster-level config sets requesttimeout=111; the second node overrides it to 222.
Element containerElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <config name=\"prelude.cluster.qr-monitor\">" +
" <requesttimeout>111</requesttimeout>",
" </config> " +
" <nodes>",
" <node hostalias='host1' />",
" <node hostalias='host2'>",
" <config name=\"prelude.cluster.qr-monitor\">",
" <requesttimeout>222</requesttimeout>",
" </config> ",
" </node>",
" </nodes>",
"</jdisc>");
root = ContentClusterUtils.createMockRoot(new String[]{"host1", "host2"});
createModel(root, containerElem);
ContainerCluster cluster = (ContainerCluster)root.getChildren().get("default");
assertThat(cluster.getContainers().size(), is(2));
// JUnit's assertEquals takes (expected, actual); this order makes failure messages correct.
assertEquals(111, root.getConfig(QrMonitorConfig.class, "default/container.0").requesttimeout());
assertEquals(222, root.getConfig(QrMonitorConfig.class, "default/container.1").requesttimeout());
}
@Test
public void nested_components_are_injected_to_handlers() throws Exception {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <handler id='myHandler'>",
" <component id='injected' />",
" </handler>",
" <client id='myClient'>",
" <component id='injected' />",
" </client>",
"</jdisc>");
createModel(root, clusterElem);
Component<?,?> handler = getContainerComponent("default", "myHandler");
assertThat(handler.getInjectedComponentIds(), hasItem("injected@myHandler"));
Component<?,?> client = getContainerComponent("default", "myClient");
assertThat(client.getInjectedComponentIds(), hasItem("injected@myClient"));
}
@Test
public void component_includes_are_added() {
VespaModelCreatorWithFilePkg creator = new VespaModelCreatorWithFilePkg("src/test/cfg/application/include_dirs");
VespaModel model = creator.create(true);
ContainerCluster cluster = model.getContainerClusters().get("default");
Map<ComponentId, Component<?, ?>> componentsMap = cluster.getComponentsMap();
Component<?,?> example = componentsMap.get(
ComponentId.fromString("test.Exampledocproc"));
assertThat(example.getComponentId().getName(), is("test.Exampledocproc"));
}
@Test
public void affinity_is_set() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <http>",
" <server port='" + getDefaults().vespaWebServicePort() + "' id='main' />",
" </http>",
" <nodes cpu-socket-affinity='true'>",
" <node hostalias='node1' />",
" </nodes>" +
"</jdisc>");
createModel(root, clusterElem);
assertTrue(getContainerCluster("default").getContainers().get(0).getAffinity().isPresent());
assertThat(getContainerCluster("default").getContainers().get(0).getAffinity().get().cpuSocket(), is(0));
}
@Test
public void singlenode_servicespec_is_used_with_hosts_xml() throws IOException, SAXException {
String servicesXml = "<jdisc id='default' version='1.0' />";
String hostsXml = "<hosts>\n" +
" <host name=\"test1.yahoo.com\">\n" +
" <alias>node1</alias>\n" +
" </host>\n" +
"</hosts>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder()
.withHosts(hostsXml)
.withServices(servicesXml)
.build();
VespaModel model = new VespaModel(applicationPackage);
assertThat(model.getHostSystem().getHosts().size(), is(1));
}
@Test
public void http_aliases_are_stored_on_cluster_and_on_service_properties() {
// Aliases declared under <aliases> are kept on the cluster in declaration order and
// serialized as comma-separated service properties on each container.
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <aliases>",
" <service-alias>service1</service-alias>",
" <service-alias>service2</service-alias>",
" <endpoint-alias>foo1.bar1.com</endpoint-alias>",
" <endpoint-alias>foo2.bar2.com</endpoint-alias>",
" </aliases>",
" <nodes>",
" <node hostalias='host1' />",
" </nodes>",
"</jdisc>");
createModel(root, clusterElem);
// assertEquals takes (expected, actual); this order makes failure messages correct.
assertEquals("service1", getContainerCluster("default").serviceAliases().get(0));
assertEquals("foo1.bar1.com", getContainerCluster("default").endpointAliases().get(0));
assertEquals("service2", getContainerCluster("default").serviceAliases().get(1));
assertEquals("foo2.bar2.com", getContainerCluster("default").endpointAliases().get(1));
assertEquals("service1,service2", getContainerCluster("default").getContainers().get(0).getServicePropertyString("servicealiases"));
assertEquals("foo1.bar1.com,foo2.bar2.com", getContainerCluster("default").getContainers().get(0).getServicePropertyString("endpointaliases"));
}
@Test
// In non-prod zones (here: dev/us-east-1) declared aliases must be dropped entirely —
// neither stored on the cluster nor emitted as service properties.
public void http_aliases_are_only_honored_in_prod_environment() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <aliases>",
" <service-alias>service1</service-alias>",
" <endpoint-alias>foo1.bar1.com</endpoint-alias>",
" </aliases>",
" <nodes>",
" <node hostalias='host1' />",
" </nodes>",
"</jdisc>");
DeployState deployState = new DeployState.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east-1"))).build();
createModel(root, deployState, null, clusterElem);
assertEquals(0, getContainerCluster("default").serviceAliases().size());
assertEquals(0, getContainerCluster("default").endpointAliases().size());
assertNull(getContainerCluster("default").getContainers().get(0).getServicePropertyString("servicealiases"));
assertNull(getContainerCluster("default").getContainers().get(0).getServicePropertyString("endpointaliases"));
}
@Test
public void singlenode_servicespec_is_used_with_hosted_vespa() throws IOException, SAXException {
String servicesXml = "<jdisc id='default' version='1.0' />";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.modelHostProvisioner(new InMemoryProvisioner(true, "host1.yahoo.com", "host2.yahoo.com"))
.applicationPackage(applicationPackage)
.properties(new TestProperties()
.setMultitenant(true)
.setHostedVespa(true))
.build());
assertEquals(1, model.getHostSystem().getHosts().size());
}
@Test(expected = IllegalArgumentException.class)
public void renderers_named_JsonRenderer_are_not_allowed() {
createModel(root, generateContainerElementWithRenderer("JsonRenderer"));
}
// NOTE(review): method name says "DefaultRenderer" but the reserved id exercised here is
// "XmlRenderer" — confirm whether the name is stale (renderer renamed) or the id is wrong.
@Test(expected = IllegalArgumentException.class)
public void renderers_named_DefaultRenderer_are_not_allowed() {
createModel(root, generateContainerElementWithRenderer("XmlRenderer"));
}
@Test
public void renderers_named_something_else_are_allowed() {
createModel(root, generateContainerElementWithRenderer("my-little-renderer"));
}
@Test
public void vip_status_handler_uses_file_for_hosted_vespa() throws Exception {
String servicesXml = "<services>" +
"<jdisc version='1.0'>" +
nodesXml +
"</jdisc>" +
"</services>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.properties(new TestProperties().setHostedVespa(true))
.build());
AbstractConfigProducerRoot modelRoot = model.getRoot();
VipStatusConfig vipStatusConfig = modelRoot.getConfig(VipStatusConfig.class, "jdisc/component/status.html-status-handler");
assertTrue(vipStatusConfig.accessdisk());
assertEquals(ContainerModelBuilder.HOSTED_VESPA_STATUS_FILE, vipStatusConfig.statusfile());
}
@Test
// Verifies the QrConfig produced for a single container: discriminator, rpc settings,
// restart-on-deploy default, and the filedistributor config id derived from the local hostname.
public void qrconfig_is_produced() throws IOException, SAXException {
String servicesXml =
"<services>" +
"<admin version='3.0'>" +
" <nodes count='1'/>" +
"</admin>" +
"<jdisc id ='default' version='1.0'>" +
" <nodes>" +
" <node hostalias='node1' />" +
" </nodes>" +
"</jdisc>" +
"</services>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder()
.withServices(servicesXml)
.build();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.properties(new TestProperties())
.build());
// The mock package maps the single node to the local host.
String hostname = HostName.getLocalhost();
QrConfig config = model.getConfig(QrConfig.class, "default/container.0");
assertEquals("default.container.0", config.discriminator());
assertEquals(19102, config.rpc().port());
assertEquals("vespa/service/default/container.0", config.rpc().slobrokId());
assertTrue(config.rpc().enabled());
assertEquals("", config.rpc().host());
assertFalse(config.restartOnDeploy());
assertEquals("filedistribution/" + hostname, config.filedistributor().configid());
}
@Test
// A <secret-store> element with one group must surface on the cluster as a SecretStore
// whose first group carries the declared name and environment.
public void secret_store_can_be_set_up() {
Element clusterElem = DomBuilderTest.parse(
"<jdisc version='1.0'>",
" <secret-store>",
" <group name='group1' environment='env1'/>",
" </secret-store>",
"</jdisc>");
createModel(root, clusterElem);
SecretStore secretStore = getContainerCluster("jdisc").getSecretStore().get();
assertEquals("group1", secretStore.getGroups().get(0).name);
assertEquals("env1", secretStore.getGroups().get(0).environment);
}
@Test
// <environment-variables> under <nodes> must be serialized into the qrs env string,
// space-separated, in declaration order.
public void honours_environment_vars() {
Element clusterElem = DomBuilderTest.parse(
"<container version='1.0'>",
" <nodes>",
" <environment-variables>",
" <KMP_SETTING>1</KMP_SETTING>",
" <KMP_AFFINITY>granularity=fine,verbose,compact,1,0</KMP_AFFINITY>",
" </environment-variables>",
" <node hostalias='mockhost'/>",
" </nodes>",
"</container>" );
createModel(root, clusterElem);
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
root.getConfig(qrStartBuilder, "container/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
// NOTE(review): the trailing space in the expected value appears to be part of the
// serialization format (one "KEY=VALUE " token per variable) — confirm before changing.
assertEquals("KMP_SETTING=1 KMP_AFFINITY=granularity=fine,verbose,compact,1,0 ", qrStartConfig.qrs().env());
}
// Builds a minimal cluster element declaring a single search renderer with the given id.
private Element generateContainerElementWithRenderer(String rendererId) {
String rendererLine = " <renderer id='" + rendererId + "'/>";
return DomBuilderTest.parse(
"<jdisc id='default' version='1.0'>",
" <search>",
rendererLine,
" </search>",
"</jdisc>");
}
}
class ContainerModelBuilderTest extends ContainerModelBuilderTestBase {
@Test
// incompatibleGCOptions must flag real GC flags (-XX:+Use*GC, CMS tuning) anywhere in the
// option string, while ignoring empty input and bare words that merely resemble GC names.
public void detect_conflicting_jvmgcoptions_in_jvmargs() {
assertFalse(ContainerModelBuilder.incompatibleGCOptions(""));
assertFalse(ContainerModelBuilder.incompatibleGCOptions("UseG1GC"));
assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:+UseG1GC"));
assertTrue(ContainerModelBuilder.incompatibleGCOptions("abc -XX:+UseParNewGC xyz"));
assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:CMSInitiatingOccupancyFraction=19"));
}
@Test
public void honours_jvm_gc_options() {
Element clusterElem = DomBuilderTest.parse(
"<container version='1.0'>",
" <search/>",
" <nodes jvm-gc-options='-XX:+UseG1GC'>",
" <node hostalias='mockhost'/>",
" </nodes>",
"</container>" );
createModel(root, clusterElem);
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
root.getConfig(qrStartBuilder, "container/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals("-XX:+UseG1GC", qrStartConfig.jvm().gcopts());
}
private static void verifyIgnoreJvmGCOptions(boolean isHosted) throws IOException, SAXException {
verifyIgnoreJvmGCOptionsIfJvmArgs(isHosted, "jvmargs", ContainerCluster.G1GC);
verifyIgnoreJvmGCOptionsIfJvmArgs(isHosted, "jvm-options", "-XX:+UseG1GC");
}
private static void verifyIgnoreJvmGCOptionsIfJvmArgs(boolean isHosted, String jvmOptionsName, String expectedGC) throws IOException, SAXException {
String servicesXml =
"<container version='1.0'>" +
" <nodes jvm-gc-options='-XX:+UseG1GC' " + jvmOptionsName + "='-XX:+UseParNewGC'>" +
" <node hostalias='mockhost'/>" +
" </nodes>" +
"</container>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
final MyLogger logger = new MyLogger();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.deployLogger(logger)
.zone(new Zone(SystemName.cd, Environment.dev, RegionName.from("here")))
.properties(new TestProperties().setHostedVespa(isHosted))
.build());
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
model.getConfig(qrStartBuilder, "container/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals(expectedGC, qrStartConfig.jvm().gcopts());
}
@Test
public void ignores_jvmgcoptions_on_conflicting_jvmargs() throws IOException, SAXException {
verifyIgnoreJvmGCOptions(false);
verifyIgnoreJvmGCOptions(true);
}
// Builds a one-node model with an optional jvm-gc-options override and asserts the
// resulting QrStartConfig gcopts value.
//
// isHosted  - whether hosted-Vespa mode is enabled
// override  - value for the nodes' jvm-gc-options attribute, or null to omit it
// zone      - deployment zone for the model
// expected  - the gcopts string the produced config must contain
private void verifyJvmGCOptions(boolean isHosted, String override, Zone zone, String expected) throws IOException, SAXException {
String servicesXml =
"<container version='1.0'>" +
" <nodes " + ((override == null) ? ">" : ("jvm-gc-options='" + override + "'>")) +
" <node hostalias='mockhost'/>" +
" </nodes>" +
"</container>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
final MyLogger logger = new MyLogger();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.deployLogger(logger)
.zone(zone)
.properties(new TestProperties().setHostedVespa(isHosted))
.build());
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
model.getConfig(qrStartBuilder, "container/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
assertEquals(expected, qrStartConfig.jvm().gcopts());
}
// Verifies GC-option resolution in the given zone (default and explicit override) and in
// the corresponding dev-system zone, where the default is always G1 and overrides still win.
private void verifyJvmGCOptions(boolean isHosted, Zone zone, String expected) throws IOException, SAXException {
verifyJvmGCOptions(isHosted, null, zone, expected);
verifyJvmGCOptions(isHosted, "-XX:+UseG1GC", zone, "-XX:+UseG1GC");
// Locals use lowerCamelCase; UPPER_SNAKE is reserved for constants.
Zone devZone = new Zone(SystemName.dev, zone.environment(), zone.region());
verifyJvmGCOptions(isHosted, null, devZone, ContainerCluster.G1GC);
verifyJvmGCOptions(isHosted, "-XX:+UseConcMarkSweepGC", devZone, "-XX:+UseConcMarkSweepGC");
}
@Test
// Default GC options must resolve to G1 in the default zone for both self-hosted and hosted setups.
public void requireThatJvmGCOptionsIsHonoured() throws IOException, SAXException {
for (boolean hosted : new boolean[] { false, true }) {
verifyJvmGCOptions(hosted, Zone.defaultZone(), ContainerCluster.G1GC);
}
}
@Test
public void default_port_is_4080() {
Element clusterElem = DomBuilderTest.parse(
"<container version='1.0'>",
nodesXml,
"</container>" );
createModel(root, clusterElem);
AbstractService container = (AbstractService)root.getProducer("container/container.0");
assertThat(container.getRelativePort(0), is(getDefaults().vespaWebServicePort()));
}
@Test
public void http_server_port_is_configurable_and_does_not_affect_other_ports() {
Element clusterElem = DomBuilderTest.parse(
"<container version='1.0'>",
" <http>",
" <server port='9000' id='foo' />",
" </http>",
nodesXml,
"</container>" );
createModel(root, clusterElem);
AbstractService container = (AbstractService)root.getProducer("container/container.0");
assertThat(container.getRelativePort(0), is(9000));
assertThat(container.getRelativePort(1), is(not(9001)));
}
@Test
// In hosted Vespa the http server port is fixed; declaring any other port must produce
// a deploy-log message telling the user the port cannot be changed.
public void fail_if_http_port_is_not_4080_in_hosted_vespa() throws Exception {
String servicesXml =
"<services>" +
"<admin version='3.0'>" +
" <nodes count='1'/>" +
"</admin>" +
"<container version='1.0'>" +
" <http>" +
" <server port='9000' id='foo' />" +
" </http>" +
nodesXml +
"</container>" +
"</services>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
final MyLogger logger = new MyLogger();
new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.deployLogger(logger)
.properties(new TestProperties().setHostedVespa(true))
.build());
// The warning is logged rather than thrown; inspect the captured log entries.
assertFalse(logger.msgs.isEmpty());
assertThat(logger.msgs.get(0).getSecond(), containsString(String.format("You cannot set port to anything else than %d", Container.BASEPORT)));
}
/** Test DeployLogger that records every (level, message) pair for later inspection by tests. */
private static class MyLogger implements DeployLogger {
// Captured log entries in emission order; read directly by the tests.
List<Pair<Level, String>> msgs = new ArrayList<>();
@Override
public void log(Level level, String message) {
msgs.add(new Pair<>(level, message));
}
}
@Test
public void one_cluster_with_explicit_port_and_one_without_is_ok() {
Element cluster1Elem = DomBuilderTest.parse(
"<container id='cluster1' version='1.0' />");
Element cluster2Elem = DomBuilderTest.parse(
"<container id='cluster2' version='1.0'>",
" <http>",
" <server port='8000' id='foo' />",
" </http>",
"</container>");
createModel(root, cluster1Elem, cluster2Elem);
}
@Test
public void two_clusters_without_explicit_port_throws_exception() {
Element cluster1Elem = DomBuilderTest.parse(
"<container id='cluster1' version='1.0'>",
nodesXml,
"</container>" );
Element cluster2Elem = DomBuilderTest.parse(
"<container id='cluster2' version='1.0'>",
nodesXml,
"</container>" );
try {
createModel(root, cluster1Elem, cluster2Elem);
fail("Expected exception");
} catch (RuntimeException e) {
assertThat(e.getMessage(), containsString("cannot reserve port"));
}
}
@Test
public void verify_bindings_for_builtin_handlers() {
Element clusterElem = DomBuilderTest.parse(
"<container id='default' version='1.0' />"
);
createModel(root, clusterElem);
JdiscBindingsConfig config = root.getConfig(JdiscBindingsConfig.class, "default/container.0");
JdiscBindingsConfig.Handlers defaultRootHandler = config.handlers(BindingsOverviewHandler.class.getName());
assertThat(defaultRootHandler.serverBindings(), contains("*:
JdiscBindingsConfig.Handlers applicationStatusHandler = config.handlers(ApplicationStatusHandler.class.getName());
assertThat(applicationStatusHandler.serverBindings(),
contains("http:
JdiscBindingsConfig.Handlers fileRequestHandler = config.handlers(VipStatusHandler.class.getName());
assertThat(fileRequestHandler.serverBindings(),
contains("http:
}
@Test
public void default_root_handler_is_disabled_when_user_adds_a_handler_with_same_binding() {
Element clusterElem = DomBuilderTest.parse(
"<container id='default' version='1.0'>" +
" <handler id='userRootHandler'>" +
" <binding>" + ContainerCluster.ROOT_HANDLER_BINDING + "</binding>" +
" </handler>" +
"</container>");
createModel(root, clusterElem);
ComponentsConfig.Components userRootHandler = getComponent(componentsConfig(), BindingsOverviewHandler.class.getName());
assertThat(userRootHandler, nullValue());
}
@Test
public void handler_bindings_are_included_in_discBindings_config() {
createClusterWithJDiscHandler();
String discBindingsConfig = root.getConfig(JdiscBindingsConfig.class, "default").toString();
assertThat(discBindingsConfig, containsString("{discHandler}"));
assertThat(discBindingsConfig, containsString(".serverBindings[0] \"binding0\""));
assertThat(discBindingsConfig, containsString(".serverBindings[1] \"binding1\""));
assertThat(discBindingsConfig, containsString(".clientBindings[0] \"clientBinding\""));
}
@Test
public void handlers_are_included_in_components_config() {
createClusterWithJDiscHandler();
assertThat(componentsConfig().toString(), containsString(".id \"discHandler\""));
}
private void createClusterWithJDiscHandler() {
Element clusterElem = DomBuilderTest.parse(
"<container id='default' version='1.0'>",
" <handler id='discHandler'>",
" <binding>binding0</binding>",
" <binding>binding1</binding>",
" <clientBinding>clientBinding</clientBinding>",
" </handler>",
"</container>");
createModel(root, clusterElem);
}
@Test
public void servlets_are_included_in_ServletPathConfig() {
createClusterWithServlet();
ServletPathsConfig servletPathsConfig = root.getConfig(ServletPathsConfig.class, "default");
assertThat(servletPathsConfig.servlets().values().iterator().next().path(), is("p/a/t/h"));
}
@Test
public void servletconfig_is_produced() {
createClusterWithServlet();
String configId = getContainerCluster("default").getServletMap().
values().iterator().next().getConfigId();
ServletConfigConfig servletConfig = root.getConfig(ServletConfigConfig.class, configId);
assertThat(servletConfig.map().get("myKey"), is("myValue"));
}
private void createClusterWithServlet() {
Element clusterElem = DomBuilderTest.parse(
"<container id='default' version='1.0'>",
" <servlet id='myServlet' class='myClass' bundle='myBundle'>",
" <path>p/a/t/h</path>",
" <servlet-config>",
" <myKey>myValue</myKey>",
" </servlet-config>",
" </servlet>",
"</container>");
createModel(root, clusterElem);
}
@Test
public void processing_handler_bindings_can_be_overridden() {
Element clusterElem = DomBuilderTest.parse(
"<container id='default' version='1.0'>",
" <processing>",
" <binding>binding0</binding>",
" <binding>binding1</binding>",
" </processing>",
"</container>");
createModel(root, clusterElem);
String discBindingsConfig = root.getConfig(JdiscBindingsConfig.class, "default").toString();
assertThat(discBindingsConfig, containsString(".serverBindings[0] \"binding0\""));
assertThat(discBindingsConfig, containsString(".serverBindings[1] \"binding1\""));
assertThat(discBindingsConfig, not(containsString("/processing/*")));
}
@Test
public void clientProvider_bindings_are_included_in_discBindings_config() {
createModelWithClientProvider();
String discBindingsConfig = root.getConfig(JdiscBindingsConfig.class, "default").toString();
assertThat(discBindingsConfig, containsString("{discClient}"));
assertThat(discBindingsConfig, containsString(".clientBindings[0] \"binding0\""));
assertThat(discBindingsConfig, containsString(".clientBindings[1] \"binding1\""));
assertThat(discBindingsConfig, containsString(".serverBindings[0] \"serverBinding\""));
}
@Test
public void clientProviders_are_included_in_components_config() {
createModelWithClientProvider();
assertThat(componentsConfig().toString(), containsString(".id \"discClient\""));
}
private void createModelWithClientProvider() {
Element clusterElem = DomBuilderTest.parse(
"<container id='default' version='1.0'>" +
" <client id='discClient'>" +
" <binding>binding0</binding>" +
" <binding>binding1</binding>" +
" <serverBinding>serverBinding</serverBinding>" +
" </client>" +
"</container>" );
createModel(root, clusterElem);
}
@Test
public void serverProviders_are_included_in_components_config() {
Element clusterElem = DomBuilderTest.parse(
"<container id='default' version='1.0'>" +
" <server id='discServer' />" +
"</container>" );
createModel(root, clusterElem);
String componentsConfig = componentsConfig().toString();
assertThat(componentsConfig, containsString(".id \"discServer\""));
}
private String getChainsConfig(String configId) {
return root.getConfig(ChainsConfig.class, configId).toString();
}
@Test
public void searchHandler_gets_only_search_chains_in_chains_config() {
createClusterWithProcessingAndSearchChains();
String searchHandlerConfigId = "default/component/com.yahoo.search.handler.SearchHandler";
String chainsConfig = getChainsConfig(searchHandlerConfigId);
assertThat(chainsConfig, containsLineWithPattern(".*\\.id \"testSearcher@default\"$"));
assertThat(chainsConfig, not(containsLineWithPattern(".*\\.id \"testProcessor@default\"$")));
}
@Test
public void processingHandler_gets_only_processing_chains_in_chains_config() {
createClusterWithProcessingAndSearchChains();
String processingHandlerConfigId = "default/component/com.yahoo.processing.handler.ProcessingHandler";
String chainsConfig = getChainsConfig(processingHandlerConfigId);
assertThat(chainsConfig, containsLineWithPattern(".*\\.id \"testProcessor@default\"$"));
assertThat(chainsConfig, not(containsLineWithPattern(".*\\.id \"testSearcher@default\"$")));
}
private void createClusterWithProcessingAndSearchChains() {
Element clusterElem = DomBuilderTest.parse(
"<container id='default' version='1.0'>" +
" <search>" +
" <chain id='default'>" +
" <searcher id='testSearcher' />" +
" </chain>" +
" </search>" +
" <processing>" +
" <chain id='default'>" +
" <processor id='testProcessor'/>" +
" </chain>" +
" </processing>" +
nodesXml +
" </container>");
createModel(root, clusterElem);
}
@Test
public void user_config_can_be_overridden_on_node() {
// Cluster-level config sets requesttimeout=111; the second node overrides it to 222.
Element containerElem = DomBuilderTest.parse(
"<container id='default' version='1.0'>",
" <config name=\"prelude.cluster.qr-monitor\">" +
" <requesttimeout>111</requesttimeout>",
" </config> " +
" <nodes>",
" <node hostalias='host1' />",
" <node hostalias='host2'>",
" <config name=\"prelude.cluster.qr-monitor\">",
" <requesttimeout>222</requesttimeout>",
" </config> ",
" </node>",
" </nodes>",
"</container>");
root = ContentClusterUtils.createMockRoot(new String[]{"host1", "host2"});
createModel(root, containerElem);
ContainerCluster cluster = (ContainerCluster)root.getChildren().get("default");
assertThat(cluster.getContainers().size(), is(2));
// JUnit's assertEquals takes (expected, actual); this order makes failure messages correct.
assertEquals(111, root.getConfig(QrMonitorConfig.class, "default/container.0").requesttimeout());
assertEquals(222, root.getConfig(QrMonitorConfig.class, "default/container.1").requesttimeout());
}
@Test
public void nested_components_are_injected_to_handlers() throws Exception {
Element clusterElem = DomBuilderTest.parse(
"<container id='default' version='1.0'>",
" <handler id='myHandler'>",
" <component id='injected' />",
" </handler>",
" <client id='myClient'>",
" <component id='injected' />",
" </client>",
"</container>");
createModel(root, clusterElem);
Component<?,?> handler = getContainerComponent("default", "myHandler");
assertThat(handler.getInjectedComponentIds(), hasItem("injected@myHandler"));
Component<?,?> client = getContainerComponent("default", "myClient");
assertThat(client.getInjectedComponentIds(), hasItem("injected@myClient"));
}
@Test
public void component_includes_are_added() {
VespaModelCreatorWithFilePkg creator = new VespaModelCreatorWithFilePkg("src/test/cfg/application/include_dirs");
VespaModel model = creator.create(true);
ContainerCluster cluster = model.getContainerClusters().get("default");
Map<ComponentId, Component<?, ?>> componentsMap = cluster.getComponentsMap();
Component<?,?> example = componentsMap.get(
ComponentId.fromString("test.Exampledocproc"));
assertThat(example.getComponentId().getName(), is("test.Exampledocproc"));
}
@Test
public void affinity_is_set() {
Element clusterElem = DomBuilderTest.parse(
"<container id='default' version='1.0'>",
" <http>",
" <server port='" + getDefaults().vespaWebServicePort() + "' id='main' />",
" </http>",
" <nodes cpu-socket-affinity='true'>",
" <node hostalias='node1' />",
" </nodes>" +
"</container>");
createModel(root, clusterElem);
assertTrue(getContainerCluster("default").getContainers().get(0).getAffinity().isPresent());
assertThat(getContainerCluster("default").getContainers().get(0).getAffinity().get().cpuSocket(), is(0));
}
@Test
public void singlenode_servicespec_is_used_with_hosts_xml() throws IOException, SAXException {
String servicesXml = "<container id='default' version='1.0' />";
String hostsXml = "<hosts>\n" +
" <host name=\"test1.yahoo.com\">\n" +
" <alias>node1</alias>\n" +
" </host>\n" +
"</hosts>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder()
.withHosts(hostsXml)
.withServices(servicesXml)
.build();
VespaModel model = new VespaModel(applicationPackage);
assertThat(model.getHostSystem().getHosts().size(), is(1));
}
@Test
public void http_aliases_are_stored_on_cluster_and_on_service_properties() {
// Aliases declared under <aliases> are kept on the cluster in declaration order and
// serialized as comma-separated service properties on each container.
Element clusterElem = DomBuilderTest.parse(
"<container id='default' version='1.0'>",
" <aliases>",
" <service-alias>service1</service-alias>",
" <service-alias>service2</service-alias>",
" <endpoint-alias>foo1.bar1.com</endpoint-alias>",
" <endpoint-alias>foo2.bar2.com</endpoint-alias>",
" </aliases>",
" <nodes>",
" <node hostalias='host1' />",
" </nodes>",
"</container>");
createModel(root, clusterElem);
// assertEquals takes (expected, actual); this order makes failure messages correct.
assertEquals("service1", getContainerCluster("default").serviceAliases().get(0));
assertEquals("foo1.bar1.com", getContainerCluster("default").endpointAliases().get(0));
assertEquals("service2", getContainerCluster("default").serviceAliases().get(1));
assertEquals("foo2.bar2.com", getContainerCluster("default").endpointAliases().get(1));
assertEquals("service1,service2", getContainerCluster("default").getContainers().get(0).getServicePropertyString("servicealiases"));
assertEquals("foo1.bar1.com,foo2.bar2.com", getContainerCluster("default").getContainers().get(0).getServicePropertyString("endpointaliases"));
}
@Test
public void http_aliases_are_only_honored_in_prod_environment() {
Element clusterElem = DomBuilderTest.parse(
"<container id='default' version='1.0'>",
" <aliases>",
" <service-alias>service1</service-alias>",
" <endpoint-alias>foo1.bar1.com</endpoint-alias>",
" </aliases>",
" <nodes>",
" <node hostalias='host1' />",
" </nodes>",
"</container>");
DeployState deployState = new DeployState.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east-1"))).build();
createModel(root, deployState, null, clusterElem);
assertEquals(0, getContainerCluster("default").serviceAliases().size());
assertEquals(0, getContainerCluster("default").endpointAliases().size());
assertNull(getContainerCluster("default").getContainers().get(0).getServicePropertyString("servicealiases"));
assertNull(getContainerCluster("default").getContainers().get(0).getServicePropertyString("endpointaliases"));
}
@Test
public void singlenode_servicespec_is_used_with_hosted_vespa() throws IOException, SAXException {
String servicesXml = "<container id='default' version='1.0' />";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.modelHostProvisioner(new InMemoryProvisioner(true, "host1.yahoo.com", "host2.yahoo.com"))
.applicationPackage(applicationPackage)
.properties(new TestProperties()
.setMultitenant(true)
.setHostedVespa(true))
.build());
assertEquals(1, model.getHostSystem().getHosts().size());
}
@Test(expected = IllegalArgumentException.class)
public void renderers_named_JsonRenderer_are_not_allowed() {
createModel(root, generateContainerElementWithRenderer("JsonRenderer"));
}
@Test(expected = IllegalArgumentException.class)
public void renderers_named_DefaultRenderer_are_not_allowed() {
createModel(root, generateContainerElementWithRenderer("XmlRenderer"));
}
@Test
public void renderers_named_something_else_are_allowed() {
createModel(root, generateContainerElementWithRenderer("my-little-renderer"));
}
@Test
public void vip_status_handler_uses_file_for_hosted_vespa() throws Exception {
String servicesXml = "<services>" +
"<container version='1.0'>" +
nodesXml +
"</container>" +
"</services>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.properties(new TestProperties().setHostedVespa(true))
.build());
AbstractConfigProducerRoot modelRoot = model.getRoot();
VipStatusConfig vipStatusConfig = modelRoot.getConfig(VipStatusConfig.class, "container/component/status.html-status-handler");
assertTrue(vipStatusConfig.accessdisk());
assertEquals(ContainerModelBuilder.HOSTED_VESPA_STATUS_FILE, vipStatusConfig.statusfile());
}
// Verifies the QrConfig produced for a plain (non-hosted) single-container deployment:
// discriminator, rpc settings, restart flag and filedistributor config id.
@Test
public void qrconfig_is_produced() throws IOException, SAXException {
String servicesXml =
"<services>" +
"<admin version='3.0'>" +
" <nodes count='1'/>" +
"</admin>" +
"<container id ='default' version='1.0'>" +
" <nodes>" +
" <node hostalias='node1' />" +
" </nodes>" +
"</container>" +
"</services>";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder()
.withServices(servicesXml)
.build();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(applicationPackage)
.properties(new TestProperties())
.build());
// The filedistributor config id embeds the local hostname.
String hostname = HostName.getLocalhost();
QrConfig config = model.getConfig(QrConfig.class, "default/container.0");
assertEquals("default.container.0", config.discriminator());
assertEquals(19102, config.rpc().port());
assertEquals("vespa/service/default/container.0", config.rpc().slobrokId());
assertTrue(config.rpc().enabled());
assertEquals("", config.rpc().host());
assertFalse(config.restartOnDeploy());
assertEquals("filedistribution/" + hostname, config.filedistributor().configid());
}
@Test
public void secret_store_can_be_set_up() {
    // A <secret-store> element with one group should surface that group, with its
    // environment, on the container cluster's SecretStore.
    Element servicesElem = DomBuilderTest.parse(
            "<container version='1.0'>",
            " <secret-store>",
            " <group name='group1' environment='env1'/>",
            " </secret-store>",
            "</container>");
    createModel(root, servicesElem);
    SecretStore store = getContainerCluster("container").getSecretStore().get();
    assertEquals("group1", store.getGroups().get(0).name);
    assertEquals("env1", store.getGroups().get(0).environment);
}
// Environment variables declared under <nodes><environment-variables> must be
// propagated, in declaration order, into the qrs env string of QrStartConfig.
@Test
public void honours_environment_vars() {
Element clusterElem = DomBuilderTest.parse(
"<container version='1.0'>",
" <nodes>",
" <environment-variables>",
" <KMP_SETTING>1</KMP_SETTING>",
" <KMP_AFFINITY>granularity=fine,verbose,compact,1,0</KMP_AFFINITY>",
" </environment-variables>",
" <node hostalias='mockhost'/>",
" </nodes>",
"</container>" );
createModel(root, clusterElem);
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
root.getConfig(qrStartBuilder, "container/container.0");
QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder);
// Note: the trailing space in the expected value is significant — each VAR=value
// pair is emitted with a trailing separator.
assertEquals("KMP_SETTING=1 KMP_AFFINITY=granularity=fine,verbose,compact,1,0 ", qrStartConfig.qrs().env());
}
// Builds a minimal container spec declaring a single search renderer with the given id.
private Element generateContainerElementWithRenderer(String rendererId) {
    return DomBuilderTest.parse(
            "<container id='default' version='1.0'>",
            " <search>",
            " <renderer id='" + rendererId + "'/>",
            " </search>",
            "</container>");
}
} |
This was intended for the last commit, obviously >_< | private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
Map<String, byte[]> dataParts = parseDataParts(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
/*
* Special handling of the zone application (the only system application with an application package)
* Setting any other deployOptions here is not supported for now (e.g. specifying version), but
* this might be handy later to handle emergency downgrades.
*/
boolean isZoneApplication = SystemApplication.zone.id().equals(applicationId);
if (isZoneApplication) {
String versionStr = deployOptions.field("vespaVersion").asString();
boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
if (versionPresent) {
throw new RuntimeException("Version not supported for system applications");
}
if (controller.versionStatus().isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
ActivateResult result = controller.applications()
.deploySystemApplicationPackage(SystemApplication.zone, zone, systemVersion.get().versionNumber());
return new SlimeJsonResponse(toSlime(result));
}
/*
* Normal applications from here
*/
Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
.map(ApplicationPackage::new);
Inspector sourceRevision = deployOptions.field("sourceRevision");
Inspector buildNumber = deployOptions.field("buildNumber");
if (sourceRevision.valid() != buildNumber.valid())
throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
Optional<ApplicationVersion> applicationVersion = Optional.empty();
if (sourceRevision.valid()) {
if (applicationPackage.isPresent())
throw new IllegalArgumentException("Application version and application package can't both be provided.");
applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
buildNumber.asLong()));
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
/*
* Deploy direct is when we want to redeploy the current application - retrieve version
* info from the application package before deploying
*/
if(deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) {
Optional<Deployment> deployment = controller.applications().get(applicationId)
.map(Application::deployments)
.flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
if(!deployment.isPresent())
throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
ApplicationVersion version = deployment.get().applicationVersion();
if(version.isUnknown())
throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
applicationVersion = Optional.of(version);
vespaVersion = Optional.of(deployment.get().version());
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
vespaVersion,
deployOptions.field("ignoreValidationErrors").asBool(),
deployOptions.field("deployCurrentVersion").asBool());
ActivateResult result = controller.applications().deploy(applicationId,
zone,
applicationPackage,
applicationVersion,
deployOptionsJsonClass,
Optional.of(requireUserPrincipal(request)));
return new SlimeJsonResponse(toSlime(result));
} | Map<String, byte[]> dataParts = parseDataParts(request); | private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
Map<String, byte[]> dataParts = parseDataParts(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
/*
* Special handling of the zone application (the only system application with an application package)
* Setting any other deployOptions here is not supported for now (e.g. specifying version), but
* this might be handy later to handle emergency downgrades.
*/
boolean isZoneApplication = SystemApplication.zone.id().equals(applicationId);
if (isZoneApplication) {
String versionStr = deployOptions.field("vespaVersion").asString();
boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
if (versionPresent) {
throw new RuntimeException("Version not supported for system applications");
}
if (controller.versionStatus().isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
ActivateResult result = controller.applications()
.deploySystemApplicationPackage(SystemApplication.zone, zone, systemVersion.get().versionNumber());
return new SlimeJsonResponse(toSlime(result));
}
/*
* Normal applications from here
*/
Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
.map(ApplicationPackage::new);
Inspector sourceRevision = deployOptions.field("sourceRevision");
Inspector buildNumber = deployOptions.field("buildNumber");
if (sourceRevision.valid() != buildNumber.valid())
throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
Optional<ApplicationVersion> applicationVersion = Optional.empty();
if (sourceRevision.valid()) {
if (applicationPackage.isPresent())
throw new IllegalArgumentException("Application version and application package can't both be provided.");
applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
buildNumber.asLong()));
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
/*
* Deploy direct is when we want to redeploy the current application - retrieve version
* info from the application package before deploying
*/
if(deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) {
Optional<Deployment> deployment = controller.applications().get(applicationId)
.map(Application::deployments)
.flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
if(!deployment.isPresent())
throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
ApplicationVersion version = deployment.get().applicationVersion();
if(version.isUnknown())
throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
applicationVersion = Optional.of(version);
vespaVersion = Optional.of(deployment.get().version());
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
vespaVersion,
deployOptions.field("ignoreValidationErrors").asBool(),
deployOptions.field("deployCurrentVersion").asBool());
ActivateResult result = controller.applications().deploy(applicationId,
zone,
applicationPackage,
applicationVersion,
deployOptionsJsonClass,
Optional.of(requireUserPrincipal(request)));
return new SlimeJsonResponse(toSlime(result));
} | class ApplicationApiHandler extends LoggingRequestHandler {
// Optional path prefix: request URIs may or may not start with "/api"; it is passed
// as the optional prefix when parsing request paths in handle().
private static final String OPTIONAL_PREFIX = "/api";
// Domain logic entry point for tenants, applications, deployments and jobs.
private final Controller controller;
// Helpers for extracting access-control data (credentials etc.) from requests.
private final AccessControlRequests accessControlRequests;
// Dependency-injected constructor wiring in the controller and access-control helpers.
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx);
this.controller = controller;
this.accessControlRequests = accessControlRequests;
}
// Generous request timeout: deployment operations proxied through this handler
// can legitimately take many minutes.
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
/**
 * Entry point for all requests: parses the path (tolerating the optional "/api"
 * prefix), dispatches on HTTP method, and maps thrown exceptions to HTTP error
 * responses. Catch order matters: specific exception types are translated to
 * their status codes before the generic RuntimeException fallback.
 */
@Override
public HttpResponse handle(HttpRequest request) {
try {
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
switch (request.getMethod()) {
case GET: return handleGET(path, request);
case PUT: return handlePUT(path, request);
case POST: return handlePOST(path, request);
case PATCH: return handlePATCH(path, request);
case DELETE: return handleDELETE(path, request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
return ErrorResponse.from(e);
}
catch (RuntimeException e) {
// Last resort: log with stack trace, return 500 without leaking internals.
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
// GET route table. First match wins, so more specific paths must be listed before
// their prefixes. Note: ".../deploying" and ".../deploying/pin" intentionally map
// to the same deploying() handler.
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/user")) return authenticatedUser(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
// PUT route table: user creation, tenant update, and setting (not clearing —
// see handleDELETE) a global rotation override.
private HttpResponse handlePUT(Path path, HttpRequest request) {
if (path.matches("/application/v4/user")) return createUser(request);
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// POST route table: creation of tenants/applications, deployment orchestration
// (platform/application/pin), job triggering and pausing, and direct deploys.
// First match wins.
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// PATCH route table: partial update of application fields (see patchApplication).
private HttpResponse handlePATCH(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}"))
return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// DELETE route table: removal of tenants/applications, cancelling deployments
// ("all" by default, or a specific choice), unregistering submissions, aborting
// jobs, deactivating deployments, and clearing a global rotation override
// (note the 'true' flag, versus 'false' in handlePUT).
private HttpResponse handleDELETE(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
// Answers OPTIONS with an empty JSON body, advertising the supported HTTP
// methods through the Allow header.
private HttpResponse handleOPTIONS() {
    EmptyJsonResponse allowed = new EmptyJsonResponse();
    allowed.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return allowed;
}
// Fully expanded root listing: one serialized object per known tenant.
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime data = new Slime();
    Cursor tenantArray = data.setArray();
    controller.tenants().asList().forEach(tenant -> toSlime(tenantArray.addObject(), tenant, request));
    return new SlimeJsonResponse(data);
}
// Root resource: the expanded per-tenant listing when recursion is requested,
// otherwise just links to the "user" and "tenant" sub-resources.
private HttpResponse root(HttpRequest request) {
return recurseOverTenants(request)
? recursiveRoot(request)
: new ResourceResponse(request, "user", "tenant");
}
// Describes the calling user: their name, the tenants they can access, and
// whether a user tenant matching their normalized user name already exists.
private HttpResponse authenticatedUser(HttpRequest request) {
Principal user = requireUserPrincipal(request);
// NOTE(review): despite the "require" name, a null principal is handled here —
// confirm whether requireUserPrincipal can actually return null.
if (user == null)
throw new NotAuthorizedException("You must be authenticated.");
// Athenz principals expose their identity name; other principal types fall back to getName().
String userName = user instanceof AthenzPrincipal ? ((AthenzPrincipal) user).getIdentity().getName() : user.getName();
TenantName tenantName = TenantName.from(UserTenant.normalizeUser(userName));
List<Tenant> tenants = controller.tenants().asList(new Credentials(user));
Slime slime = new Slime();
Cursor response = slime.setObject();
response.setString("user", userName);
Cursor tenantsArray = response.setArray("tenants");
for (Tenant tenant : tenants)
tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant.name().equals(tenantName)));
return new SlimeJsonResponse(slime);
}
// Flat listing of every tenant, in the compact "tenants list" serialization.
private HttpResponse tenants(HttpRequest request) {
    Slime data = new Slime();
    Cursor tenantArray = data.setArray();
    for (Tenant tenant : controller.tenants().asList())
        tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject());
    return new SlimeJsonResponse(data);
}
// Looks up a tenant by name, delegating to the Tenant overload; 404 when absent.
private HttpResponse tenant(String tenantName, HttpRequest request) {
return controller.tenants().get(TenantName.from(tenantName))
.map(tenant -> tenant(tenant, request))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"))
;
}
// Serializes a single, already-resolved tenant to its full JSON representation.
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), tenant, request);
return new SlimeJsonResponse(slime);
}
// Lists every application registered under the given tenant.
private HttpResponse applications(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Slime data = new Slime();
    Cursor applicationArray = data.setArray();
    for (Application application : controller.applications().asList(tenant))
        toSlime(application, applicationArray.addObject(), request);
    return new SlimeJsonResponse(data);
}
// Serializes one application (default instance); getApplication throws
// NotExistsException (mapped to 404 in handle()) when it does not exist.
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
return new SlimeJsonResponse(slime);
}
// Partial update of selected application fields under the application lock:
// "majorVersion" (0 is the sentinel for "clear") and "pemDeployKey" (JSON null
// clears it). Unknown fields in the request body are silently ignored.
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
// Accumulates one human-readable line per applied change for the response message.
StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
controller.applications().lockOrThrow(ApplicationId.from(tenantName, applicationName, "default"), application -> {
Inspector majorVersionField = requestObject.field("majorVersion");
if (majorVersionField.valid()) {
// 0 means "unset": stored as null.
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
application = application.withMajorVersion(majorVersion);
messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
}
Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
if (pemDeployKeyField.valid()) {
// JSON null (NIX) means "clear the key".
String pemDeployKey = pemDeployKeyField.type() == Type.NIX ? null : pemDeployKeyField.asString();
application = application.withPemDeployKey(pemDeployKey);
messageBuilder.add("Set pem deploy key to " + (pemDeployKey == null ? "empty" : pemDeployKey));
}
controller.applications().store(application);
});
return new MessageResponse(messageBuilder.toString());
}
// Resolves the default instance of the named application, or throws
// NotExistsException (mapped to 404 by handle()).
private Application getApplication(String tenantName, String applicationName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    return controller.applications().get(id)
                     .orElseThrow(() -> new NotExistsException(id + " not found"));
}
// Lists the nodes of one deployment as reported by the node repository, with
// state/orchestration/cluster info serialized via the valueOf helpers below.
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
List<Node> nodes = controller.configServer().nodeRepository().list(zone, id);
Slime slime = new Slime();
Cursor nodesArray = slime.setObject().setArray("nodes");
for (Node node : nodes) {
Cursor nodeObject = nodesArray.addObject();
nodeObject.setString("hostname", node.hostname().value());
nodeObject.setString("state", valueOf(node.state()));
nodeObject.setString("orchestration", valueOf(node.serviceState()));
nodeObject.setString("version", node.currentVersion().toString());
nodeObject.setString("flavor", node.canonicalFlavor());
nodeObject.setString("clusterId", node.clusterId());
nodeObject.setString("clusterType", valueOf(node.clusterType()));
}
return new SlimeJsonResponse(slime);
}
// Serializes a node state to its wire name. Deliberately an explicit mapping
// (not name()): enum values added later must be mapped here consciously, or
// serialization fails fast with IllegalArgumentException.
private static String valueOf(Node.State state) {
switch (state) {
case failed: return "failed";
case parked: return "parked";
case dirty: return "dirty";
case ready: return "ready";
case active: return "active";
case inactive: return "inactive";
case reserved: return "reserved";
case provisioned: return "provisioned";
default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
}
// Serializes an orchestration (service) state to its wire name; unmapped values
// fail fast so new enum constants must be handled explicitly.
private static String valueOf(Node.ServiceState state) {
switch (state) {
case expectedUp: return "expectedUp";
case allowedDown: return "allowedDown";
case unorchestrated: return "unorchestrated";
default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
}
}
// Serializes a cluster type to its wire name; unmapped values fail fast.
private static String valueOf(Node.ClusterType type) {
switch (type) {
case admin: return "admin";
case content: return "content";
case container: return "container";
default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
}
}
/**
 * Returns logs for one deployment. With the "streaming" query parameter the raw
 * log stream from the config server is piped straight through to the client;
 * otherwise the available log entries are returned as a flat JSON object.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    if (queryParameters.containsKey("streaming")) {
        InputStream logStream = controller.configServer().getLogStream(deployment, queryParameters);
        return new HttpResponse(200) {
            @Override
            public void render(OutputStream outputStream) throws IOException {
                logStream.transferTo(outputStream);
            }
        };
    }
    Optional<Logs> response = controller.configServer().getLogs(deployment, queryParameters);
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    // Idiom fix: ifPresent + Map.forEach replaces the isPresent()/get() pair and
    // the redundant entrySet().stream().forEach(...) of the original.
    response.ifPresent(logs -> logs.logs().forEach(object::setString));
    return new SlimeJsonResponse(slime);
}
// Force-triggers the given job for the application and reports which job names
// actually fired (possibly none).
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    String user = request.getJDiscRequest().getUserPrincipal().getName();
    String triggered = controller.applications().deploymentTrigger()
                                 .forceTrigger(id, type, user)
                                 .stream().map(JobType::jobName).collect(joining(", "));
    if (triggered.isEmpty())
        return new MessageResponse("Job " + type.jobName() + " for " + id + " not triggered");
    return new MessageResponse("Triggered " + triggered + " for " + id);
}
// Pauses the given job for the maximum allowed pause duration, measured from now.
private HttpResponse pause(ApplicationId id, JobType type) {
Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
controller.applications().deploymentTrigger().pauseJob(id, type, until);
return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/**
 * Serializes the full application resource onto the given cursor: identity, deployment
 * job status, pending changes, global rotations, deployments, metrics and issue ids.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("instance", application.id().instance().value());
    // Link to the job resource for this application instance.
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/instance/" + application.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    // Source revision of the last successful component (build) job, if any.
    application.deploymentJobs().statusOf(JobType.component)
               .flatMap(JobStatus::lastSuccess)
               .map(run -> run.application().source())
               .ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source")));
    application.deploymentJobs().projectId()
               .ifPresent(id -> object.setLong("projectId", id));
    // Change currently rolling out, and the change queued behind it, if any.
    if ( ! application.change().isEmpty()) {
        toSlime(object.setObject("deploying"), application.change());
    }
    if ( ! application.outstandingChange().isEmpty()) {
        toSlime(object.setObject("outstandingChange"), application.outstandingChange());
    }
    // Job status, ordered by the deployment spec's step order.
    List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                          .steps(application.deploymentSpec())
                                          .sortedJobs(application.deploymentJobs().jobStatus().values());
    object.setBool("deployedInternally", application.deploymentJobs().deployedInternally());
    Cursor deploymentsArray = object.setArray("deploymentJobs");
    for (JobStatus job : jobStatus) {
        Cursor jobObject = deploymentsArray.addObject();
        jobObject.setString("type", job.type().jobName());
        jobObject.setBool("success", job.isSuccess());
        job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
        job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
        job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
        job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
    }
    // Time windows during which version and/or revision changes are blocked.
    Cursor changeBlockers = object.setArray("changeBlockers");
    application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
        Cursor changeBlockerObject = changeBlockers.addObject();
        changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
        changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
        changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
        Cursor days = changeBlockerObject.setArray("days");
        changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
        Cursor hours = changeBlockerObject.setArray("hours");
        changeBlocker.window().hours().forEach(hours::addLong);
    });
    // The oldest platform version this application runs on anywhere.
    object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString());
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    // Global endpoints: rotation-based first, then routing-policy based (appended below).
    Cursor globalRotationsArray = object.setArray("globalRotations");
    application.endpointsIn(controller.system())
               .scope(Endpoint.Scope.global)
               .legacy(false)
               .asList().stream()
               .map(Endpoint::url)
               .map(URI::toString)
               .forEach(globalRotationsArray::addString);
    application.rotation().ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
    Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies(application.id());
    for (RoutingPolicy policy : routingPolicies) {
        policy.rotationEndpointsIn(controller.system()).asList().stream()
              .map(Endpoint::url)
              .map(URI::toString)
              .forEach(globalRotationsArray::addString);
    }
    // Deployments, ordered by the deployment spec.
    List<Deployment> deployments = controller.applications().deploymentTrigger()
                                             .steps(application.deploymentSpec())
                                             .sortedDeployments(application.deployments().values());
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        // Rotation (BCP) status is only relevant for production deployments of rotated applications.
        if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) {
            toSlime(application.rotationStatus(deployment), deploymentObject);
        }
        // With recursion each deployment is expanded in full; otherwise only a link is emitted.
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", application.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value() +
                                                       "/instance/" + application.id().instance().value(),
                                                       request.getUri()).toString());
        }
    }
    application.pemDeployKey().ifPresent(key -> object.setString("pemDeployKey", key));
    // Service quality metrics and recent read/write activity.
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    // Ownership and deployment issue tracking ids, when issues have been filed.
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns the serialized deployment of the given application in the given zone, or 404 when absent. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().get(id)
                                        .orElseThrow(() -> new NotExistsException(id + " not found"));
    DeploymentId deploymentId = new DeploymentId(application.id(),
                                                 ZoneId.from(environment, region));
    Deployment deployment = application.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId());
    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Serializes a change: its platform version and/or (known) application revision. */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(version -> object.setString("version", version.toString()));
    change.application().ifPresent(version -> {
        if ( ! version.isUnknown())
            toSlime(version, object.setObject("revision"));
    });
}
/** Serializes a single deployment: identity, endpoints, versions, expiry, cost, activity and metrics. */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    // Endpoints provided by routing policies for this application.
    var endpointArray = response.setArray("endpoints");
    for (var policy : controller.applications().routingPolicies(deploymentId.applicationId())) {
        Cursor endpointObject = endpointArray.addObject();
        Endpoint endpoint = policy.endpointIn(controller.system());
        endpointObject.setString("cluster", policy.cluster().value());
        endpointObject.setBool("tls", endpoint.tls());
        endpointObject.setString("url", endpoint.url().toString());
    }
    // Service URLs as reported by the config server, when available.
    Cursor serviceUrlArray = response.setArray("serviceUrls");
    controller.applications().getDeploymentEndpoints(deploymentId)
              .ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())));
    // Link to the node repository listing for this deployment's nodes.
    response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", deployment.applicationVersion().id());
    response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
    // Expiry is only set in zones with a deployment time-to-live configured.
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
    controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId()
              .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    sourceRevisionToSlime(deployment.applicationVersion().source(), response);
    // Recent read/write activity for this deployment.
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    // Cost estimate and serving metrics.
    DeploymentCost appCost = deployment.calculateCost();
    Cursor costObject = response.setObject("cost");
    toSlime(appCost, costObject);
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Serializes an application version; nothing is written when the version is unknown. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return;
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
}
/** Serializes the source revision, when known, onto the given cursor. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(rev -> {
        object.setString("gitRepository", rev.repository());
        object.setString("gitBranch", rev.branch());
        object.setString("gitCommit", rev.commit());
    });
}
/** Serializes the global rotation (BCP) status of a deployment. */
private void toSlime(RotationStatus status, Cursor object) {
    Cursor bcpStatus = object.setObject("bcpStatus");
    // Use a fixed locale so upper-casing is stable regardless of the JVM default
    // locale (e.g. under tr_TR, 'i' upper-cases to 'İ').
    bcpStatus.setString("rotationStatus", status.name().toUpperCase(java.util.Locale.ROOT));
}
/** Returns the monitoring dashboard URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/**
 * Sets a deployment in or out of its global rotation, recording who did it, why, and when.
 * Fails with 404 when the application has no deployment in the given zone.
 */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = ZoneId.from(environment, region);
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(application + " has no deployment in " + zone);
    }
    // A reason is mandatory; the acting user and the time are recorded for auditing.
    Inspector requestData = toSlime(request.getData()).get();
    String reason = mandatory("reason", requestData).asString();
    String agent = requireUserPrincipal(request).getName();
    long timestamp = controller.clock().instant().getEpochSecond();
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent, timestamp);
    controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()),
                                                      endpointStatus);
    return new MessageResponse(String.format("Successfully set %s in %s.%s %s service",
                                             application.id().toShortString(),
                                             deployment.zone().environment().value(),
                                             deployment.zone().region().value(),
                                             inService ? "in" : "out of"));
}
/** Returns the global rotation override status of each routing endpoint of the given deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    // Iterate over entries directly instead of keySet() followed by a get() per key.
    controller.applications().globalRotationStatus(deploymentId)
              .forEach((endpoint, currentStatus) -> {
                  array.addString(endpoint.upstreamName());
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", currentStatus.getStatus().name());
                  statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
                  statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
                  statusObject.setLong("timestamp", currentStatus.getEpoch());
              });
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation status of the given deployment; 404 when no rotation or no deployment exists. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().require(applicationId);
    ZoneId zone = ZoneId.from(environment, region);
    if ( ! application.rotation().isPresent())
        throw new NotExistsException("global rotation does not exist for " + application);
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(application + " has no deployment in " + zone);
    Slime slime = new Slime();
    toSlime(application.rotationStatus(deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Reports the change (platform and/or application version) currently rolling out, if any. */
private HttpResponse deploying(String tenant, String application, HttpRequest request) {
    Application app = controller.applications().require(ApplicationId.from(tenant, application, "default"));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = app.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Reports whether the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Lists the services of a deployment, as reported by the config server. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = ZoneId.from(environment, region); // parse once instead of twice
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/** Proxies a single service's API response for a deployment. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath);
    ZoneId zone = ZoneId.from(environment, region); // parse once instead of twice
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/**
 * Creates a user tenant for the authenticated Athenz user.
 * Creating an already-existing user tenant is reported as success, not an error.
 */
private HttpResponse createUser(HttpRequest request) {
    // Only an authenticated AthenzUser principal may create a user tenant.
    String user = Optional.of(requireUserPrincipal(request))
                          .filter(AthenzPrincipal.class::isInstance)
                          .map(AthenzPrincipal.class::cast)
                          .map(AthenzPrincipal::getIdentity)
                          .filter(AthenzUser.class::isInstance)
                          .map(AthenzIdentity::getName)
                          .map(UserTenant::normalizeUser)
                          .orElseThrow(() -> new ForbiddenException("Not authenticated or not a user."));
    UserTenant tenant = UserTenant.create(user);
    try {
        controller.tenants().createUser(tenant);
        return new MessageResponse("Created user '" + user + "'");
    } catch (AlreadyExistsException e) {
        // Idempotent from the client's point of view.
        return new MessageResponse("User '" + user + "' already exists");
    }
}
/** Updates an existing tenant from the request body, then returns its serialized form. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    // Verify existence up front (throws when the tenant is unknown); the result itself is not needed.
    getTenantOrThrow(tenantName);
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already-parsed tenant name instead of parsing it again.
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a tenant from the request body, then returns its serialized form. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already-parsed tenant name instead of parsing it again.
    return tenant(controller.tenants().require(tenant), request);
}
/**
 * Creates an application (default instance) under the given tenant.
 * User tenants need no credentials; all other tenant types must present them in the request.
 */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    try {
        Optional<Credentials> credentials = controller.tenants().require(id.tenant()).type() == Tenant.Type.user
                ? Optional.empty()
                : Optional.of(accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()));
        Application application = controller.applications().createApplication(id, credentials);
        Slime slime = new Slime();
        toSlime(application, slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }
    catch (ZmsClientException e) {
        // Translate Athenz authorization failures to 403; rethrow everything else.
        if (e.getErrorCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN)
            throw new ForbiddenException("Not authorized to create application", e);
        else
            throw e;
    }
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, boolean pin, HttpRequest request) {
    request = controller.auditLogger().log(request);
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        // An empty version means "the current system version".
        Version version = Version.fromString(versionString);
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion();
        // Refuse versions not active in this system, listing the valid alternatives.
        if ( ! systemHasVersion(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + controller.versionStatus().versions()
                                                                               .stream()
                                                                               .map(VespaVersion::versionNumber)
                                                                               .map(Version::toString)
                                                                               .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        // Fail with an explicit message instead of a bare NoSuchElementException from
        // unchecked Optional.get() when there is no successful component build to deploy.
        ApplicationVersion version = application.get().deploymentJobs().statusOf(JobType.component)
                                                .flatMap(JobStatus::lastSuccess)
                                                .map(run -> run.application())
                                                .orElseThrow(() -> new IllegalArgumentException(
                                                        "No successful build of " + id + " is available to deploy"));
        Change change = Change.of(version);
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Change change = application.get().change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for " + application + " at this time");
            return;
        }
        // Upper-case with a fixed locale so e.g. "pin" maps to "PIN" also under
        // locales with special casing rules (Turkish: 'i' -> 'İ').
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase(java.util.Locale.ROOT));
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '" + change + "' to '" +
                        controller.applications().require(id).change() + "' for " + application);
    });
    return new MessageResponse(response.toString());
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    // An optional "hostname" query property narrows the restart to a single node.
    Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
    controller.applications().restart(deploymentId, hostname);
    return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName,
                                                             ApplicationResource.API_PATH, applicationName,
                                                             EnvironmentResource.API_PATH, environment,
                                                             "region", region,
                                                             "instance", instanceName));
}
/** Deletes the given tenant; unknown tenants yield 404, user tenants need no credentials. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if ( ! tenant.isPresent())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
    Tenant existing = tenant.get();
    if (existing.type() == Tenant.Type.user) {
        controller.tenants().deleteUser((UserTenant) existing);
    }
    else {
        Credentials credentials = accessControlRequests.credentials(existing.name(),
                                                                    toSlime(request.getData()).get(),
                                                                    request.getJDiscRequest());
        controller.tenants().delete(existing.name(), credentials);
    }
    return tenant(existing, request);
}
/** Deletes the given application; user tenants need no credentials, others must present them. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    boolean userTenant = controller.tenants().require(id.tenant()).type() == Tenant.Type.user;
    Optional<Credentials> credentials = userTenant
            ? Optional.empty()
            : Optional.of(accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()));
    controller.applications().deleteApplication(id, credentials);
    return new EmptyJsonResponse();
}
/** Removes the deployment of the given application in the given zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().require(id);
    ZoneId zone = ZoneId.from(environment, region);
    controller.applications().deactivate(application.id(), zone);
    return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName,
                                                    ApplicationResource.API_PATH, applicationName,
                                                    EnvironmentResource.API_PATH, environment,
                                                    "region", region,
                                                    "instance", instanceName));
}
/**
 * Promote application Chef environments. To be used by component jobs only
 */
private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String source = chefEnvironment.systemChefEnvironment();
        String target = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        controller.chefClient().copyChefEnvironment(source, target);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", source, target));
    } catch (Exception e) {
        // Chef failures are reported as 500 rather than propagated to the framework.
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}
/**
 * Promote application Chef environments for jobs that deploy applications
 */
private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String source = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        String target = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
        controller.chefClient().copyChefEnvironment(source, target);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", source, target));
    } catch (Exception e) {
        // Chef failures are reported as 500 rather than propagated to the framework.
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}
/**
 * Registers completion of a deployment job, as reported by the build system.
 * Component-job reports are rejected for applications that deploy through the internal pipeline.
 */
private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) {
    try {
        DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get());
        if ( report.jobType() == JobType.component
             && controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally())
            throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " +
                                               "longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
                                               "to the old pipeline, please file a ticket at yo/vespa-support and request this.");
        controller.applications().deploymentTrigger().notifyOfCompletion(report);
        return new MessageResponse("ok");
    } catch (IllegalStateException e) {
        // Only IllegalStateException maps to 400 here; the IllegalArgumentException above propagates to the caller.
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
}
/** Parses a job completion report; component jobs additionally carry a project id and source revision. */
private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) {
    Optional<DeploymentJobs.JobError> jobError = report.field("jobError").valid()
            ? Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString()))
            : Optional.empty();
    ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString());
    JobType type = JobType.fromJobName(report.field("jobName").asString());
    long buildNumber = report.field("buildNumber").asLong();
    if (type == JobType.component)
        return DeploymentJobs.JobReport.ofComponent(id,
                                                    report.field("projectId").asLong(),
                                                    buildNumber,
                                                    jobError,
                                                    toSourceRevision(report.field("sourceRevision")));
    return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError);
}
/** Parses a source revision; all three of repository, branch and commit are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    boolean complete =    object.field("repository").valid()
                       && object.field("branch").valid()
                       && object.field("commit").valid();
    if ( ! complete)
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(object.field("repository").asString(),
                              object.field("branch").asString(),
                              object.field("commit").asString());
}
/** Returns the tenant with the given name, or throws NotExistsException (404) when unknown. */
private Tenant getTenantOrThrow(String tenantName) {
    return controller.tenants().get(tenantName)
                     .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/** Serializes a tenant, including type-specific fields and its list of (default-instance) applications. */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tentantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            // Contact information is optional; each contact entry is a list of person names.
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case user: break;
        case cloud: break;
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    // Only default instances are listed; recursion expands each application in full.
    Cursor applicationArray = object.setArray("applications");
    for (Application application : controller.applications().asList(tenant.name())) {
        if (application.id().instance().isDefault()) {
            if (recurseOverApplications(request))
                toSlime(applicationArray.addObject(), application, request);
            else
                toSlime(application, applicationArray.addObject(), request);
        }
    }
}
/** Serializes a compact tenant entry for the tenant list: name, type metadata and resource URL. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tentantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case user: break;
        case cloud: break;
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
    }
    catch (URISyntaxException e) {
        // The components come from an already-valid URI, so this should be unreachable.
        throw new RuntimeException("Will not happen", e);
    }
}
/**
 * Parses the given string as a long, returning the given default when the value is null.
 *
 * @throws IllegalArgumentException when the value is present but not a valid integer
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        // Preserve the cause instead of dropping the original exception.
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'", e);
    }
}
/** Serializes a single job run: id, platform version, optional revision, trigger reason and time. */
private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
    object.setLong("id", jobRun.id());
    object.setString("version", jobRun.platform().toFullString());
    ApplicationVersion revision = jobRun.application();
    if ( ! revision.isUnknown())
        toSlime(revision, object.setObject("revision"));
    object.setString("reason", jobRun.reason());
    object.setLong("at", jobRun.at().toEpochMilli());
}
/**
 * Reads the given stream (bounded at ~1 MB) and parses the content as JSON into a Slime tree.
 *
 * @throws RuntimeException wrapping the underlying IOException if reading fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Fix: the original threw a bare RuntimeException(), discarding the cause and message.
        throw new RuntimeException("Failed reading request body", e);
    }
}
/** Returns the user principal of the request, failing with an internal server error if none is set. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal != null)
        return principal;
    throw new InternalServerErrorException("Expected a user principal");
}
/** Returns the named field of the given object, or throws if it is missing or invalid. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the named field as a string, or empty when the field is absent or not a string. */
private Optional<String> optional(String key, Inspector object) {
    Inspector field = object.field(key);
    return SlimeUtils.optionalString(field);
}
/** Joins the string forms of the given elements with '/' into a path, e.g. path("a", "b") -> "a/b". */
private static String path(Object... elements) {
    Joiner slashJoiner = Joiner.on("/");
    return slashJoiner.join(elements);
}
/** Renders a compact application entry for list responses: id parts plus a link to the full resource. */
private void toSlime(Application application, Cursor object, HttpRequest request) {
    ApplicationId id = application.id();
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    String url = withPath("/application/v4/tenant/" + id.tenant().value() +
                          "/application/" + id.application().value(), request.getUri()).toString();
    object.setString("url", url);
}
/**
 * Renders the result of an application activation: revision id, package size,
 * config server prepare log, and required config change actions (restarts and re-feeds).
 */
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
// Prepare log messages, in the order returned by the config server (may be absent).
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
// Actions this config change requires: clusters/services to restart ...
Cursor changeObject = object.setObject("configChangeActions");
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
// ... and document types to re-feed.
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setBool("allowed", refeedAction.allowed);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
/** Adds one object per service (name, type, config id, host) to the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    serviceInfoList.forEach(info -> {
        Cursor entry = array.addObject();
        entry.setString("serviceName", info.serviceName);
        entry.setString("serviceType", info.serviceType);
        entry.setString("configId", info.configId);
        entry.setString("hostName", info.hostName);
    });
}
/** Appends each string to the given Slime array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/**
 * Reads the entire stream into a string using a Scanner with the "\\A" (whole-input) delimiter,
 * or returns null when the stream is empty.
 * NOTE(review): the Scanner (and hence the stream) is deliberately left open here;
 * presumably the caller owns the stream's lifecycle — verify before adding a close.
 */
private String readToString(InputStream stream) {
Scanner scanner = new Scanner(stream).useDelimiter("\\A");
if ( ! scanner.hasNext()) return null;
return scanner.next();
}
/** Returns whether the given platform version is currently known to the system's version status. */
private boolean systemHasVersion(Version version) {
    return controller.versionStatus().versions().stream()
                     .anyMatch(candidate -> version.equals(candidate.versionNumber()));
}
/** Renders deployment cost totals and the per-cluster cost breakdown. */
public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
    object.setLong("tco", (long) deploymentCost.getTco());
    object.setLong("waste", (long) deploymentCost.getWaste());
    object.setDouble("utilization", deploymentCost.getUtilization());
    Cursor clusters = object.setObject("cluster");
    deploymentCost.getCluster().forEach((name, cost) -> toSlime(cost, clusters.setObject(name)));
}
/**
 * Renders the cost data for a single cluster: host count, dominating resource, utilization,
 * cost figures, flavor details, utilization/usage ratios and host names.
 */
private static void toSlime(ClusterCost clusterCost, Cursor object) {
    object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
    object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
    object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
    // Fix: cast to long, not int, to avoid overflow for large cost values —
    // consistent with toSlime(DeploymentCost, Cursor) above.
    object.setLong("tco", (long) clusterCost.getTco());
    object.setLong("waste", (long) clusterCost.getWaste());
    object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
    object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
    object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
    object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
    object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
    object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
    // Target (result) utilization ratios per resource.
    Cursor utilObject = object.setObject("util");
    utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
    utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
    utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
    utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
    // Measured system usage ratios per resource.
    Cursor usageObject = object.setObject("usage");
    usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
    usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
    usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
    usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
    Cursor hostnamesArray = object.setArray("hostnames");
    for (String hostname : clusterCost.getClusterInfo().getHostnames())
        hostnamesArray.addString(hostname);
}
/**
 * Returns the name of the resource dominating the given utilization,
 * checked in the order mem, disk, diskbusy, defaulting to cpu.
 */
private static String getResourceName(ClusterUtilization utilization) {
    double max = utilization.getMaxUtilization();
    if (utilization.getMemory() == max) return "mem";
    if (utilization.getDisk() == max) return "disk";
    if (utilization.getDiskBusy() == max) return "diskbusy";
    return "cpu";
}
/** Returns whether the 'recursive' query parameter requests recursion at tenant level or deeper. */
private static boolean recurseOverTenants(HttpRequest request) {
    if (recurseOverApplications(request)) return true;
    return "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the 'recursive' query parameter requests recursion at application level or deeper. */
private static boolean recurseOverApplications(HttpRequest request) {
    if (recurseOverDeployments(request)) return true;
    return "application".equals(request.getProperty("recursive"));
}
/** Returns whether the 'recursive' query parameter requests recursion down to deployment level. */
private static boolean recurseOverDeployments(HttpRequest request) {
    // Null-safe: the parameter may be absent.
    String recursive = request.getProperty("recursive");
    return "all".equals(recursive) || "true".equals(recursive) || "deployment".equals(recursive);
}
/** Returns the API string for the tenant's type. (The misspelled method name is kept for existing callers.) */
private static String tentantType(Tenant tenant) {
    switch (tenant.type()) {
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        case user: return "USER";
        default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
    }
}
/** Builds an ApplicationId from the tenant, application and instance placeholders of a matched path. */
private static ApplicationId appIdFromPath(Path path) {
    String tenant = path.get("tenant");
    String application = path.get("application");
    String instance = path.get("instance");
    return ApplicationId.from(tenant, application, instance);
}
/** Resolves the JobType named by the 'jobtype' placeholder of a matched path. */
private static JobType jobTypeFromPath(Path path) {
    String jobName = path.get("jobtype");
    return JobType.fromJobName(jobName);
}
/** Builds a RunId from the application, job type and run number placeholders of a matched path. */
private static RunId runIdFromPath(Path path) {
    ApplicationId id = appIdFromPath(path);
    JobType type = jobTypeFromPath(path);
    return new RunId(id, type, Long.parseLong(path.get("number")));
}
/**
 * Handles an application submission: parses the multipart request into its parts,
 * extracts submit options (source revision, author, project id), verifies the
 * application's identity configuration, and registers the submission with the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
SourceRevision sourceRevision = toSourceRevision(submitOptions);
String authorEmail = submitOptions.field("authorEmail").asString();
// Project id must be positive; missing/zero values are coerced to 1.
long projectId = Math.max(1, submitOptions.field("projectId").asLong());
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
tenant,
application,
sourceRevision,
authorEmail,
projectId,
applicationPackage,
dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
/**
 * Parses the multipart request into named byte parts, verifying the SHA-256 content hash
 * against the X-Content-Hash header when that header is present.
 * NOTE(review): the lookup uses the literal name "x-Content-Hash" while the error message
 * says "X-Content-Hash"; presumably header lookup is case-insensitive — verify.
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
String contentHash = request.getHeader("x-Content-Hash");
if (contentHash == null)
return new MultipartParser().parse(request);
// Digest the body while parsing, then compare against the base64-decoded header value.
DigestInputStream digester = Signatures.sha256Digester(request.getData());
var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash")
return dataParts;
}
} | class ApplicationApiHandler extends LoggingRequestHandler {
// Optional path prefix accepted on incoming request URIs (see handle()).
private static final String OPTIONAL_PREFIX = "/api";
// Entry point to controller state: tenants, applications, deployments, config servers.
private final Controller controller;
// Builds access-control request/credential objects for tenant create/update/delete operations.
private final AccessControlRequests accessControlRequests;
/** Creates the handler with its injected collaborators. */
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
super(parentCtx);
this.controller = controller;
this.accessControlRequests = accessControlRequests;
}
@Override
public Duration getTimeout() {
// Generous timeout: some operations handled here (e.g. deployments) can take a long time.
return Duration.ofMinutes(20);
}
/**
 * Dispatches the request by HTTP method and translates thrown exceptions
 * into the corresponding HTTP error responses.
 */
@Override
public HttpResponse handle(HttpRequest request) {
try {
// The "/api" prefix is optional in request paths.
Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
switch (request.getMethod()) {
case GET: return handleGET(path, request);
case PUT: return handlePUT(path, request);
case POST: return handlePOST(path, request);
case PATCH: return handlePATCH(path, request);
case DELETE: return handleDELETE(path, request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
// 403
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
// 401
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
// 404
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
// 400
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
// Propagate the config server's own error classification.
return ErrorResponse.from(e);
}
catch (RuntimeException e) {
// Anything else is unexpected: log with stack trace and return 500.
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
/** Routes GET requests; most specific literal routes are matched first by the Path matcher. */
private HttpResponse handleGET(Path path, HttpRequest request) {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/user")) return authenticatedUser(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PUT requests: user creation, tenant update and global rotation override. */
private HttpResponse handlePUT(Path path, HttpRequest request) {
if (path.matches("/application/v4/user")) return createUser(request);
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes POST requests: creation, deployment triggering, submission and promotion operations. */
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes PATCH requests: partial application updates only. */
private HttpResponse handlePATCH(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}"))
return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Routes DELETE requests: removal of tenants, applications, deployments, jobs and overrides. */
private HttpResponse handleDELETE(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
/** Answers OPTIONS with an empty body and an Allow header advertising the supported methods. */
private HttpResponse handleOPTIONS() {
    EmptyJsonResponse optionsResponse = new EmptyJsonResponse();
    optionsResponse.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return optionsResponse;
}
/** Renders all tenants in full, as the recursive form of the root resource. */
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> toSlime(tenantArray.addObject(), tenant, request));
    return new SlimeJsonResponse(slime);
}
/** Serves the API root: recursive tenant listing when requested, otherwise links to sub-resources. */
private HttpResponse root(HttpRequest request) {
    if (recurseOverTenants(request))
        return recursiveRoot(request);
    return new ResourceResponse(request, "user", "tenant");
}
/**
 * Renders the authenticated user: user name, the tenants visible with the user's credentials,
 * and whether the user's personal tenant exists among them.
 */
private HttpResponse authenticatedUser(HttpRequest request) {
Principal user = requireUserPrincipal(request);
// NOTE(review): requireUserPrincipal already throws on a null principal, so this check
// appears unreachable; the intended NotAuthorizedException may never be thrown — verify.
if (user == null)
throw new NotAuthorizedException("You must be authenticated.");
// Athenz principals expose their full identity name; other principals just their name.
String userName = user instanceof AthenzPrincipal ? ((AthenzPrincipal) user).getIdentity().getName() : user.getName();
TenantName tenantName = TenantName.from(UserTenant.normalizeUser(userName));
List<Tenant> tenants = controller.tenants().asList(new Credentials(user));
Slime slime = new Slime();
Cursor response = slime.setObject();
response.setString("user", userName);
Cursor tenantsArray = response.setArray("tenants");
for (Tenant tenant : tenants)
tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant.name().equals(tenantName)));
return new SlimeJsonResponse(slime);
}
/** Lists all tenants in compact form. */
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    controller.tenants().asList()
              .forEach(tenant -> tenantInTenantsListToSlime(tenant, request.getUri(), tenantArray.addObject()));
    return new SlimeJsonResponse(slime);
}
/** Serves the named tenant, or 404 when it does not exist. */
private HttpResponse tenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(TenantName.from(tenantName));
    if ( ! tenant.isPresent())
        return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    return tenant(tenant.get(), request);
}
/** Renders the full tenant resource. */
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    toSlime(root, tenant, request);
    return new SlimeJsonResponse(slime);
}
/** Lists the tenant's applications in compact form. */
private HttpResponse applications(String tenantName, HttpRequest request) {
    Slime slime = new Slime();
    Cursor array = slime.setArray();
    for (Application application : controller.applications().asList(TenantName.from(tenantName)))
        toSlime(application, array.addObject(), request);
    return new SlimeJsonResponse(slime);
}
/** Serves the full application resource, or throws NotExistsException when absent. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Application application = getApplication(tenantName, applicationName);
    Slime slime = new Slime();
    toSlime(slime.setObject(), application, request);
    return new SlimeJsonResponse(slime);
}
/**
 * Applies a partial update to the application under lock.
 * Supported fields: majorVersion (0 clears the pin) and pemDeployKey (JSON null clears the key).
 * Responds with a summary of the changes applied, or "No applicable changes."
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
// The API addresses the 'default' instance; mutate under the application lock.
controller.applications().lockOrThrow(ApplicationId.from(tenantName, applicationName, "default"), application -> {
Inspector majorVersionField = requestObject.field("majorVersion");
if (majorVersionField.valid()) {
// A value of 0 means "unset the major version pin".
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
application = application.withMajorVersion(majorVersion);
messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
}
Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
if (pemDeployKeyField.valid()) {
// JSON null (NIX) clears the deploy key.
String pemDeployKey = pemDeployKeyField.type() == Type.NIX ? null : pemDeployKeyField.asString();
application = application.withPemDeployKey(pemDeployKey);
messageBuilder.add("Set pem deploy key to " + (pemDeployKey == null ? "empty" : pemDeployKey));
}
controller.applications().store(application);
});
return new MessageResponse(messageBuilder.toString());
}
/** Returns the 'default' instance of the named application, or throws NotExistsException. */
private Application getApplication(String tenantName, String applicationName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
    Optional<Application> application = controller.applications().get(applicationId);
    return application.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
/**
 * Lists the nodes of the given deployment, as reported by the node repository:
 * hostname, state, orchestration state, version, flavor and cluster info.
 */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
List<Node> nodes = controller.configServer().nodeRepository().list(zone, id);
Slime slime = new Slime();
Cursor nodesArray = slime.setObject().setArray("nodes");
for (Node node : nodes) {
Cursor nodeObject = nodesArray.addObject();
nodeObject.setString("hostname", node.hostname().value());
nodeObject.setString("state", valueOf(node.state()));
nodeObject.setString("orchestration", valueOf(node.serviceState()));
nodeObject.setString("version", node.currentVersion().toString());
nodeObject.setString("flavor", node.canonicalFlavor());
nodeObject.setString("clusterId", node.clusterId());
nodeObject.setString("clusterType", valueOf(node.clusterType()));
}
return new SlimeJsonResponse(slime);
}
/** Maps a node state to its API string; throws on unrecognized states to surface model drift. */
private static String valueOf(Node.State state) {
    switch (state) {
        case active: return "active";
        case dirty: return "dirty";
        case failed: return "failed";
        case inactive: return "inactive";
        case parked: return "parked";
        case provisioned: return "provisioned";
        case ready: return "ready";
        case reserved: return "reserved";
        default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/** Maps a node's orchestration (service) state to its API string. */
private static String valueOf(Node.ServiceState state) {
    switch (state) {
        case allowedDown: return "allowedDown";
        case expectedUp: return "expectedUp";
        case unorchestrated: return "unorchestrated";
        default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}
/** Maps a node's cluster type to its API string. */
private static String valueOf(Node.ClusterType type) {
    switch (type) {
        case admin: return "admin";
        case container: return "container";
        case content: return "content";
        default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    }
}
/**
 * Serves deployment logs. With the 'streaming' query parameter, the config server's
 * log stream is piped directly to the client; otherwise a JSON object of log entries is returned.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    if (queryParameters.containsKey("streaming")) {
        InputStream logStream = controller.configServer().getLogStream(deployment, queryParameters);
        return new HttpResponse(200) {
            @Override
            public void render(OutputStream outputStream) throws IOException {
                // Fix: close the upstream log stream when done — it was previously leaked.
                try (InputStream stream = logStream) {
                    stream.transferTo(outputStream);
                }
            }
        };
    }
    Optional<Logs> logs = controller.configServer().getLogs(deployment, queryParameters);
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    // Each log entry becomes a string field keyed by the entry's name.
    logs.ifPresent(l -> l.logs().forEach(object::setString));
    return new SlimeJsonResponse(slime);
}
/** Force-triggers the given job for the application and reports which jobs were actually triggered. */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    String user = request.getJDiscRequest().getUserPrincipal().getName();
    String triggered = controller.applications().deploymentTrigger()
                                 .forceTrigger(id, type, user)
                                 .stream().map(JobType::jobName).collect(joining(", "));
    if (triggered.isEmpty())
        return new MessageResponse("Job " + type.jobName() + " for " + id + " not triggered");
    return new MessageResponse("Triggered " + triggered + " for " + id);
}
/** Pauses the given job for the application for the maximum allowed duration, starting now. */
private HttpResponse pause(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger()
              .pauseJob(id, type, controller.clock().instant().plus(DeploymentTrigger.maxPause));
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
/**
 * Renders the full application resource: identity, source, changes in progress,
 * job statuses, change blockers, rotations, deployments, metrics and ownership info.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
object.setString("tenant", application.id().tenant().value());
object.setString("application", application.id().application().value());
object.setString("instance", application.id().instance().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + application.id().instance().value() + "/job/",
request.getUri()).toString());
// Source revision of the last successful component build, when known.
application.deploymentJobs().statusOf(JobType.component)
.flatMap(JobStatus::lastSuccess)
.map(run -> run.application().source())
.ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source")));
application.deploymentJobs().projectId()
.ifPresent(id -> object.setLong("projectId", id));
// Change currently rolling out, and any change queued behind it.
if ( ! application.change().isEmpty()) {
toSlime(object.setObject("deploying"), application.change());
}
if ( ! application.outstandingChange().isEmpty()) {
toSlime(object.setObject("outstandingChange"), application.outstandingChange());
}
// Job statuses, sorted in deployment order.
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedJobs(application.deploymentJobs().jobStatus().values());
object.setBool("deployedInternally", application.deploymentJobs().deployedInternally());
Cursor deploymentsArray = object.setArray("deploymentJobs");
for (JobStatus job : jobStatus) {
Cursor jobObject = deploymentsArray.addObject();
jobObject.setString("type", job.type().jobName());
jobObject.setBool("success", job.isSuccess());
job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
}
// Time windows in which the deployment spec blocks version and/or revision changes.
Cursor changeBlockers = object.setArray("changeBlockers");
application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
});
object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString());
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
// Global rotation endpoints: non-legacy global endpoints plus routing-policy rotation endpoints.
Cursor globalRotationsArray = object.setArray("globalRotations");
application.endpointsIn(controller.system())
.scope(Endpoint.Scope.global)
.legacy(false)
.asList().stream()
.map(Endpoint::url)
.map(URI::toString)
.forEach(globalRotationsArray::addString);
application.rotation().ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies(application.id());
for (RoutingPolicy policy : routingPolicies) {
policy.rotationEndpointsIn(controller.system()).asList().stream()
.map(Endpoint::url)
.map(URI::toString)
.forEach(globalRotationsArray::addString);
}
// Deployments, sorted in deployment order; recursive requests inline the full deployment data.
List<Deployment> deployments = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedDeployments(application.deployments().values());
Cursor instancesArray = object.setArray("instances");
for (Deployment deployment : deployments) {
Cursor deploymentObject = instancesArray.addObject();
// Rotation status is only relevant for production deployments of applications with a rotation.
if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) {
toSlime(application.rotationStatus(deployment), deploymentObject);
}
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
else {
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("instance", application.id().instance().value());
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value() +
"/instance/" + application.id().instance().value(),
request.getUri()).toString());
}
}
application.pemDeployKey().ifPresent(key -> object.setString("pemDeployKey", key));
// Service quality metrics and recent activity.
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
// Ownership and issue-tracking references.
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
/** Returns the details of a single deployment of an application instance, or 404 if either does not exist. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().get(applicationId)
                                        .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    DeploymentId deploymentId = new DeploymentId(application.id(), ZoneId.from(environment, region));
    Deployment deployment = application.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId());
    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}
/** Writes the platform version and known application revision of the given change, if any. */
private void toSlime(Cursor object, Change change) {
    change.platform()
          .map(Version::toString)
          .ifPresent(version -> object.setString("version", version));
    change.application()
          .filter(version -> ! version.isUnknown())
          .ifPresent(version -> toSlime(version, object.setObject("revision")));
}
/**
 * Serializes a single deployment: identity, routing endpoints, service URLs, links to the
 * node repository and monitoring views, versions, activity, cost and metrics.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    // Endpoints resolved from the routing policies registered for this application
    var endpointArray = response.setArray("endpoints");
    for (var policy : controller.applications().routingPolicies(deploymentId.applicationId())) {
        Cursor endpointObject = endpointArray.addObject();
        Endpoint endpoint = policy.endpointIn(controller.system());
        endpointObject.setString("cluster", policy.cluster().value());
        endpointObject.setBool("tls", endpoint.tls());
        endpointObject.setString("url", endpoint.url().toString());
    }
    // Service URLs are only present when the config server reports endpoints for this deployment
    Cursor serviceUrlArray = response.setArray("serviceUrls");
    controller.applications().getDeploymentEndpoints(deploymentId)
              .ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())));
    response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", deployment.applicationVersion().id());
    response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
    // Expiry is only reported for zones with a configured deployment time-to-live
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
    controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId()
              .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    sourceRevisionToSlime(deployment.applicationVersion().source(), response);
    // Query and write activity for this deployment, where recorded
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
                                                                              instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    DeploymentCost appCost = deployment.calculateCost();
    Cursor costObject = response.setObject("cost");
    toSlime(appCost, costObject);
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
/** Writes the hash and source revision of the given application version, unless it is unknown. */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if (applicationVersion.isUnknown()) return; // nothing useful to report
    object.setString("hash", applicationVersion.id());
    sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
}
/** Writes repository, branch and commit of the given source revision, when present. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    revision.ifPresent(source -> {
        object.setString("gitRepository", source.repository());
        object.setString("gitBranch", source.branch());
        object.setString("gitCommit", source.commit());
    });
}
/**
 * Writes the BCP rotation status of a deployment.
 *
 * Upper-cases with Locale.ROOT so the serialized value is stable regardless of the
 * JVM default locale (e.g. the Turkish locale maps 'i' to a dotted capital 'İ').
 */
private void toSlime(RotationStatus status, Cursor object) {
    Cursor bcpStatus = object.setObject("bcpStatus");
    bcpStatus.setString("rotationStatus", status.name().toUpperCase(java.util.Locale.ROOT));
}
/** Returns the monitoring dashboard URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
/** Sets a deployment 'in' or 'out' of global rotation, recording who requested it and why. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = ZoneId.from(environment, region);
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(application + " has no deployment in " + zone);
    Inspector body = toSlime(request.getData()).get();
    String reason = mandatory("reason", body).asString();
    String agent = requireUserPrincipal(request).getName();
    long timestamp = controller.clock().instant().getEpochSecond();
    EndpointStatus.Status newStatus = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()),
                                                      new EndpointStatus(newStatus, reason, agent, timestamp));
    return new MessageResponse(String.format("Successfully set %s in %s.%s %s service",
                                             application.id().toShortString(),
                                             deployment.zone().environment().value(),
                                             deployment.zone().region().value(),
                                             inService ? "in" : "out of"));
}
/**
 * Returns the global rotation override status of each routing endpoint of the given deployment.
 *
 * Iterates the entry set directly instead of keySet() plus a per-key get(), avoiding a
 * redundant map lookup per endpoint.
 */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    for (Map.Entry<RoutingEndpoint, EndpointStatus> entry : controller.applications().globalRotationStatus(deploymentId).entrySet()) {
        array.addString(entry.getKey().upstreamName());
        EndpointStatus currentStatus = entry.getValue();
        Cursor statusObject = array.addObject();
        statusObject.setString("status", currentStatus.getStatus().name());
        // Reason and agent may be unset; serialize as empty strings rather than "null"
        statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
        statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
        statusObject.setLong("timestamp", currentStatus.getEpoch());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns the rotation status of the given deployment; 404 when the application has no rotation or no such deployment. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = ZoneId.from(environment, region);
    if ( ! application.rotation().isPresent())
        throw new NotExistsException("global rotation does not exist for " + application);
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null)
        throw new NotExistsException(application + " has no deployment in " + zone);
    Slime slime = new Slime();
    toSlime(application.rotationStatus(deployment), slime.setObject());
    return new SlimeJsonResponse(slime);
}
/** Returns the change currently rolling out to the default instance, if any. */
private HttpResponse deploying(String tenant, String application, HttpRequest request) {
    Application app = controller.applications().require(ApplicationId.from(tenant, application, "default"));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    Change change = app.change();
    if ( ! change.isEmpty()) {
        change.platform().ifPresent(version -> root.setString("platform", version.toString()));
        change.application().ifPresent(version -> root.setString("application", version.id()));
        root.setBool("pinned", change.isPinned());
    }
    return new SlimeJsonResponse(slime);
}
/** Returns whether the given deployment is currently suspended for maintenance. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    slime.setObject().setBool("suspended", controller.applications().isSuspended(deploymentId));
    return new SlimeJsonResponse(slime);
}
/** Lists the services running in the given deployment, as reported by the config server. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = ZoneId.from(environment, region);
    ApplicationId applicationId = new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         applicationId,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(applicationView);
    return response;
}
/** Proxies a request for a single service's API in the given deployment. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath);
    ZoneId zone = ZoneId.from(environment, region);
    ApplicationId applicationId = new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build();
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                         applicationId,
                                                         controller.zoneRegistry().getConfigServerApiUris(zone),
                                                         request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}
/** Creates a user tenant for the authenticated Athenz user; idempotent if it already exists. */
private HttpResponse createUser(HttpRequest request) {
    Principal principal = requireUserPrincipal(request);
    if ( ! (principal instanceof AthenzPrincipal)
         || ! (((AthenzPrincipal) principal).getIdentity() instanceof AthenzUser))
        throw new ForbiddenException("Not authenticated or not a user.");
    String user = UserTenant.normalizeUser(((AthenzPrincipal) principal).getIdentity().getName());
    UserTenant tenant = UserTenant.create(user);
    try {
        controller.tenants().createUser(tenant);
        return new MessageResponse("Created user '" + user + "'");
    }
    catch (AlreadyExistsException e) {
        return new MessageResponse("User '" + user + "' already exists");
    }
}
/**
 * Updates an existing tenant from the request body's specification and credentials.
 *
 * @throws NotExistsException (404) if the tenant does not exist, rather than implicitly creating it
 */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName); // existence check up front
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already-parsed name instead of re-parsing the raw string
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates a new tenant from the request body's specification and credentials, and returns its serialized form. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    // Reuse the already-parsed name instead of re-parsing the raw string
    return tenant(controller.tenants().require(tenant), request);
}
/** Creates the default instance of the given application and returns its serialized form. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    try {
        // User tenants need no explicit credentials; all others must supply them
        boolean userTenant = controller.tenants().require(id.tenant()).type() == Tenant.Type.user;
        Optional<Credentials> credentials = userTenant
                ? Optional.empty()
                : Optional.of(accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()));
        Application application = controller.applications().createApplication(id, credentials);
        Slime slime = new Slime();
        toSlime(application, slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }
    catch (ZmsClientException e) {
        if (e.getErrorCode() != com.yahoo.jdisc.Response.Status.FORBIDDEN)
            throw e;
        throw new ForbiddenException("Not authorized to create application", e);
    }
}
/**
 * Triggers deployment of the given Vespa version, e.g., "7.8.9", to the default instance.
 * An empty version means the current system version. The version must be active in this
 * system; when pin is true, the change is pinned so upgrades do not override it.
 */
private HttpResponse deployPlatform(String tenantName, String applicationName, boolean pin, HttpRequest request) {
    request = controller.auditLogger().log(request);
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Version version = Version.fromString(versionString);
        // Empty version means "deploy the current system version"
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion();
        if ( ! systemHasVersion(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + controller.versionStatus().versions()
                                                                               .stream()
                                                                               .map(VespaVersion::versionNumber)
                                                                               .map(Version::toString)
                                                                               .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}
/**
 * Triggers deployment of the last successfully built application package for the given application.
 *
 * @throws IllegalArgumentException if the application has no component job status or no successful
 *         component build to deploy — previously this surfaced as an opaque NoSuchElementException
 *         from unchecked Optional.get()
 */
private HttpResponse deployApplication(String tenantName, String applicationName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Change change = Change.of(application.get().deploymentJobs().statusOf(JobType.component)
                                             .flatMap(JobStatus::lastSuccess)
                                             .orElseThrow(() -> new IllegalArgumentException(
                                                     id + " has no successful component build to deploy"))
                                             .application());
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}
/** Cancels the ongoing change of the given application, e.g., everything with {"cancel":"all"}. */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Change current = application.get().change();
        if (current.isEmpty()) {
            response.append("No deployment in progress for " + application + " at this time");
            return;
        }
        controller.applications().deploymentTrigger()
                  .cancelChange(id, ChangesToCancel.valueOf(choice.toUpperCase()));
        response.append("Changed deployment from '" + current + "' to '" +
                        controller.applications().require(id).change() + "' for " + application);
    });
    return new MessageResponse(response.toString());
}
/** Schedules a restart of the given deployment, or of a single host when a "hostname" property is given. */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
    controller.applications().restart(deploymentId, hostname);
    String restarted = path(TenantResource.API_PATH, tenantName,
                            ApplicationResource.API_PATH, applicationName,
                            EnvironmentResource.API_PATH, environment,
                            "region", region,
                            "instance", instanceName);
    return new StringResponse("Requested restart of " + restarted);
}
/** Deletes the given tenant; user tenants need no credentials, all others must supply them. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if ( ! tenant.isPresent())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
    Tenant existing = tenant.get();
    if (existing.type() == Tenant.Type.user)
        controller.tenants().deleteUser((UserTenant) existing);
    else
        controller.tenants().delete(existing.name(),
                                    accessControlRequests.credentials(existing.name(),
                                                                      toSlime(request.getData()).get(),
                                                                      request.getJDiscRequest()));
    return tenant(existing, request);
}
/** Deletes the default instance of the given application; non-user tenants must supply credentials. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    boolean userTenant = controller.tenants().require(id.tenant()).type() == Tenant.Type.user;
    Optional<Credentials> credentials = userTenant
            ? Optional.empty()
            : Optional.of(accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()));
    controller.applications().deleteApplication(id, credentials);
    return new EmptyJsonResponse();
}
/** Deactivates the given deployment of an application instance. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    controller.applications().deactivate(application.id(), ZoneId.from(environment, region));
    String deactivated = path(TenantResource.API_PATH, tenantName,
                              ApplicationResource.API_PATH, applicationName,
                              EnvironmentResource.API_PATH, environment,
                              "region", region,
                              "instance", instanceName);
    return new StringResponse("Deactivated " + deactivated);
}
/**
 * Promotes application Chef environments by copying the system source environment onto
 * the application's source environment. To be used by component jobs only.
 */
private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String sourceEnvironment = chefEnvironment.systemChefEnvironment();
        String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
    }
    catch (Exception e) {
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}
/**
 * Promotes application Chef environments for jobs that deploy applications, copying the
 * application's source environment onto the zone-specific target environment.
 */
private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
        controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
    }
    catch (Exception e) {
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}
/**
 * Handles a job completion notification from an external (Screwdriver) deployment job.
 *
 * NOTE(review): the IllegalArgumentException thrown for internally deployed applications is
 * not caught by the IllegalStateException handler below, so it propagates to the request
 * dispatcher — presumably mapped to a 4xx response there; confirm this is intended.
 */
private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) {
    try {
        DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get());
        // Applications migrated to internal build/deploy no longer accept component completions
        if ( report.jobType() == JobType.component
             && controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally())
            throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " +
                                               "longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
                                               "to the old pipeline, please file a ticket at yo/vespa-support and request this.");
        controller.applications().deploymentTrigger().notifyOfCompletion(report);
        return new MessageResponse("ok");
    } catch (IllegalStateException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
}
/** Builds a job report from the given notification body; component jobs additionally carry project id and source revision. */
private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) {
    Optional<DeploymentJobs.JobError> jobError = report.field("jobError").valid()
            ? Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString()))
            : Optional.empty();
    ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString());
    JobType type = JobType.fromJobName(report.field("jobName").asString());
    long buildNumber = report.field("buildNumber").asLong();
    return type == JobType.component
            ? DeploymentJobs.JobReport.ofComponent(id,
                                                   report.field("projectId").asLong(),
                                                   buildNumber,
                                                   jobError,
                                                   toSourceRevision(report.field("sourceRevision")))
            : DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError);
}
/** Parses a source revision from the given object; all of repository, branch and commit are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    Inspector repository = object.field("repository");
    Inspector branch = object.field("branch");
    Inspector commit = object.field("commit");
    if ( ! (repository.valid() && branch.valid() && commit.valid()))
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    return new SourceRevision(repository.asString(), branch.asString(), commit.asString());
}
/** Returns the tenant with the given name, or throws NotExistsException (404) if it does not exist. */
private Tenant getTenantOrThrow(String tenantName) {
    return controller.tenants().get(tenantName)
            .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
/**
 * Serializes the given tenant: name, type, type-specific Athenz metadata and contact info,
 * and the default-instance applications belonging to it.
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tentantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            // Contact info is only present once fetched for this tenant
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                // Contacts are serialized as an array of arrays: one list of names per escalation level
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case user: break;  // no additional metadata for user tenants
        case cloud: break; // no additional metadata for cloud tenants
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    // Only default instances are listed; the "recursive" property controls the level of detail
    Cursor applicationArray = object.setArray("applications");
    for (Application application : controller.applications().asList(tenant.name())) {
        if (application.id().instance().isDefault()) {
            if (recurseOverApplications(request))
                toSlime(applicationArray.addObject(), application, request);
            else
                toSlime(application, applicationArray.addObject(), request);
        }
    }
}
/** Writes the name, metadata and url of the given tenant, for the tenant-list response. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tentantType(tenant));
    if (tenant.type() == Tenant.Type.athenz) {
        AthenzTenant athenzTenant = (AthenzTenant) tenant;
        metaData.setString("athensDomain", athenzTenant.domain().getName());
        metaData.setString("property", athenzTenant.property().id());
    }
    else if (tenant.type() != Tenant.Type.user && tenant.type() != Tenant.Type.cloud) {
        throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Returns a copy of the given URI with the scheme, user info, host and port retained and the path set to the given path. */
private URI withPath(String newPath, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath,
                       /* query */ null, /* fragment */ null);
    }
    catch (URISyntaxException e) {
        // All components originate from an already-valid URI, so this cannot occur
        throw new RuntimeException("Will not happen", e);
    }
}
/**
 * Parses the given string as a long, returning the given default when it is null.
 *
 * @throws IllegalArgumentException if the string is non-null but not a valid long
 */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null)
        return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}
/** Writes id, platform version, revision (when known), reason and timestamp of the given job run. */
private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
    object.setLong("id", jobRun.id());
    object.setString("version", jobRun.platform().toFullString());
    ApplicationVersion revision = jobRun.application();
    if ( ! revision.isUnknown())
        toSlime(revision, object.setObject("revision"));
    object.setString("reason", jobRun.reason());
    object.setLong("at", jobRun.at().toEpochMilli());
}
/**
 * Reads the given stream (at most 1 MB) and parses it as JSON.
 *
 * @throws RuntimeException wrapping the underlying IOException on read failure — the
 *         original code threw a bare RuntimeException, silently dropping the cause
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        throw new RuntimeException(e); // preserve the cause for diagnostics
    }
}
/** Returns the authenticated principal of the given request, or throws 500 if authentication did not run. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal != null)
        return principal;
    throw new InternalServerErrorException("Expected a user principal");
}
/** Returns the named field of the given object, or throws IllegalArgumentException if it is missing. */
private Inspector mandatory(String key, Inspector object) {
    Inspector field = object.field(key);
    if ( ! field.valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return field;
}
/** Returns the named field of the given object as a string, or empty if it is missing. */
private Optional<String> optional(String key, Inspector object) {
    Inspector field = object.field(key);
    return SlimeUtils.optionalString(field);
}
/** Joins the string forms of the given elements with '/' separators. */
private static String path(Object... elements) {
    StringBuilder joined = new StringBuilder();
    for (Object element : elements) {
        if (joined.length() > 0)
            joined.append("/");
        joined.append(element);
    }
    return joined.toString();
}
/** Writes the identity and API url of the given application, without details. */
private void toSlime(Application application, Cursor object, HttpRequest request) {
    ApplicationId id = application.id();
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    object.setString("url", withPath("/application/v4/tenant/" + id.tenant().value() +
                                     "/application/" + id.application().value(), request.getUri()).toString());
}
/** Serializes an activation result: revision, package size, prepare log and config change actions. */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    // Log messages from the config server's prepare phase, if any
    Cursor logArray = object.setArray("prepareMessages");
    if (result.prepareResponse().log != null) {
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }
    // Actions the config change requires: service restarts and document re-feeds
    Cursor changeObject = object.setObject("configChangeActions");
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setBool("allowed", refeedAction.allowed);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}
/** Writes name, type, config id and host of each given service into the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    for (ServiceInfo service : serviceInfoList) {
        Cursor entry = array.addObject();
        entry.setString("serviceName", service.serviceName);
        entry.setString("serviceType", service.serviceType);
        entry.setString("configId", service.configId);
        entry.setString("hostName", service.hostName);
    }
}
/** Appends each of the given strings to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    strings.forEach(array::addString);
}
/**
 * Reads the entire stream as a single string, or returns null if the stream is empty.
 *
 * Decodes as UTF-8 explicitly: the charset-less Scanner constructor uses the JVM's
 * platform default charset, which corrupts non-ASCII request bodies on hosts where
 * that default is not UTF-8.
 */
private String readToString(InputStream stream) {
    Scanner scanner = new Scanner(stream, "UTF-8").useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}
/** Returns whether the given Vespa version is among the versions currently active in this system. */
private boolean systemHasVersion(Version version) {
    return controller.versionStatus().versions().stream()
                     .map(VespaVersion::versionNumber)
                     .anyMatch(version::equals);
}
/** Writes total cost of ownership, waste, utilization, and per-cluster costs of the given deployment cost. */
public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
    object.setLong("tco", (long) deploymentCost.getTco());
    object.setLong("waste", (long) deploymentCost.getWaste());
    object.setDouble("utilization", deploymentCost.getUtilization());
    Cursor clustersObject = object.setObject("cluster");
    deploymentCost.getCluster().forEach((name, clusterCost) -> toSlime(clusterCost, clustersObject.setObject(name)));
}
/**
 * Serializes the cost, flavor, utilization and hosts of a single cluster.
 *
 * tco and waste are cast to long (not int) before serialization, matching the
 * DeploymentCost serializer above and avoiding silent truncation of values
 * beyond the int range.
 */
private static void toSlime(ClusterCost clusterCost, Cursor object) {
    object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
    object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
    object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
    object.setLong("tco", (long) clusterCost.getTco());
    object.setLong("waste", (long) clusterCost.getWaste());
    object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
    object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
    object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
    object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
    object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
    object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
    // Utilization relative to the flavor's capacity
    Cursor utilObject = object.setObject("util");
    utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
    utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
    utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
    utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
    // Raw measured system usage
    Cursor usageObject = object.setObject("usage");
    usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
    usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
    usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
    usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
    Cursor hostnamesArray = object.setArray("hostnames");
    for (String hostname : clusterCost.getClusterInfo().getHostnames())
        hostnamesArray.addString(hostname);
}
/**
 * Returns the name of the bottleneck resource: the one whose utilization equals
 * the maximum. Memory, disk and disk-busy take precedence in that order;
 * if none of them is at the maximum, the answer is cpu.
 */
private static String getResourceName(ClusterUtilization utilization) {
    double max = utilization.getMaxUtilization();
    if (utilization.getMemory() == max) return "mem";
    if (utilization.getDisk() == max) return "disk";
    if (utilization.getDiskBusy() == max) return "diskbusy";
    return "cpu";
}
/** Returns whether the request asks for recursion over tenants (or anything deeper). */
private static boolean recurseOverTenants(HttpRequest request) {
    if (recurseOverApplications(request)) return true;
    return "tenant".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion over applications (or anything deeper). */
private static boolean recurseOverApplications(HttpRequest request) {
    if (recurseOverDeployments(request)) return true;
    return "application".equals(request.getProperty("recursive"));
}
/** Returns whether the request asks for recursion over deployments. */
private static boolean recurseOverDeployments(HttpRequest request) {
    String recursive = request.getProperty("recursive");
    // Guava's ImmutableSet.contains is null safe, so an absent property simply yields false.
    return ImmutableSet.of("all", "true", "deployment").contains(recursive);
}
/**
 * Returns the API name of the given tenant's type.
 *
 * @throws IllegalArgumentException if the tenant type is not one of the known values
 */
private static String tentantType(Tenant tenant) { // NOTE(review): name has a typo ("tentant") but is kept for caller compatibility
    switch (tenant.type()) {
        case user: return "USER";
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        // Report the unmatched type value itself; the class simple name did not identify it.
        default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.type());
    }
}
/** Builds the application id from the tenant, application and instance path segments. */
private static ApplicationId appIdFromPath(Path path) {
    String tenant = path.get("tenant");
    String application = path.get("application");
    String instance = path.get("instance");
    return ApplicationId.from(tenant, application, instance);
}
/** Resolves the job type named by the "jobtype" path segment. */
private static JobType jobTypeFromPath(Path path) {
    String jobName = path.get("jobtype");
    return JobType.fromJobName(jobName);
}
/** Builds the run id from the application, job type and "number" path segments. */
private static RunId runIdFromPath(Path path) {
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), Long.parseLong(path.get("number")));
}
/**
 * Handles an application submission: parses the multipart request into its named
 * parts, verifies that the identity declared in the application package may be used
 * by the submitting user, and registers the submission with the job controller.
 * Expects parts for the submit options (JSON), the application zip and the test zip.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
SourceRevision sourceRevision = toSourceRevision(submitOptions);
String authorEmail = submitOptions.field("authorEmail").asString();
// Project id must be positive; an absent or zero field falls back to 1.
long projectId = Math.max(1, submitOptions.field("projectId").asLong());
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
// Authorization check: the deployment identity configured in the package must be
// usable by the authenticated principal — throws before anything is registered.
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
tenant,
application,
sourceRevision,
authorEmail,
projectId,
applicationPackage,
dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
/**
 * Parses the multipart body of the request into named parts.
 * If a content hash header is present, the SHA-256 digest of the body is computed
 * while parsing and must match the base64-decoded header value, guarding against
 * corrupted or tampered uploads.
 *
 * @throws IllegalArgumentException if the computed digest does not match the header
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
// NOTE(review): looked up as "x-Content-Hash" but reported as "X-Content-Hash" below —
// presumably getHeader is case-insensitive; confirm before normalizing.
String contentHash = request.getHeader("x-Content-Hash");
if (contentHash == null)
return new MultipartParser().parse(request);
// Digest the body as the parser consumes it, then compare with the declared hash.
DigestInputStream digester = Signatures.sha256Digester(request.getData());
var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
return dataParts;
}
} |
Doesn't allow adding multiple values for the same header. | private RequestBuilder header(String name, String value) {
this.headers.putIfAbsent(name, new ArrayList<>());
this.headers.get(name).add(value);
return this;
} | this.headers.putIfAbsent(name, new ArrayList<>()); | private RequestBuilder header(String name, String value) {
this.headers.putIfAbsent(name, new ArrayList<>());
this.headers.get(name).add(value);
return this;
} | class RequestBuilder implements Supplier<Request> {
private final String path;
private final Request.Method method;
private byte[] data = new byte[0];
private AthenzIdentity identity;
private OktaAccessToken oktaAccessToken;
private String contentType = "application/json";
private Map<String, List<String>> headers = new HashMap<>();
private String recursive;
private RequestBuilder(String path, Request.Method method) {
this.path = path;
this.method = method;
}
private RequestBuilder data(byte[] data) { this.data = data; return this; }
private RequestBuilder data(String data) { return data(data.getBytes(StandardCharsets.UTF_8)); }
private RequestBuilder data(MultiPartStreamer streamer) {
return Exceptions.uncheck(() -> data(streamer.data().readAllBytes()).contentType(streamer.contentType()));
}
private RequestBuilder data(HttpEntity data) {
ByteArrayOutputStream out = new ByteArrayOutputStream();
try {
data.writeTo(out);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return data(out.toByteArray()).contentType(data.getContentType().getValue());
}
private RequestBuilder userIdentity(UserId userId) { this.identity = HostedAthenzIdentities.from(userId); return this; }
private RequestBuilder screwdriverIdentity(ScrewdriverId screwdriverId) { this.identity = HostedAthenzIdentities.from(screwdriverId); return this; }
private RequestBuilder oktaAccessToken(OktaAccessToken oktaAccessToken) { this.oktaAccessToken = oktaAccessToken; return this; }
private RequestBuilder contentType(String contentType) { this.contentType = contentType; return this; }
private RequestBuilder recursive(String recursive) { this.recursive = recursive; return this; }
@Override
public Request get() {
Request request = new Request("http:
(recursive == null ? "" : "?recursive=" + recursive),
data, method);
request.getHeaders().addAll(headers);
request.getHeaders().put("Content-Type", contentType);
if (identity != null) {
addIdentityToRequest(request, identity);
}
if (oktaAccessToken != null) {
addOktaAccessToken(request, oktaAccessToken);
}
return request;
}
} | class RequestBuilder implements Supplier<Request> {
private final String path;
private final Request.Method method;
private byte[] data = new byte[0];
private AthenzIdentity identity;
private OktaAccessToken oktaAccessToken;
private String contentType = "application/json";
private Map<String, List<String>> headers = new HashMap<>();
private String recursive;
private RequestBuilder(String path, Request.Method method) {
this.path = path;
this.method = method;
}
private RequestBuilder data(byte[] data) { this.data = data; return this; }
private RequestBuilder data(String data) { return data(data.getBytes(StandardCharsets.UTF_8)); }
private RequestBuilder data(MultiPartStreamer streamer) {
return Exceptions.uncheck(() -> data(streamer.data().readAllBytes()).contentType(streamer.contentType()));
}
private RequestBuilder userIdentity(UserId userId) { this.identity = HostedAthenzIdentities.from(userId); return this; }
private RequestBuilder screwdriverIdentity(ScrewdriverId screwdriverId) { this.identity = HostedAthenzIdentities.from(screwdriverId); return this; }
private RequestBuilder oktaAccessToken(OktaAccessToken oktaAccessToken) { this.oktaAccessToken = oktaAccessToken; return this; }
private RequestBuilder contentType(String contentType) { this.contentType = contentType; return this; }
private RequestBuilder recursive(String recursive) { this.recursive = recursive; return this; }
@Override
public Request get() {
Request request = new Request("http:
(recursive == null ? "" : "?recursive=" + recursive),
data, method);
request.getHeaders().addAll(headers);
request.getHeaders().put("Content-Type", contentType);
if (identity != null) {
addIdentityToRequest(request, identity);
}
if (oktaAccessToken != null) {
addOktaAccessToken(request, oktaAccessToken);
}
return request;
}
} |
It should have identical semantics to the two code lines you wrote. | private RequestBuilder header(String name, String value) {
this.headers.putIfAbsent(name, new ArrayList<>());
this.headers.get(name).add(value);
return this;
} | this.headers.putIfAbsent(name, new ArrayList<>()); | private RequestBuilder header(String name, String value) {
this.headers.putIfAbsent(name, new ArrayList<>());
this.headers.get(name).add(value);
return this;
} | class RequestBuilder implements Supplier<Request> {
private final String path;
private final Request.Method method;
private byte[] data = new byte[0];
private AthenzIdentity identity;
private OktaAccessToken oktaAccessToken;
private String contentType = "application/json";
private Map<String, List<String>> headers = new HashMap<>();
private String recursive;
private RequestBuilder(String path, Request.Method method) {
this.path = path;
this.method = method;
}
private RequestBuilder data(byte[] data) { this.data = data; return this; }
private RequestBuilder data(String data) { return data(data.getBytes(StandardCharsets.UTF_8)); }
private RequestBuilder data(MultiPartStreamer streamer) {
return Exceptions.uncheck(() -> data(streamer.data().readAllBytes()).contentType(streamer.contentType()));
}
private RequestBuilder data(HttpEntity data) {
ByteArrayOutputStream out = new ByteArrayOutputStream();
try {
data.writeTo(out);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return data(out.toByteArray()).contentType(data.getContentType().getValue());
}
private RequestBuilder userIdentity(UserId userId) { this.identity = HostedAthenzIdentities.from(userId); return this; }
private RequestBuilder screwdriverIdentity(ScrewdriverId screwdriverId) { this.identity = HostedAthenzIdentities.from(screwdriverId); return this; }
private RequestBuilder oktaAccessToken(OktaAccessToken oktaAccessToken) { this.oktaAccessToken = oktaAccessToken; return this; }
private RequestBuilder contentType(String contentType) { this.contentType = contentType; return this; }
private RequestBuilder recursive(String recursive) { this.recursive = recursive; return this; }
@Override
public Request get() {
Request request = new Request("http:
(recursive == null ? "" : "?recursive=" + recursive),
data, method);
request.getHeaders().addAll(headers);
request.getHeaders().put("Content-Type", contentType);
if (identity != null) {
addIdentityToRequest(request, identity);
}
if (oktaAccessToken != null) {
addOktaAccessToken(request, oktaAccessToken);
}
return request;
}
} | class RequestBuilder implements Supplier<Request> {
private final String path;
private final Request.Method method;
private byte[] data = new byte[0];
private AthenzIdentity identity;
private OktaAccessToken oktaAccessToken;
private String contentType = "application/json";
private Map<String, List<String>> headers = new HashMap<>();
private String recursive;
private RequestBuilder(String path, Request.Method method) {
this.path = path;
this.method = method;
}
private RequestBuilder data(byte[] data) { this.data = data; return this; }
private RequestBuilder data(String data) { return data(data.getBytes(StandardCharsets.UTF_8)); }
private RequestBuilder data(MultiPartStreamer streamer) {
return Exceptions.uncheck(() -> data(streamer.data().readAllBytes()).contentType(streamer.contentType()));
}
private RequestBuilder userIdentity(UserId userId) { this.identity = HostedAthenzIdentities.from(userId); return this; }
private RequestBuilder screwdriverIdentity(ScrewdriverId screwdriverId) { this.identity = HostedAthenzIdentities.from(screwdriverId); return this; }
private RequestBuilder oktaAccessToken(OktaAccessToken oktaAccessToken) { this.oktaAccessToken = oktaAccessToken; return this; }
private RequestBuilder contentType(String contentType) { this.contentType = contentType; return this; }
private RequestBuilder recursive(String recursive) { this.recursive = recursive; return this; }
@Override
public Request get() {
Request request = new Request("http:
(recursive == null ? "" : "?recursive=" + recursive),
data, method);
request.getHeaders().addAll(headers);
request.getHeaders().put("Content-Type", contentType);
if (identity != null) {
addIdentityToRequest(request, identity);
}
if (oktaAccessToken != null) {
addOktaAccessToken(request, oktaAccessToken);
}
return request;
}
} |
Oh, sorry. Yes, it would have, if not for the return value being `null` when the key was previously unmapped. | private RequestBuilder header(String name, String value) {
this.headers.putIfAbsent(name, new ArrayList<>());
this.headers.get(name).add(value);
return this;
} | this.headers.putIfAbsent(name, new ArrayList<>()); | private RequestBuilder header(String name, String value) {
this.headers.putIfAbsent(name, new ArrayList<>());
this.headers.get(name).add(value);
return this;
} | class RequestBuilder implements Supplier<Request> {
private final String path;
private final Request.Method method;
private byte[] data = new byte[0];
private AthenzIdentity identity;
private OktaAccessToken oktaAccessToken;
private String contentType = "application/json";
private Map<String, List<String>> headers = new HashMap<>();
private String recursive;
private RequestBuilder(String path, Request.Method method) {
this.path = path;
this.method = method;
}
private RequestBuilder data(byte[] data) { this.data = data; return this; }
private RequestBuilder data(String data) { return data(data.getBytes(StandardCharsets.UTF_8)); }
private RequestBuilder data(MultiPartStreamer streamer) {
return Exceptions.uncheck(() -> data(streamer.data().readAllBytes()).contentType(streamer.contentType()));
}
private RequestBuilder data(HttpEntity data) {
ByteArrayOutputStream out = new ByteArrayOutputStream();
try {
data.writeTo(out);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return data(out.toByteArray()).contentType(data.getContentType().getValue());
}
private RequestBuilder userIdentity(UserId userId) { this.identity = HostedAthenzIdentities.from(userId); return this; }
private RequestBuilder screwdriverIdentity(ScrewdriverId screwdriverId) { this.identity = HostedAthenzIdentities.from(screwdriverId); return this; }
private RequestBuilder oktaAccessToken(OktaAccessToken oktaAccessToken) { this.oktaAccessToken = oktaAccessToken; return this; }
private RequestBuilder contentType(String contentType) { this.contentType = contentType; return this; }
private RequestBuilder recursive(String recursive) { this.recursive = recursive; return this; }
@Override
public Request get() {
Request request = new Request("http:
(recursive == null ? "" : "?recursive=" + recursive),
data, method);
request.getHeaders().addAll(headers);
request.getHeaders().put("Content-Type", contentType);
if (identity != null) {
addIdentityToRequest(request, identity);
}
if (oktaAccessToken != null) {
addOktaAccessToken(request, oktaAccessToken);
}
return request;
}
} | class RequestBuilder implements Supplier<Request> {
private final String path;
private final Request.Method method;
private byte[] data = new byte[0];
private AthenzIdentity identity;
private OktaAccessToken oktaAccessToken;
private String contentType = "application/json";
private Map<String, List<String>> headers = new HashMap<>();
private String recursive;
private RequestBuilder(String path, Request.Method method) {
this.path = path;
this.method = method;
}
private RequestBuilder data(byte[] data) { this.data = data; return this; }
private RequestBuilder data(String data) { return data(data.getBytes(StandardCharsets.UTF_8)); }
private RequestBuilder data(MultiPartStreamer streamer) {
return Exceptions.uncheck(() -> data(streamer.data().readAllBytes()).contentType(streamer.contentType()));
}
private RequestBuilder userIdentity(UserId userId) { this.identity = HostedAthenzIdentities.from(userId); return this; }
private RequestBuilder screwdriverIdentity(ScrewdriverId screwdriverId) { this.identity = HostedAthenzIdentities.from(screwdriverId); return this; }
private RequestBuilder oktaAccessToken(OktaAccessToken oktaAccessToken) { this.oktaAccessToken = oktaAccessToken; return this; }
private RequestBuilder contentType(String contentType) { this.contentType = contentType; return this; }
private RequestBuilder recursive(String recursive) { this.recursive = recursive; return this; }
@Override
public Request get() {
Request request = new Request("http:
(recursive == null ? "" : "?recursive=" + recursive),
data, method);
request.getHeaders().addAll(headers);
request.getHeaders().put("Content-Type", contentType);
if (identity != null) {
addIdentityToRequest(request, identity);
}
if (oktaAccessToken != null) {
addOktaAccessToken(request, oktaAccessToken);
}
return request;
}
} |
Good point - `putIfAbsent` did not return the newly inserted value as I assumed | private RequestBuilder header(String name, String value) {
this.headers.putIfAbsent(name, new ArrayList<>());
this.headers.get(name).add(value);
return this;
} | this.headers.putIfAbsent(name, new ArrayList<>()); | private RequestBuilder header(String name, String value) {
this.headers.putIfAbsent(name, new ArrayList<>());
this.headers.get(name).add(value);
return this;
} | class RequestBuilder implements Supplier<Request> {
private final String path;
private final Request.Method method;
private byte[] data = new byte[0];
private AthenzIdentity identity;
private OktaAccessToken oktaAccessToken;
private String contentType = "application/json";
private Map<String, List<String>> headers = new HashMap<>();
private String recursive;
private RequestBuilder(String path, Request.Method method) {
this.path = path;
this.method = method;
}
private RequestBuilder data(byte[] data) { this.data = data; return this; }
private RequestBuilder data(String data) { return data(data.getBytes(StandardCharsets.UTF_8)); }
private RequestBuilder data(MultiPartStreamer streamer) {
return Exceptions.uncheck(() -> data(streamer.data().readAllBytes()).contentType(streamer.contentType()));
}
private RequestBuilder data(HttpEntity data) {
ByteArrayOutputStream out = new ByteArrayOutputStream();
try {
data.writeTo(out);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return data(out.toByteArray()).contentType(data.getContentType().getValue());
}
private RequestBuilder userIdentity(UserId userId) { this.identity = HostedAthenzIdentities.from(userId); return this; }
private RequestBuilder screwdriverIdentity(ScrewdriverId screwdriverId) { this.identity = HostedAthenzIdentities.from(screwdriverId); return this; }
private RequestBuilder oktaAccessToken(OktaAccessToken oktaAccessToken) { this.oktaAccessToken = oktaAccessToken; return this; }
private RequestBuilder contentType(String contentType) { this.contentType = contentType; return this; }
private RequestBuilder recursive(String recursive) { this.recursive = recursive; return this; }
@Override
public Request get() {
Request request = new Request("http:
(recursive == null ? "" : "?recursive=" + recursive),
data, method);
request.getHeaders().addAll(headers);
request.getHeaders().put("Content-Type", contentType);
if (identity != null) {
addIdentityToRequest(request, identity);
}
if (oktaAccessToken != null) {
addOktaAccessToken(request, oktaAccessToken);
}
return request;
}
} | class RequestBuilder implements Supplier<Request> {
private final String path;
private final Request.Method method;
private byte[] data = new byte[0];
private AthenzIdentity identity;
private OktaAccessToken oktaAccessToken;
private String contentType = "application/json";
private Map<String, List<String>> headers = new HashMap<>();
private String recursive;
private RequestBuilder(String path, Request.Method method) {
this.path = path;
this.method = method;
}
private RequestBuilder data(byte[] data) { this.data = data; return this; }
private RequestBuilder data(String data) { return data(data.getBytes(StandardCharsets.UTF_8)); }
private RequestBuilder data(MultiPartStreamer streamer) {
return Exceptions.uncheck(() -> data(streamer.data().readAllBytes()).contentType(streamer.contentType()));
}
private RequestBuilder userIdentity(UserId userId) { this.identity = HostedAthenzIdentities.from(userId); return this; }
private RequestBuilder screwdriverIdentity(ScrewdriverId screwdriverId) { this.identity = HostedAthenzIdentities.from(screwdriverId); return this; }
private RequestBuilder oktaAccessToken(OktaAccessToken oktaAccessToken) { this.oktaAccessToken = oktaAccessToken; return this; }
private RequestBuilder contentType(String contentType) { this.contentType = contentType; return this; }
private RequestBuilder recursive(String recursive) { this.recursive = recursive; return this; }
@Override
public Request get() {
Request request = new Request("http:
(recursive == null ? "" : "?recursive=" + recursive),
data, method);
request.getHeaders().addAll(headers);
request.getHeaders().put("Content-Type", contentType);
if (identity != null) {
addIdentityToRequest(request, identity);
}
if (oktaAccessToken != null) {
addOktaAccessToken(request, oktaAccessToken);
}
return request;
}
} |
Consider abstracting user output as a separate interface (in case you want to write a unit test that asserts on the output). | protected void doExecute() {
Deployment deployment = build == null
? Deployment.ofPackage(Paths.get(firstNonBlank(applicationZip, projectPathOf("target", "application.zip"))))
: Deployment.ofReference(repository, branch, commit, build);
if ("true".equalsIgnoreCase(ignoreValidationErrors)) deployment = deployment.ignoringValidationErrors();
if (vespaVersion != null) deployment = deployment.atVersion(vespaVersion);
ZoneId zone = environment == null || region == null ? controller.devZone() : ZoneId.from(environment, region);
System.out.println(controller.deploy(deployment, id, zone).json());
} | System.out.println(controller.deploy(deployment, id, zone).json()); | protected void doExecute() {
Deployment deployment = build == null
? Deployment.ofPackage(Paths.get(firstNonBlank(applicationZip, projectPathOf("target", "application.zip"))))
: Deployment.ofReference(repository, branch, commit, build);
if ("true".equalsIgnoreCase(ignoreValidationErrors)) deployment = deployment.ignoringValidationErrors();
if (vespaVersion != null) deployment = deployment.atVersion(vespaVersion);
ZoneId zone = environment == null || region == null ? controller.devZone() : ZoneId.from(environment, region);
System.out.println(controller.deploy(deployment, id, zone).json());
} | class DeployMojo extends AbstractVespaMojo {
@Parameter(property = "applicationZip")
private String applicationZip;
@Parameter(property = "vespaVersion")
private String vespaVersion;
@Parameter(property = "ignoreValidationErrors")
private String ignoreValidationErrors;
@Parameter(property = "environment")
private String environment;
@Parameter(property = "region")
private String region;
@Parameter(property = "repository")
private String repository;
@Parameter(property = "branch")
private String branch;
@Parameter(property = "commit")
private String commit;
@Parameter(property = "build")
private Long build;
@Override
} | class DeployMojo extends AbstractVespaMojo {
@Parameter(property = "applicationZip")
private String applicationZip;
@Parameter(property = "vespaVersion")
private String vespaVersion;
@Parameter(property = "ignoreValidationErrors")
private String ignoreValidationErrors;
@Parameter(property = "environment")
private String environment;
@Parameter(property = "region")
private String region;
@Parameter(property = "repository")
private String repository;
@Parameter(property = "branch")
private String branch;
@Parameter(property = "commit")
private String commit;
@Parameter(property = "build")
private Long build;
@Override
} |
Yes, will consider that. | protected void doExecute() {
Deployment deployment = build == null
? Deployment.ofPackage(Paths.get(firstNonBlank(applicationZip, projectPathOf("target", "application.zip"))))
: Deployment.ofReference(repository, branch, commit, build);
if ("true".equalsIgnoreCase(ignoreValidationErrors)) deployment = deployment.ignoringValidationErrors();
if (vespaVersion != null) deployment = deployment.atVersion(vespaVersion);
ZoneId zone = environment == null || region == null ? controller.devZone() : ZoneId.from(environment, region);
System.out.println(controller.deploy(deployment, id, zone).json());
} | System.out.println(controller.deploy(deployment, id, zone).json()); | protected void doExecute() {
Deployment deployment = build == null
? Deployment.ofPackage(Paths.get(firstNonBlank(applicationZip, projectPathOf("target", "application.zip"))))
: Deployment.ofReference(repository, branch, commit, build);
if ("true".equalsIgnoreCase(ignoreValidationErrors)) deployment = deployment.ignoringValidationErrors();
if (vespaVersion != null) deployment = deployment.atVersion(vespaVersion);
ZoneId zone = environment == null || region == null ? controller.devZone() : ZoneId.from(environment, region);
System.out.println(controller.deploy(deployment, id, zone).json());
} | class DeployMojo extends AbstractVespaMojo {
@Parameter(property = "applicationZip")
private String applicationZip;
@Parameter(property = "vespaVersion")
private String vespaVersion;
@Parameter(property = "ignoreValidationErrors")
private String ignoreValidationErrors;
@Parameter(property = "environment")
private String environment;
@Parameter(property = "region")
private String region;
@Parameter(property = "repository")
private String repository;
@Parameter(property = "branch")
private String branch;
@Parameter(property = "commit")
private String commit;
@Parameter(property = "build")
private Long build;
@Override
} | class DeployMojo extends AbstractVespaMojo {
@Parameter(property = "applicationZip")
private String applicationZip;
@Parameter(property = "vespaVersion")
private String vespaVersion;
@Parameter(property = "ignoreValidationErrors")
private String ignoreValidationErrors;
@Parameter(property = "environment")
private String environment;
@Parameter(property = "region")
private String region;
@Parameter(property = "repository")
private String repository;
@Parameter(property = "branch")
private String branch;
@Parameter(property = "commit")
private String commit;
@Parameter(property = "build")
private Long build;
@Override
} |
Always feels weird using `System.out.println` :D | protected void doExecute() {
Deployment deployment = build == null
? Deployment.ofPackage(Paths.get(firstNonBlank(applicationZip, projectPathOf("target", "application.zip"))))
: Deployment.ofReference(repository, branch, commit, build);
if ("true".equalsIgnoreCase(ignoreValidationErrors)) deployment = deployment.ignoringValidationErrors();
if (vespaVersion != null) deployment = deployment.atVersion(vespaVersion);
ZoneId zone = environment == null || region == null ? controller.devZone() : ZoneId.from(environment, region);
System.out.println(controller.deploy(deployment, id, zone).json());
} | System.out.println(controller.deploy(deployment, id, zone).json()); | protected void doExecute() {
Deployment deployment = build == null
? Deployment.ofPackage(Paths.get(firstNonBlank(applicationZip, projectPathOf("target", "application.zip"))))
: Deployment.ofReference(repository, branch, commit, build);
if ("true".equalsIgnoreCase(ignoreValidationErrors)) deployment = deployment.ignoringValidationErrors();
if (vespaVersion != null) deployment = deployment.atVersion(vespaVersion);
ZoneId zone = environment == null || region == null ? controller.devZone() : ZoneId.from(environment, region);
System.out.println(controller.deploy(deployment, id, zone).json());
} | class DeployMojo extends AbstractVespaMojo {
@Parameter(property = "applicationZip")
private String applicationZip;
@Parameter(property = "vespaVersion")
private String vespaVersion;
@Parameter(property = "ignoreValidationErrors")
private String ignoreValidationErrors;
@Parameter(property = "environment")
private String environment;
@Parameter(property = "region")
private String region;
@Parameter(property = "repository")
private String repository;
@Parameter(property = "branch")
private String branch;
@Parameter(property = "commit")
private String commit;
@Parameter(property = "build")
private Long build;
@Override
} | class DeployMojo extends AbstractVespaMojo {
@Parameter(property = "applicationZip")
private String applicationZip;
@Parameter(property = "vespaVersion")
private String vespaVersion;
@Parameter(property = "ignoreValidationErrors")
private String ignoreValidationErrors;
@Parameter(property = "environment")
private String environment;
@Parameter(property = "region")
private String region;
@Parameter(property = "repository")
private String repository;
@Parameter(property = "branch")
private String branch;
@Parameter(property = "commit")
private String commit;
@Parameter(property = "build")
private Long build;
@Override
} |
You only need to check up to `j < i`, or start at `i + 1`; then you won't need the `if` below... | public List<Node> addNodes(List<Node> nodes) {
try (Mutex lock = lockAllocation()) {
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
var message = "Cannot add " + node.hostname() + ": A node with this name already exists";
if (getNode(node.hostname()).isPresent()) throw new IllegalArgumentException(message);
for (int j = 0; j < nodes.size(); j++) {
if (i == j) continue;
var other = nodes.get(j);
if (node.equals(other)) throw new IllegalArgumentException(message);
}
}
return db.addNodes(nodes);
}
} | for (int j = 0; j < nodes.size(); j++) { | public List<Node> addNodes(List<Node> nodes) {
try (Mutex lock = lockAllocation()) {
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
var message = "Cannot add " + node.hostname() + ": A node with this name already exists";
if (getNode(node.hostname()).isPresent()) throw new IllegalArgumentException(message);
for (int j = 0; j < i; j++) {
var other = nodes.get(j);
if (node.equals(other)) throw new IllegalArgumentException(message);
}
}
return db.addNodes(nodes);
}
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;
private final Clock clock;
private final Zone zone;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final OsVersions osVersions;
private final InfrastructureVersions infrastructureVersions;
private final FirmwareChecks firmwareChecks;
private final DockerImages dockerImages;
private final JobControl jobControl;
/**
* Creates a node repository from a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.dockerImage()), config.useCuratorClientCache());
}
/**
* Creates a node repository from a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
DockerImage dockerImage, boolean useCuratorClientCache) {
this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache);
this.zone = zone;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
this.osVersions = new OsVersions(db);
this.infrastructureVersions = new InfrastructureVersions(db);
this.firmwareChecks = new FirmwareChecks(db, clock);
this.dockerImages = new DockerImages(db, dockerImage);
this.jobControl = new JobControl(db);
for (Node.State state : Node.State.values())
db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for nodes of the given type in this */
public DockerImage dockerImage(NodeType nodeType) { return dockerImages.dockerImageFor(nodeType); }
/** Returns the name resolver used to resolve hostnames and ip addresses */
public NameResolver nameResolver() { return nameResolver; }
/** Returns the OS versions to use for nodes in this */
public OsVersions osVersions() { return osVersions; }
/** Returns the infrastructure versions to use for nodes in this */
public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }
/** Returns the status of firmware checks for hosts managed by this. */
public FirmwareChecks firmwareChecks() { return firmwareChecks; }
/** Returns the docker images to use for nodes in this. */
public DockerImages dockerImages() { return dockerImages; }
/** Returns the status of maintenance jobs managed by this. */
public JobControl jobControl() { return jobControl; }
/**
 * Finds and returns the node with the hostname in any of the given states, or empty if not found
 *
 * @param hostname the full host name of the node
 * @param inState the states the node may be in. If no states are given, it will be returned from any state
 * @return the node, or empty if it was not found in any of the given states
 */
public Optional<Node> getNode(String hostname, Node.State ... inState) {
    return db.getNode(hostname, inState);
}
/**
 * Returns all nodes in any of the given states.
 *
 * @param inState the states to return nodes from. If no states are given, all nodes are returned
 * @return a mutable copy of the matching nodes; empty if no node matches
 */
public List<Node> getNodes(Node.State ... inState) {
    return new ArrayList<>(db.getNodes(inState));
}
/**
 * Finds and returns the nodes of the given type in any of the given states.
 *
 * @param type the node type to return
 * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
 * @return the matching nodes; empty if no node matches
 */
public List<Node> getNodes(NodeType type, Node.State ... inState) {
    return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/** Returns a filterable list of all nodes in this repository */
public NodeList list() {
    return new NodeList(getNodes());
}
/** Returns a filterable list of all load balancers in this repository */
public LoadBalancerList loadBalancers() {
    return new LoadBalancerList(database().readLoadBalancers().values());
}
/** Returns the nodes allocated to the given application, in any of the given states */
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return db.getNodes(id, inState); }
/** Returns all nodes in the inactive state */
public List<Node> getInactive() { return db.getNodes(Node.State.inactive); }
/** Returns all nodes in the failed state */
public List<Node> getFailed() { return db.getNodes(Node.State.failed); }
/**
 * Returns the ACL for the node (trusted nodes, networks and ports)
 *
 * @param node the node to compute an ACL for
 * @param candidates all nodes in the repository, used to find peers to trust
 * @param loadBalancers all load balancers, used to find networks to trust
 */
private NodeAcl getNodeAcl(Node node, NodeList candidates, LoadBalancerList loadBalancers) {
    // TreeSet ordered by hostname gives a stable, deterministic ACL
    Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
    Set<Integer> trustedPorts = new LinkedHashSet<>();
    Set<String> trustedNetworks = new LinkedHashSet<>();
    // Allocated nodes trust the other nodes of the same application, and the networks
    // of that application's load balancers
    node.allocation().ifPresent(allocation -> {
        trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
        loadBalancers.owner(allocation.owner()).asList().stream()
                     .map(LoadBalancer::instance)
                     .map(LoadBalancerInstance::networks)
                     .forEach(trustedNetworks::addAll);
    });
    trustedPorts.add(22); // presumably SSH — TODO confirm
    switch (node.type()) {
        case tenant:
            // Tenant nodes trust the config servers, their Docker hosts, and the proxies.
            // Ready nodes additionally trust all tenant nodes — note: parentsOf is evaluated
            // against the set accumulated so far, so statement order matters here.
            trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
            trustedNodes.addAll(candidates.parentsOf(trustedNodes).asList());
            trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
            if (node.state() == Node.State.ready) {
                trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
            }
            break;
        case config:
            // Config servers trust everything in the zone
            trustedNodes.addAll(candidates.asList());
            trustedPorts.add(4443);
            break;
        case proxy:
            // Proxies trust the config servers and open their serving ports to everyone
            trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
            trustedPorts.add(443);
            trustedPorts.add(4080);
            trustedPorts.add(4443);
            break;
        case controller:
            trustedPorts.add(4443);
            trustedPorts.add(443);
            break;
        default:
            // Fail loudly for node types without a defined ACL policy
            throw new IllegalArgumentException(
                    String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
                                  node.hostname(), node.type()));
    }
    return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
}
/**
 * Creates a list of node ACLs which identify which nodes the given node should trust
 *
 * @param node Node for which to generate ACLs
 * @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
 * @return List of node ACLs
 */
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
    NodeList candidates = list();
    LoadBalancerList loadBalancers = loadBalancers();
    if ( ! children) return Collections.singletonList(getNodeAcl(node, candidates, loadBalancers));
    // One ACL per child of the given node, returned as an unmodifiable list
    List<NodeAcl> childAcls = new ArrayList<>();
    for (Node child : candidates.childrenOf(node).asList())
        childAcls.add(getNodeAcl(child, candidates, loadBalancers));
    return Collections.unmodifiableList(childAcls);
}
/** Returns the flavors this node repository can allocate from */
public NodeFlavors getAvailableFlavors() {
    return flavors;
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Set<String> ipAddressPool, Optional<String> parentHostname,
                       Optional<String> modelName, Flavor flavor, NodeType type) {
    // Fall back to DNS resolution when the caller supplied no addresses
    Set<String> resolvedAddresses = ipAddresses.isEmpty() ? nameResolver.getAllByNameOrThrow(hostname)
                                                          : ipAddresses;
    return Node.create(openStackId, ImmutableSet.copyOf(resolvedAddresses), ipAddressPool, hostname, parentHostname, modelName, flavor, type);
}
/** Creates a new node object without an IP address pool or model name */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
                       Flavor flavor, NodeType type) {
    return createNode(openStackId, hostname, ipAddresses, Collections.emptySet(), parentHostname, Optional.empty(), flavor, type);
}
/** Creates a new node object whose IP addresses are resolved from its hostname */
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
                       Flavor flavor, NodeType type) {
    return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/**
 * Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes
 *
 * @param nodes the nodes to add; each must be a Docker container with an allocation
 * @param allocationLock unused here — presumably required as proof that the caller already
 *                       holds the allocation lock; TODO confirm
 * @throws IllegalArgumentException if a node is not a Docker container, has no allocation,
 *         or a node with the same hostname already exists
 */
public List<Node> addDockerNodes(List<Node> nodes, Mutex allocationLock) {
    // Validate the whole batch before writing anything
    for (Node node : nodes) {
        if (!node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) {
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": This is not a docker node");
        }
        if (!node.allocation().isPresent()) {
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": Docker containers needs to be allocated");
        }
        Optional<Node> existing = getNode(node.hostname());
        if (existing.isPresent())
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists (" +
                                               existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                                               node + ", " + node.history());
    }
    return db.addNodesInState(nodes, Node.State.reserved);
}
/* NOTE(review): the comment below described an "add provisioned nodes" method that is not present here — likely left over from a refactoring; confirm and remove. */
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
    try (Mutex lock = lockAllocation()) {
        List<Node> nodesWithResetFields = new ArrayList<>();
        for (Node node : nodes) {
            // Only provisioned or dirty nodes may become ready
            if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
                throw new IllegalArgumentException("Can not set " + node + " ready. It is not provisioned or dirty.");
            // Clear retirement/deprovision wishes carried over from the node's earlier life
            nodesWithResetFields.add(node.with(node.status().withWantToRetire(false).withWantToDeprovision(false)));
        }
        return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
    }
}
/** Sets the node with the given hostname ready, or returns it unchanged if it is already ready */
public Node setReady(String hostname, Agent agent, String reason) {
    Node nodeToReady = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
    return nodeToReady.state() == Node.State.ready
            ? nodeToReady
            : setReady(Collections.singletonList(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
    return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes as part of the given transaction. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
    return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
    try (Mutex lock = lock(application)) {
        List<Node> removableNodes = new ArrayList<>();
        for (Node node : nodes)
            removableNodes.add(node.with(node.allocation().get().removable()));
        write(removableNodes);
    }
}
/** Deactivates all reserved and active nodes of the given application, as part of the given transaction */
public void deactivate(ApplicationId application, NestedTransaction transaction) {
    try (Mutex lock = lock(application)) {
        db.writeTo(Node.State.inactive,
                   db.getNodes(application, Node.State.reserved, Node.State.active),
                   Agent.application, Optional.empty(), transaction
        );
    }
}
/**
 * Deactivates these nodes in a transaction and returns
 * the nodes in the new state which will hold if the transaction commits.
 * This method does <b>not</b> lock
 */
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
    return db.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
    return performOn(NodeListFilter.from(nodes), node -> setDirty(node, agent, reason));
}
/**
 * Set a node dirty, which is in the provisioned, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 *
 * @throws IllegalArgumentException if the node has hardware failure
 */
public Node setDirty(Node node, Agent agent, String reason) {
    // A node with failed hardware must not be recycled; it needs repair or retirement first
    if (node.status().hardwareFailureDescription().isPresent())
        throw new IllegalArgumentException("Could not deallocate " + node.hostname() + ": It has a hardware failure");
    return db.writeTo(Node.State.dirty, node, agent, Optional.of(reason));
}
/**
 * Moves the node with the given hostname — and, if it is a Docker host, all of its children —
 * to the dirty state.
 *
 * @param hostname the host to deallocate
 * @param agent the agent performing the move
 * @param reason human-readable reason recorded with the state change
 * @return the moved nodes in their new (dirty) state
 * @throws IllegalArgumentException if the node is not found, or if any node to move is in a
 *         state other than provisioned, failed or parked
 */
public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
    Node nodeToDirty = getNode(hostname).orElseThrow(() ->
            new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
    // For a Docker host, include its children; nodes already dirty are left untouched
    List<Node> nodesToDirty =
            (nodeToDirty.type().isDockerHost() ?
                    Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                    Stream.of(nodeToDirty))
                    .filter(node -> node.state() != Node.State.dirty)
                    .collect(Collectors.toList());
    // Single legal-state set instead of chained negative filters; keep in sync with canRemove/setDirty
    Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
    List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
            .filter(node -> ! legalStates.contains(node.state()))
            .map(Node::hostname)
            .collect(Collectors.toList());
    if ( ! hostnamesNotAllowedToDirty.isEmpty()) {
        throw new IllegalArgumentException("Could not deallocate " + hostname + ": " +
                String.join(", ", hostnamesNotAllowedToDirty) + " must be in either provisioned, failed or parked state");
    }
    return nodesToDirty.stream()
            .map(node -> setDirty(node, agent, reason))
            .collect(Collectors.toList());
}
/**
 * Fails this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
    // keepAllocation=true: a failed node keeps its allocation so it can be reactivated
    return move(hostname, true, Node.State.failed, agent, Optional.of(reason));
}
/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 *
 * @return List of all the failed nodes in their new state
 */
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
 * Parks this node and returns it in its new state.
 *
 * @param keepAllocation whether the node should keep its allocation when parked
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
    return move(hostname, keepAllocation, Node.State.parked, agent, Optional.of(reason));
}
/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent, String reason) {
    return move(hostname, true, Node.State.active, agent, Optional.of(reason));
}
/** Moves all children of the given host, then the host itself, to the given state. Returns the moved nodes. */
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    List<Node> moved = new ArrayList<>();
    for (Node child : list().childrenOf(hostname).asList())
        moved.add(move(child, toState, agent, reason));
    moved.add(move(hostname, true, toState, agent, reason));
    return moved;
}
/**
 * Moves the node with the given hostname to the given state, optionally dropping its allocation.
 *
 * @throws NoSuchNodeException if the node is not found
 */
private Node move(String hostname, boolean keepAllocation, Node.State toState, Agent agent, Optional<String> reason) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
    if (!keepAllocation && node.allocation().isPresent()) {
        node = node.withoutAllocation();
    }
    return move(node, toState, agent, reason);
}
/**
 * Moves the given node to the given state under the node's lock.
 * Moving to active additionally rejects a node whose cluster and index collide with an
 * already-active node of the same application, which would otherwise corrupt the cluster.
 */
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
    if (toState == Node.State.active && ! node.allocation().isPresent())
        throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");
    try (Mutex lock = lock(node)) {
        if (toState == Node.State.active) {
            // Guard against duplicate (cluster, index) among the application's active nodes
            for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                    && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    throw new IllegalArgumentException("Could not move " + node + " to active:" +
                                                       "It has the same cluster and index as an existing node");
            }
        }
        return db.writeTo(toState, node, agent, reason);
    }
}
/*
 * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
    Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
    if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
        // Tenant containers are deleted rather than recycled; they must first have been dirtied
        if (node.state() != Node.State.dirty) {
            throw new IllegalArgumentException(
                    "Cannot make " + hostname + " available for new allocation, must be in state dirty, but was in " + node.state());
        }
        return removeRecursively(node, true).get(0);
    }
    // Idempotent for nodes that are already ready
    if (node.state() == Node.State.ready) return node;
    return setReady(Collections.singletonList(node), agent, reason).get(0);
}
/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @return List of all the nodes that have been removed
 * @throws NotFoundException if no node has the given hostname
 */
public List<Node> removeRecursively(String hostname) {
    Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname \"" + hostname + '"'));
    return removeRecursively(node, false);
}
/**
 * Removes the given node, and — for Docker hosts — its children, from the repository.
 *
 * @param force bypass the canRemove state checks
 * @throws IllegalArgumentException if removal is not permitted (wraps the underlying failure as cause)
 */
public List<Node> removeRecursively(Node node, boolean force) {
    try (Mutex lock = lockAllocation()) {
        List<Node> removed = new ArrayList<>();
        if (node.type().isDockerHost()) {
            // canRemove throws (rather than returning false) for children in an illegal state
            list().childrenOf(node).asList().stream()
                  .filter(child -> force || canRemove(child, true))
                  .forEach(removed::add);
        }
        if (force || canRemove(node, false)) removed.add(node);
        db.removeNodes(removed);
        return removed;
    } catch (RuntimeException e) {
        throw new IllegalArgumentException("Failed to delete " + node.hostname(), e);
    }
}
/**
 * Returns whether given node can be removed. Removal is allowed if:
 * Tenant node: node is unallocated
 * Non-Docker-container node: iff in state provisioned|failed|parked
 * Docker-container-node:
 * If only removing the container node: node in state ready
 * If also removing the parent node: child is in state provisioned|failed|parked|ready
 *
 * Note: this never returns false — disallowed removals throw IllegalArgumentException with
 * an explanatory message instead, so callers can surface the reason.
 */
private boolean canRemove(Node node, boolean deletingAsChild) {
    if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
        throw new IllegalArgumentException("Node is currently allocated and cannot be removed: " +
                                           node.allocation().get());
    }
    if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && !deletingAsChild) {
        // Removing the container directly: it must be ready
        if (node.state() != Node.State.ready) {
            throw new IllegalArgumentException(
                    String.format("Docker container %s can only be removed when in ready state", node.hostname()));
        }
    } else if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) {
        // Removing the container because its parent host is being removed: ready is also acceptable
        Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked,
                                                 Node.State.ready);
        if (! legalStates.contains(node.state())) {
            throw new IllegalArgumentException(String.format("Child node %s can only be removed from following states: %s",
                    node.hostname(), legalStates.stream().map(Node.State::name).collect(Collectors.joining(", "))));
        }
    } else {
        // Physical (non-container) node
        Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
        if (! legalStates.contains(node.state())) {
            throw new IllegalArgumentException(String.format("Node %s can only be removed from following states: %s",
                    node.hostname(), legalStates.stream().map(Node.State::name).collect(Collectors.joining(", "))));
        }
    }
    return true;
}
/**
 * Increases the restart generation of the active nodes matching the filter.
 * Returns the nodes in their new state.
 */
public List<Node> restart(NodeFilter filter) {
    return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
 * Increases the reboot generation of the nodes matching the filter.
 * Returns the nodes in their new state.
 */
public List<Node> reboot(NodeFilter filter) {
    return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
 * Writes this node after it has changed some internal state but NOT changed its state field.
 * This does NOT lock the node repository.
 *
 * @return the written node for convenience
 */
public Node write(Node node) { return db.writeTo(node.state(), node, Agent.system, Optional.empty()); }
/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository.
 *
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes) { return db.writeTo(nodes, Agent.system, Optional.empty()); }
/**
 * Performs an operation requiring locking on all nodes matching some filter.
 *
 * @param filter the filter determining the set of nodes where the operation will be performed
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
    // Partition matches by which lock guards them: unallocated nodes are guarded by the
    // allocation lock, allocated nodes by their application's lock
    List<Node> unallocatedNodes = new ArrayList<>();
    ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
    for (Node node : db.getNodes()) {
        if ( ! filter.matches(node)) continue;
        if (node.allocation().isPresent())
            allocatedNodes.put(node.allocation().get().owner(), node);
        else
            unallocatedNodes.add(node);
    }
    // Perform operation while holding the appropriate lock for each group
    List<Node> resultingNodes = new ArrayList<>();
    try (Mutex lock = lockAllocation()) {
        for (Node node : unallocatedNodes)
            resultingNodes.add(action.apply(node));
    }
    for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
        try (Mutex lock = lock(applicationNodes.getKey())) {
            for (Node node : applicationNodes.getValue())
                resultingNodes.add(action.apply(node));
        }
    }
    return resultingNodes;
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Returns the zone of this system */
public Zone zone() { return zone; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to allocating nodes */
public Mutex lockAllocation() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node */
public Mutex lock(Node node) {
    // Allocated nodes are guarded by their application's lock, unallocated ones by the allocation lock
    return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockAllocation();
}
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;    // persistent store (ZooKeeper via Curator)
private final Clock clock;                 // injected for testable time-sensitive decisions
private final Zone zone;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final OsVersions osVersions;
private final InfrastructureVersions infrastructureVersions;
private final FirmwareChecks firmwareChecks;
private final DockerImages dockerImages;
private final JobControl jobControl;
/**
 * Creates a node repository from a zookeeper provider.
 * This will use the system time to make time-sensitive decisions
 */
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone) {
    this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.dockerImage()), config.useCuratorClientCache());
}
/**
 * Creates a node repository from a zookeeper provider and a clock instance
 * which will be used for time-sensitive decisions.
 */
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
                      DockerImage dockerImage, boolean useCuratorClientCache) {
    this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache);
    this.zone = zone;
    this.clock = clock;
    this.flavors = flavors;
    this.nameResolver = nameResolver;
    this.osVersions = new OsVersions(db);
    this.infrastructureVersions = new InfrastructureVersions(db);
    this.firmwareChecks = new FirmwareChecks(db, clock);
    this.dockerImages = new DockerImages(db, dockerImage);
    this.jobControl = new JobControl(db);
    // Rewrite every stored node — presumably migrates persisted data to the current
    // serialization format on startup; TODO confirm intent
    for (Node.State state : Node.State.values())
        db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for nodes of the given type in this */
public DockerImage dockerImage(NodeType nodeType) { return dockerImages.dockerImageFor(nodeType); }
/** Returns the name resolver used to resolve hostnames and ip addresses */
public NameResolver nameResolver() { return nameResolver; }
/** Returns the OS versions to use for nodes in this */
public OsVersions osVersions() { return osVersions; }
/** Returns the infrastructure versions to use for nodes in this */
public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }
/** Returns the status of firmware checks for hosts managed by this. */
public FirmwareChecks firmwareChecks() { return firmwareChecks; }
/** Returns the docker images to use for nodes in this. */
public DockerImages dockerImages() { return dockerImages; }
/** Returns the status of maintenance jobs managed by this. */
public JobControl jobControl() { return jobControl; }
/**
 * Finds and returns the node with the hostname in any of the given states, or empty if not found
 *
 * @param hostname the full host name of the node
 * @param inState the states the node may be in. If no states are given, it will be returned from any state
 * @return the node, or empty if it was not found in any of the given states
 */
public Optional<Node> getNode(String hostname, Node.State ... inState) {
    return db.getNode(hostname, inState);
}
/**
 * Returns all nodes in any of the given states.
 *
 * @param inState the states to return nodes from. If no states are given, all nodes are returned
 * @return a mutable copy of the matching nodes; empty if no node matches
 */
public List<Node> getNodes(Node.State ... inState) {
    return new ArrayList<>(db.getNodes(inState));
}
/**
 * Finds and returns the nodes of the given type in any of the given states.
 *
 * @param type the node type to return
 * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
 * @return the matching nodes; empty if no node matches
 */
public List<Node> getNodes(NodeType type, Node.State ... inState) {
    return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/** Returns a filterable list of all nodes in this repository */
public NodeList list() {
    return new NodeList(getNodes());
}
/** Returns a filterable list of all load balancers in this repository */
public LoadBalancerList loadBalancers() {
    return new LoadBalancerList(database().readLoadBalancers().values());
}
/** Returns the nodes allocated to the given application, in any of the given states */
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return db.getNodes(id, inState); }
/** Returns all nodes in the inactive state */
public List<Node> getInactive() { return db.getNodes(Node.State.inactive); }
/** Returns all nodes in the failed state */
public List<Node> getFailed() { return db.getNodes(Node.State.failed); }
/**
 * Returns the ACL for the node (trusted nodes, networks and ports)
 *
 * @param node the node to compute an ACL for
 * @param candidates all nodes in the repository, used to find peers to trust
 * @param loadBalancers all load balancers, used to find networks to trust
 */
private NodeAcl getNodeAcl(Node node, NodeList candidates, LoadBalancerList loadBalancers) {
    // TreeSet ordered by hostname gives a stable, deterministic ACL
    Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
    Set<Integer> trustedPorts = new LinkedHashSet<>();
    Set<String> trustedNetworks = new LinkedHashSet<>();
    // Allocated nodes trust the other nodes of the same application, and the networks
    // of that application's load balancers
    node.allocation().ifPresent(allocation -> {
        trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
        loadBalancers.owner(allocation.owner()).asList().stream()
                     .map(LoadBalancer::instance)
                     .map(LoadBalancerInstance::networks)
                     .forEach(trustedNetworks::addAll);
    });
    trustedPorts.add(22); // presumably SSH — TODO confirm
    switch (node.type()) {
        case tenant:
            // Note: parentsOf is evaluated against the set accumulated so far, so order matters
            trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
            trustedNodes.addAll(candidates.parentsOf(trustedNodes).asList());
            trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
            if (node.state() == Node.State.ready) {
                trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
            }
            break;
        case config:
            // Config servers trust everything in the zone
            trustedNodes.addAll(candidates.asList());
            trustedPorts.add(4443);
            break;
        case proxy:
            trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
            trustedPorts.add(443);
            trustedPorts.add(4080);
            trustedPorts.add(4443);
            break;
        case controller:
            trustedPorts.add(4443);
            trustedPorts.add(443);
            break;
        default:
            // Fail loudly for node types without a defined ACL policy
            throw new IllegalArgumentException(
                    String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
                                  node.hostname(), node.type()));
    }
    return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
}
/**
 * Creates a list of node ACLs which identify which nodes the given node should trust
 *
 * @param node Node for which to generate ACLs
 * @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
 * @return List of node ACLs
 */
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
    NodeList candidates = list();
    LoadBalancerList loadBalancers = loadBalancers();
    if (children) {
        // One ACL per child node, as an unmodifiable list
        return candidates.childrenOf(node).asList().stream()
                         .map(childNode -> getNodeAcl(childNode, candidates, loadBalancers))
                         .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
    }
    return Collections.singletonList(getNodeAcl(node, candidates, loadBalancers));
}
/** Returns the flavors this node repository can allocate from */
public NodeFlavors getAvailableFlavors() {
    return flavors;
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Set<String> ipAddressPool, Optional<String> parentHostname,
                       Optional<String> modelName, Flavor flavor, NodeType type) {
    // Fall back to DNS resolution when the caller supplied no addresses
    if (ipAddresses.isEmpty()) {
        ipAddresses = nameResolver.getAllByNameOrThrow(hostname);
    }
    return Node.create(openStackId, ImmutableSet.copyOf(ipAddresses), ipAddressPool, hostname, parentHostname, modelName, flavor, type);
}
/** Creates a new node object without an IP address pool or model name */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
                       Flavor flavor, NodeType type) {
    return createNode(openStackId, hostname, ipAddresses, Collections.emptySet(), parentHostname, Optional.empty(), flavor, type);
}
/** Creates a new node object whose IP addresses are resolved from its hostname */
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
                       Flavor flavor, NodeType type) {
    return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/**
 * Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes.
 * The allocationLock parameter is unused here — presumably required as proof that the caller
 * already holds the allocation lock; TODO confirm.
 */
public List<Node> addDockerNodes(List<Node> nodes, Mutex allocationLock) {
    // Validate the whole batch before writing anything
    for (Node node : nodes) {
        if (!node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) {
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": This is not a docker node");
        }
        if (!node.allocation().isPresent()) {
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": Docker containers needs to be allocated");
        }
        Optional<Node> existing = getNode(node.hostname());
        if (existing.isPresent())
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists (" +
                                               existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                                               node + ", " + node.history());
    }
    return db.addNodesInState(nodes, Node.State.reserved);
}
/* NOTE(review): the next comment describes an "add provisioned nodes" method that is not present here — likely left over from a refactoring; confirm and remove. */
/** Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes */
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
    try (Mutex lock = lockAllocation()) {
        List<Node> nodesWithResetFields = nodes.stream()
                .map(node -> {
                    // Only provisioned or dirty nodes may become ready
                    if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
                        throw new IllegalArgumentException("Can not set " + node + " ready. It is not provisioned or dirty.");
                    // Clear retirement/deprovision wishes carried over from the node's earlier life
                    return node.with(node.status().withWantToRetire(false).withWantToDeprovision(false));
                })
                .collect(Collectors.toList());
        return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
    }
}
/** Sets the node with the given hostname ready, or returns it unchanged if it is already ready */
public Node setReady(String hostname, Agent agent, String reason) {
    Node nodeToReady = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
    if (nodeToReady.state() == Node.State.ready) return nodeToReady;
    return setReady(Collections.singletonList(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
    return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes as part of the given transaction. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
    return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
    try (Mutex lock = lock(application)) {
        List<Node> removableNodes =
                nodes.stream().map(node -> node.with(node.allocation().get().removable()))
                              .collect(Collectors.toList());
        write(removableNodes);
    }
}
/** Moves all reserved and active nodes of the given application to inactive, as part of the given transaction. */
public void deactivate(ApplicationId application, NestedTransaction transaction) {
    try (Mutex lock = lock(application)) {
        db.writeTo(Node.State.inactive,
                   db.getNodes(application, Node.State.reserved, Node.State.active),
                   Agent.application, Optional.empty(), transaction
        );
    }
}
/**
 * Deactivates these nodes in a transaction and returns the nodes in the new state
 * which will hold if the transaction commits. This method does <b>not</b> lock the node repository.
 */
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
    return db.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state. */
public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
    // Delegates to the single-node overload; performOn acquires the appropriate locks per node.
    return performOn(NodeListFilter.from(nodes), node -> setDirty(node, agent, reason));
}
/**
 * Set a node dirty, which is in the provisioned, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 *
 * @throws IllegalArgumentException if the node has hardware failure
 */
public Node setDirty(Node node, Agent agent, String reason) {
    // Note: only the hardware-failure precondition is checked here; state preconditions
    // are enforced by callers such as dirtyRecursively.
    if (node.status().hardwareFailureDescription().isPresent())
        throw new IllegalArgumentException("Could not deallocate " + node.hostname() + ": It has a hardware failure");
    return db.writeTo(Node.State.dirty, node, agent, Optional.of(reason));
}
/**
 * Moves the node with the given hostname — and, for a Docker host, all its children — to dirty.
 *
 * @return the nodes in their new state
 * @throws IllegalArgumentException if the node is not found, or if any node to dirty is not in
 *         the provisioned, failed or parked state
 */
public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
    Node nodeToDirty = getNode(hostname).orElseThrow(() ->
            new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
    // For a Docker host, include all children; nodes already dirty are left alone
    List<Node> nodesToDirty =
            (nodeToDirty.type().isDockerHost() ?
             Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
             Stream.of(nodeToDirty))
                    .filter(node -> node.state() != Node.State.dirty)
                    .collect(Collectors.toList());
    // Same legal-state idiom as canRemove: membership test against an EnumSet instead of chained negations
    Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
    List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
            .filter(node -> ! legalStates.contains(node.state()))
            .map(Node::hostname)
            .collect(Collectors.toList());
    if (!hostnamesNotAllowedToDirty.isEmpty()) {
        throw new IllegalArgumentException("Could not deallocate " + hostname + ": " +
                String.join(", ", hostnamesNotAllowedToDirty) + " must be in either provisioned, failed or parked state");
    }
    return nodesToDirty.stream()
            .map(node -> setDirty(node, agent, reason))
            .collect(Collectors.toList());
}
/**
 * Fails this node and returns it in its new state. The allocation, if any, is kept.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
    return move(hostname, true, Node.State.failed, agent, Optional.of(reason));
}
/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 *
 * @return List of all the failed nodes in their new state
 */
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
 * Parks this node and returns it in its new state.
 *
 * @param keepAllocation whether the node's allocation, if any, is kept
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
    return move(hostname, keepAllocation, Node.State.parked, agent, Optional.of(reason));
}
/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
 * Moves a previously failed or parked node back to the active state. The allocation is kept.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent, String reason) {
    return move(hostname, true, Node.State.active, agent, Optional.of(reason));
}
/**
 * Moves all children of the given hostname, then the hostname itself, to the given state.
 *
 * @return all moved nodes in their new state, children first, the parent last
 */
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    // Collect into an explicitly mutable list: Collectors.toList() makes no guarantee
    // about mutability of the returned list, and the parent is appended below.
    List<Node> moved = list().childrenOf(hostname).asList().stream()
            .map(child -> move(child, toState, agent, reason))
            .collect(Collectors.toCollection(ArrayList::new));
    moved.add(move(hostname, true, toState, agent, reason));
    return moved;
}
/** Looks up the node by hostname, optionally strips its allocation, and moves it to the given state. */
private Node move(String hostname, boolean keepAllocation, Node.State toState, Agent agent, Optional<String> reason) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
    boolean stripAllocation = ! keepAllocation && node.allocation().isPresent();
    return move(stripAllocation ? node.withoutAllocation() : node, toState, agent, reason);
}
/**
 * Moves the given node to the given state under the appropriate lock, validating the transition.
 *
 * @throws IllegalArgumentException if moving to active without an allocation, or if another active
 *         node already occupies the same cluster and index
 */
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
    if (toState == Node.State.active && ! node.allocation().isPresent())
        throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");
    try (Mutex lock = lock(node)) {
        if (toState == Node.State.active) {
            // Guard against two active nodes occupying the same cluster slot
            for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                    && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    // Message previously read "to active:It has ..." — missing separator space fixed
                    throw new IllegalArgumentException("Could not move " + node + " to active: " +
                                                       "It has the same cluster and index as an existing node");
            }
        }
        return db.writeTo(toState, node, agent, reason);
    }
}
/**
 * Used by the REST API to handle readying nodes for new allocations. For tenant docker
 * containers this will remove the node from the node repository, otherwise the node will
 * be moved to state ready.
 *
 * @throws NotFoundException if no node has the given hostname
 * @throws IllegalArgumentException if a tenant docker container is not in state dirty
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
    Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
    if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
        if (node.state() != Node.State.dirty) {
            throw new IllegalArgumentException(
                    "Cannot make " + hostname + " available for new allocation, must be in state dirty, but was in " + node.state());
        }
        // Tenant containers are deleted outright instead of being recycled to ready
        return removeRecursively(node, true).get(0);
    }
    if (node.state() == Node.State.ready) return node;
    return setReady(Collections.singletonList(node), agent, reason).get(0);
}
/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @return List of all the nodes that have been removed
 */
public List<Node> removeRecursively(String hostname) {
    Node target = getNode(hostname)
            .orElseThrow(() -> new NotFoundException("No node with hostname \"" + hostname + '"'));
    return removeRecursively(target, false);
}
/**
 * Removes this node — and, for a Docker host, all its children — from the node repository.
 *
 * @param force if true, skips the canRemove state checks
 * @return the removed nodes
 * @throws IllegalArgumentException if the removal is not allowed; the underlying cause is attached
 */
public List<Node> removeRecursively(Node node, boolean force) {
    try (Mutex lock = lockAllocation()) {
        List<Node> removed = new ArrayList<>();
        if (node.type().isDockerHost()) {
            list().childrenOf(node).asList().stream()
                  .filter(child -> force || canRemove(child, true))
                  .forEach(removed::add);
        }
        if (force || canRemove(node, false)) removed.add(node);
        db.removeNodes(removed);
        return removed;
    } catch (RuntimeException e) {
        // canRemove signals illegal states via RuntimeException; rewrap with the hostname for context
        throw new IllegalArgumentException("Failed to delete " + node.hostname(), e);
    }
}
/**
 * Returns whether given node can be removed. Removal is allowed if:
 *  Tenant node: node is unallocated
 *  Non-Docker-container node: iff in state provisioned|failed|parked
 *  Docker-container-node:
 *    If only removing the container node: node in state ready
 *    If also removing the parent node: child is in state provisioned|failed|parked|ready
 */
private boolean canRemove(Node node, boolean deletingAsChild) {
    if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
        throw new IllegalArgumentException("Node is currently allocated and cannot be removed: " +
                                           node.allocation().get());
    }

    boolean dockerContainer = node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER;
    if (dockerContainer && !deletingAsChild) {
        // Removing just the container: it must be ready
        if (node.state() != Node.State.ready) {
            throw new IllegalArgumentException(
                    String.format("Docker container %s can only be removed when in ready state", node.hostname()));
        }
        return true;
    }

    // Either a container being deleted together with its parent, or a non-container node
    Set<Node.State> legalStates = dockerContainer
            ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.ready)
            : EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
    if (! legalStates.contains(node.state())) {
        String template = dockerContainer
                ? "Child node %s can only be removed from following states: %s"
                : "Node %s can only be removed from following states: %s";
        throw new IllegalArgumentException(String.format(template,
                node.hostname(), legalStates.stream().map(Node.State::name).collect(Collectors.joining(", "))));
    }
    return true;
}
/**
 * Increases the restart generation of the active nodes matching the filter.
 * Returns the nodes in their new state.
 */
public List<Node> restart(NodeFilter filter) {
    // Only active nodes are considered; active nodes have an allocation, so allocation().get() is safe here
    return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
 * Increases the reboot generation of the nodes matching the filter.
 * Returns the nodes in their new state.
 */
public List<Node> reboot(NodeFilter filter) {
    return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
 * Writes this node after it has changed some internal state but NOT changed its state field.
 * This does NOT lock the node repository.
 *
 * @return the written node for convenience
 */
public Node write(Node node) { return db.writeTo(node.state(), node, Agent.system, Optional.empty()); }
/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository.
 *
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes) { return db.writeTo(nodes, Agent.system, Optional.empty()); }
/**
 * Performs an operation requiring locking on all nodes matching some filter.
 *
 * @param filter the filter determining the set of nodes where the operation will be performed
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
    // Partition matching nodes by owning application so each group is processed
    // under the lock appropriate for it
    List<Node> unallocatedNodes = new ArrayList<>();
    ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
    for (Node node : db.getNodes()) {
        if ( ! filter.matches(node)) continue;
        if (node.allocation().isPresent())
            allocatedNodes.put(node.allocation().get().owner(), node);
        else
            unallocatedNodes.add(node);
    }
    // Unallocated nodes are changed under the allocation lock
    List<Node> resultingNodes = new ArrayList<>();
    try (Mutex lock = lockAllocation()) {
        for (Node node : unallocatedNodes)
            resultingNodes.add(action.apply(node));
    }
    // Allocated nodes are changed under their owning application's lock.
    // NOTE(review): nodes are listed before any lock is taken, so an allocation may change
    // in between — confirm this race is acceptable to all callers.
    for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
        try (Mutex lock = lock(applicationNodes.getKey())) {
            for (Node node : applicationNodes.getValue())
                resultingNodes.add(action.apply(node));
        }
    }
    return resultingNodes;
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Returns the zone of this system */
public Zone zone() { return zone; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to allocating nodes */
public Mutex lockAllocation() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node: the owner's application lock if allocated, otherwise the allocation lock */
public Mutex lock(Node node) {
    return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockAllocation();
}
} |
I don't like keeping this internal state; ideally we would just create `InfraDeployment` once for every app and then just return that, but this is kinda required by the interface: https://github.com/vespa-engine/vespa/blob/c3667718a63a8703bf62833dcb92b7ad5422d0cc/config-provisioning/src/main/java/com/yahoo/config/provision/Deployment.java#L18 | public void prepare() {
if (prepared) return;
try (Mutex lock = nodeRepository.lock(application.getApplicationId())) {
NodeType nodeType = application.getCapacity().type();
Optional<Version> targetVersion = infrastructureVersions.getTargetVersionFor(nodeType);
if (targetVersion.isEmpty()) {
logger.log(LogLevel.DEBUG, "No target version set for " + nodeType + ", removing application");
removeApplication(application.getApplicationId());
return;
}
candidateNodes = nodeRepository
.getNodes(nodeType, Node.State.ready, Node.State.reserved, Node.State.active, Node.State.inactive);
if (candidateNodes.isEmpty()) {
logger.log(LogLevel.DEBUG, "No nodes to provision for " + nodeType + ", removing application");
removeApplication(application.getApplicationId());
return;
}
if (!candidateNodes.stream().allMatch(node ->
node.state() == Node.State.active &&
node.allocation()
.map(allocation -> allocation.membership().cluster().vespaVersion().equals(targetVersion.get()))
.orElse(false))) {
hostSpecs = provisioner.prepare(
application.getApplicationId(),
application.getClusterSpecWithVersion(targetVersion.get()),
application.getCapacity(),
1,
logger::log);
}
prepared = true;
}
} | if (prepared) return; | public void prepare() {
if (prepared) return;
try (Mutex lock = nodeRepository.lock(application.getApplicationId())) {
NodeType nodeType = application.getCapacity().type();
Optional<Version> targetVersion = infrastructureVersions.getTargetVersionFor(nodeType);
if (targetVersion.isEmpty()) {
logger.log(LogLevel.DEBUG, "No target version set for " + nodeType + ", removing application");
removeApplication(application.getApplicationId());
return;
}
candidateNodes = nodeRepository
.getNodes(nodeType, Node.State.ready, Node.State.reserved, Node.State.active, Node.State.inactive);
if (candidateNodes.isEmpty()) {
logger.log(LogLevel.DEBUG, "No nodes to provision for " + nodeType + ", removing application");
removeApplication(application.getApplicationId());
return;
}
if (!allActiveNodesOn(targetVersion.get(), candidateNodes)) {
hostSpecs = provisioner.prepare(
application.getApplicationId(),
application.getClusterSpecWithVersion(targetVersion.get()),
application.getCapacity(),
1,
logger::log);
}
prepared = true;
}
} | class InfraDeployment implements Deployment {
private final InfraApplicationApi application;
private boolean prepared = false;
private List<Node> candidateNodes;
private List<HostSpec> hostSpecs;
private InfraDeployment(InfraApplicationApi application) {
this.application = application;
}
@Override
@Override
public void activate() {
    // Prepares (idempotently, guarded by the 'prepared' flag) and then activates this infra application.
    try (Mutex lock = nodeRepository.lock(application.getApplicationId())) {
        prepare();
        if (candidateNodes == null) return; // prepare() bailed out: no target version or no candidate nodes
        if (hostSpecs != null) {
            // prepare() produced new host specs, so a provisioner activation is needed
            NestedTransaction nestedTransaction = new NestedTransaction();
            provisioner.activate(nestedTransaction, application.getApplicationId(), hostSpecs);
            nestedTransaction.commit();
        }
        duperModel.infraApplicationActivated(
                application.getApplicationId(),
                candidateNodes.stream().map(Node::hostname).map(HostName::from).collect(Collectors.toList()));
        logger.log(LogLevel.DEBUG, this::generateActivationLogMessage);
    }
}
@Override
public void restart(HostFilter filter) {
    // Restart of matching hosts is delegated directly to the provisioner
    provisioner.restart(application.getApplicationId(), filter);
}
/** Removes the given infra application from the provisioner and duper model, if it is currently active. */
private void removeApplication(ApplicationId applicationId) {
    if ( ! duperModel.infraApplicationIsActive(applicationId)) return;
    NestedTransaction transaction = new NestedTransaction();
    provisioner.remove(transaction, applicationId);
    transaction.commit();
    duperModel.infraApplicationRemoved(applicationId);
}
/** Builds the activation log line: hostnames are listed when few, otherwise just a host count. */
private String generateActivationLogMessage() {
    String detail = candidateNodes.size() < 10
            ? ": " + candidateNodes.stream().map(Node::hostname).collect(Collectors.joining(","))
            : " with " + candidateNodes.size() + " hosts";
    return "Infrastructure application " + application.getApplicationId() + " activated" + detail;
}
} | class InfraDeployment implements Deployment {
private final InfraApplicationApi application;
private boolean prepared = false;
private List<Node> candidateNodes;
private List<HostSpec> hostSpecs;
private InfraDeployment(InfraApplicationApi application) {
this.application = application;
}
@Override
@Override
public void activate() {
try (Mutex lock = nodeRepository.lock(application.getApplicationId())) {
prepare();
if (candidateNodes == null) return;
if (hostSpecs != null) {
NestedTransaction nestedTransaction = new NestedTransaction();
provisioner.activate(nestedTransaction, application.getApplicationId(), hostSpecs);
nestedTransaction.commit();
}
duperModel.infraApplicationActivated(
application.getApplicationId(),
candidateNodes.stream().map(Node::hostname).map(HostName::from).collect(Collectors.toList()));
logger.log(LogLevel.DEBUG, () -> generateActivationLogMessage(candidateNodes, application.getApplicationId()));
}
}
@Override
public void restart(HostFilter filter) {
provisioner.restart(application.getApplicationId(), filter);
}
} |
Consider extracting a method for this condition. Maybe something like `if (!activeNodesOn(version, candidates))`? | public void prepare() {
if (prepared) return;
try (Mutex lock = nodeRepository.lock(application.getApplicationId())) {
NodeType nodeType = application.getCapacity().type();
Optional<Version> targetVersion = infrastructureVersions.getTargetVersionFor(nodeType);
if (targetVersion.isEmpty()) {
logger.log(LogLevel.DEBUG, "No target version set for " + nodeType + ", removing application");
removeApplication(application.getApplicationId());
return;
}
candidateNodes = nodeRepository
.getNodes(nodeType, Node.State.ready, Node.State.reserved, Node.State.active, Node.State.inactive);
if (candidateNodes.isEmpty()) {
logger.log(LogLevel.DEBUG, "No nodes to provision for " + nodeType + ", removing application");
removeApplication(application.getApplicationId());
return;
}
if (!candidateNodes.stream().allMatch(node ->
node.state() == Node.State.active &&
node.allocation()
.map(allocation -> allocation.membership().cluster().vespaVersion().equals(targetVersion.get()))
.orElse(false))) {
hostSpecs = provisioner.prepare(
application.getApplicationId(),
application.getClusterSpecWithVersion(targetVersion.get()),
application.getCapacity(),
1,
logger::log);
}
prepared = true;
}
} | if (!candidateNodes.stream().allMatch(node -> | public void prepare() {
if (prepared) return;
try (Mutex lock = nodeRepository.lock(application.getApplicationId())) {
NodeType nodeType = application.getCapacity().type();
Optional<Version> targetVersion = infrastructureVersions.getTargetVersionFor(nodeType);
if (targetVersion.isEmpty()) {
logger.log(LogLevel.DEBUG, "No target version set for " + nodeType + ", removing application");
removeApplication(application.getApplicationId());
return;
}
candidateNodes = nodeRepository
.getNodes(nodeType, Node.State.ready, Node.State.reserved, Node.State.active, Node.State.inactive);
if (candidateNodes.isEmpty()) {
logger.log(LogLevel.DEBUG, "No nodes to provision for " + nodeType + ", removing application");
removeApplication(application.getApplicationId());
return;
}
if (!allActiveNodesOn(targetVersion.get(), candidateNodes)) {
hostSpecs = provisioner.prepare(
application.getApplicationId(),
application.getClusterSpecWithVersion(targetVersion.get()),
application.getCapacity(),
1,
logger::log);
}
prepared = true;
}
} | class InfraDeployment implements Deployment {
private final InfraApplicationApi application;
private boolean prepared = false;
private List<Node> candidateNodes;
private List<HostSpec> hostSpecs;
private InfraDeployment(InfraApplicationApi application) {
this.application = application;
}
@Override
@Override
public void activate() {
try (Mutex lock = nodeRepository.lock(application.getApplicationId())) {
prepare();
if (candidateNodes == null) return;
if (hostSpecs != null) {
NestedTransaction nestedTransaction = new NestedTransaction();
provisioner.activate(nestedTransaction, application.getApplicationId(), hostSpecs);
nestedTransaction.commit();
}
duperModel.infraApplicationActivated(
application.getApplicationId(),
candidateNodes.stream().map(Node::hostname).map(HostName::from).collect(Collectors.toList()));
logger.log(LogLevel.DEBUG, this::generateActivationLogMessage);
}
}
@Override
public void restart(HostFilter filter) {
provisioner.restart(application.getApplicationId(), filter);
}
private void removeApplication(ApplicationId applicationId) {
if (duperModel.infraApplicationIsActive(applicationId)) {
NestedTransaction nestedTransaction = new NestedTransaction();
provisioner.remove(nestedTransaction, applicationId);
nestedTransaction.commit();
duperModel.infraApplicationRemoved(applicationId);
}
}
private String generateActivationLogMessage() {
String detail;
if (candidateNodes.size() < 10) {
detail = ": " + candidateNodes.stream().map(Node::hostname).collect(Collectors.joining(","));
} else {
detail = " with " + candidateNodes.size() + " hosts";
}
return "Infrastructure application " + application.getApplicationId() + " activated" + detail;
}
} | class InfraDeployment implements Deployment {
private final InfraApplicationApi application;
private boolean prepared = false;
private List<Node> candidateNodes;
private List<HostSpec> hostSpecs;
private InfraDeployment(InfraApplicationApi application) {
this.application = application;
}
@Override
@Override
public void activate() {
try (Mutex lock = nodeRepository.lock(application.getApplicationId())) {
prepare();
if (candidateNodes == null) return;
if (hostSpecs != null) {
NestedTransaction nestedTransaction = new NestedTransaction();
provisioner.activate(nestedTransaction, application.getApplicationId(), hostSpecs);
nestedTransaction.commit();
}
duperModel.infraApplicationActivated(
application.getApplicationId(),
candidateNodes.stream().map(Node::hostname).map(HostName::from).collect(Collectors.toList()));
logger.log(LogLevel.DEBUG, () -> generateActivationLogMessage(candidateNodes, application.getApplicationId()));
}
}
@Override
public void restart(HostFilter filter) {
provisioner.restart(application.getApplicationId(), filter);
}
} |
I think we should return an error message if both endpoint and host are set. | static CommandLineArguments build(String[] args) {
final CommandLineArguments cmdArgs;
try {
cmdArgs = SingleCommand.singleCommand(CommandLineArguments.class).parse(args);
} catch (Exception e) {
System.err.println(e.getMessage());
System.err.println("Use --help to show usage.\n");
return null;
}
if (cmdArgs.helpOption.showHelpIfRequested()) {
return null;
}
if (cmdArgs.endpointArg != null) {
try {
URL url = new URL(cmdArgs.endpointArg);
} catch (MalformedURLException e) {
e.printStackTrace(System.err);
return null;
}
} else {
if (cmdArgs.hostArg == null) {
System.err.println("'--host' or '--endpoint' not set.");
return null;
}
}
if (cmdArgs.priorityArg != null && ! checkPriorityFlag(cmdArgs.priorityArg)) {
return null;
}
for (String header : cmdArgs.headers) {
try {
cmdArgs.parsedHeaders.add(BasicLineParser.parseHeader(header, null));
} catch (ParseException e) {
System.err.printf("Invalid header: '%s' (%s)%n", header, e.getMessage());
return null;
}
}
return cmdArgs;
} | } | static CommandLineArguments build(String[] args) {
final CommandLineArguments cmdArgs;
try {
cmdArgs = SingleCommand.singleCommand(CommandLineArguments.class).parse(args);
} catch (Exception e) {
System.err.println(e.getMessage());
System.err.println("Use --help to show usage.\n");
return null;
}
if (cmdArgs.helpOption.showHelpIfRequested()) {
return null;
}
if (cmdArgs.endpointArg != null) {
if (cmdArgs.hostArg != null) {
System.err.println("Cannot set both '--host' and '--endpoint' ");
return null;
}
try {
URL url = new URL(cmdArgs.endpointArg);
} catch (MalformedURLException e) {
e.printStackTrace(System.err);
return null;
}
} else {
if (cmdArgs.hostArg == null) {
System.err.println("'--host' or '--endpoint' not set.");
return null;
}
}
if (cmdArgs.priorityArg != null && ! checkPriorityFlag(cmdArgs.priorityArg)) {
return null;
}
for (String header : cmdArgs.headers) {
try {
cmdArgs.parsedHeaders.add(BasicLineParser.parseHeader(header, null));
} catch (ParseException e) {
System.err.printf("Invalid header: '%s' (%s)%n", header, e.getMessage());
return null;
}
}
return cmdArgs;
} | class CommandLineArguments {
/**
* Creates a CommandLineArguments instance and populates it with data.
*
* @param args array of arguments.
* @return null on failure or if help option is set to true.
*/
/** Returns whether the given --priority value is one of the allowed names; prints an error otherwise. */
private static boolean checkPriorityFlag(String priorityArg) {
    boolean valid;
    switch (priorityArg) {
        case "HIGHEST": case "VERY_HIGH":
        case "HIGH_1": case "HIGH_2": case "HIGH_3":
        case "NORMAL_1": case "NORMAL_2": case "NORMAL_3":
        case "NORMAL_4": case "NORMAL_5": case "NORMAL_6":
        case "LOW_1": case "LOW_2": case "LOW_3":
        case "VERY_LOW": case "LOWEST":
            valid = true;
            break;
        default:
            valid = false;
    }
    if ( ! valid) {
        System.err.println("Not valid value for priority. Allowed values are HIGHEST, VERY_HIGH, HIGH_[1-3], " +
                "NORMAL_[1-6], LOW_[1-3], VERY_LOW, and LOWEST.");
    }
    return valid;
}
@Inject
private HelpOption helpOption;
@Option(name = {"--useV3Protocol"}, description = "Use V3 protocol to gateway. This is the default protocol.")
private boolean enableV3Protocol = true;
@Option(name = {"--file"},
description = "The name of the input file to read.")
private String fileArg = null;
@Option(name = {"--add-root-element-to-xml"},
description = "Add <vespafeed> tag to XML document, makes it easier to feed raw data.")
private boolean addRootElementToXml = false;
@Option(name = {"--route"},
description = "(=default)The route to send the data to.")
private String routeArg = "default";
@Option(name = {"--endpoint"},
description = "Vespa endpoint.")
private String endpointArg;
@Option(name = {"--host"},
description = "The host(s) for the gateway. If using several, use comma to separate them.")
private String hostArg;
@Option(name = {"--port"},
description = "The port for the host of the gateway.")
private int portArg = 4080;
@Option(name = {"--timeout"},
description = "(=180) The time (in seconds) allowed for sending operations.")
private long timeoutArg = 180;
@Option(name = {"--useCompression"},
description = "Use compression over network.")
private boolean useCompressionArg = false;
@Option(name = {"--useDynamicThrottling"},
description = "Try to maximize throughput by using dynamic throttling.")
private boolean useDynamicThrottlingArg = false;
@Option(name = {"--maxpending"},
description = "The maximum number of operations that are allowed " +
"to be pending at any given time.")
private int maxPendingOperationCountArg = 10000;
@Option(name = {"-v", "--verbose"},
description = "Enable verbose output of progress.")
private boolean verboseArg = false;
@Option(name = {"--noretry"},
description = "Turns off retries of recoverable failures..")
private boolean noRetryArg = false;
@Option(name = {"--retrydelay"},
description = "The time (in seconds) to wait between retries of a failed operation.")
private int retrydelayArg = 1;
@Option(name = {"--trace"},
description = "(=0 (=off)) The trace level of network traffic.")
private int traceArg = 0;
@Option(name = {"--printTraceEveryXOperation"},
description = "(=1) How often to to tracing.")
private int traceEveryXOperation = 1;
@Option(name = {"--validate"},
description = "Run validation tool on input files instead of feeding them.")
private boolean validateArg = false;
@Option(name = {"--priority"},
description = "Specify priority of sent messages, see documentation ")
private String priorityArg = null;
@Option(name = {"--numPersistentConnectionsPerEndpoint"},
description = "How many tcp connections to establish per endoint.)")
private int numPersistentConnectionsPerEndpoint = 4;
@Option(name = {"--maxChunkSizeBytes"},
description = "How much data to send to gateway in each message.")
private int maxChunkSizeBytes = 20 * 1024;
@Option(name = {"--whenVerboseEnabledPrintMessageForEveryXDocuments"},
description = "How often to print verbose message.)")
private int whenVerboseEnabledPrintMessageForEveryXDocuments = 1000;
@Option(name = {"--useTls"},
description = "Use TLS when connecting to endpoint")
private boolean useTls = false;
@Option(name = {"--insecure"},
description = "Skip hostname verification when using TLS")
private boolean insecure = false;
@Option(name = {"--header"},
description = "Add http header to every request. Header must have the format '<Name>: <Value>'. Use this parameter multiple times for multiple headers")
private List<String> headers = new ArrayList<>();
private final List<Header> parsedHeaders = new ArrayList<>();
/** Returns how often (in documents) a verbose progress message is printed. */
int getWhenVerboseEnabledPrintMessageForEveryXDocuments() {
    return whenVerboseEnabledPrintMessageForEveryXDocuments;
}
/** Returns the --file argument value; may be null when not set. */
public String getFile() { return fileArg; } // stray empty statement (";") after the body removed
/** Returns whether verbose progress output was requested. */
public boolean getVerbose() { return verboseArg; }
/** Returns whether a <vespafeed> root element should be added to XML input. */
public boolean getAddRootElementToXml() { return addRootElementToXml; }
/**
 * Builds the SessionParams for the feed client from the parsed command line arguments.
 *
 * @param useJson whether the input is JSON (otherwise XML)
 * @throws IllegalStateException if endpointArg is malformed — build() validates it up front,
 *         so hitting this indicates a programming error rather than bad user input
 */
SessionParams createSessionParams(boolean useJson) {
    final int minThrottleValue = useDynamicThrottlingArg ? 10 : 0;
    ConnectionParams.Builder connectionParamsBuilder = new ConnectionParams.Builder();
    parsedHeaders.forEach(header -> connectionParamsBuilder.addHeader(header.getName(), header.getValue()));
    SessionParams.Builder builder = new SessionParams.Builder()
            .setFeedParams(
                    new FeedParams.Builder()
                            .setDataFormat(useJson
                                           ? FeedParams.DataFormat.JSON_UTF8
                                           : FeedParams.DataFormat.XML_UTF8)
                            .setRoute(routeArg)
                            .setMaxInFlightRequests(maxPendingOperationCountArg)
                            .setClientTimeout(timeoutArg, TimeUnit.SECONDS)
                            .setServerTimeout(timeoutArg, TimeUnit.SECONDS)
                            .setLocalQueueTimeOut(timeoutArg * 1000)
                            .setPriority(priorityArg)
                            .setMaxChunkSizeBytes(maxChunkSizeBytes)
                            .build()
            )
            .setConnectionParams(
                    connectionParamsBuilder
                            .setHostnameVerifier(insecure ? NoopHostnameVerifier.INSTANCE :
                                                 SSLConnectionSocketFactory.getDefaultHostnameVerifier())
                            .setUseCompression(useCompressionArg)
                            .setMaxRetries(noRetryArg ? 0 : 100)
                            .setMinTimeBetweenRetries(retrydelayArg, TimeUnit.SECONDS)
                            .setDryRun(validateArg)
                            .setTraceLevel(traceArg)
                            .setTraceEveryXOperation(traceEveryXOperation)
                            .setPrintTraceToStdErr(traceArg > 0)
                            .setNumPersistentConnectionsPerEndpoint(numPersistentConnectionsPerEndpoint)
                            .build()
            )
            .setThrottlerMinSize(minThrottleValue)
            .setClientQueueSize(maxPendingOperationCountArg);
    if (endpointArg != null) {
        try {
            builder.addCluster(new Cluster.Builder()
                                       .addEndpoint(Endpoint.create(new URL(endpointArg)))
                                       .build());
        }
        catch (MalformedURLException e) {
            // Previously swallowed silently, yielding a session with no cluster and a confusing
            // failure later. build() already validated the url, so this is unreachable in the
            // normal flow; fail loudly if it ever happens.
            throw new IllegalStateException("Invalid endpoint url '" + endpointArg + "'", e);
        }
    }
    else {
        Iterable<String> hosts = Splitter.on(',').trimResults().split(hostArg);
        for (String host : hosts) {
            builder.addCluster(new Cluster.Builder()
                                       .addEndpoint(Endpoint.create(host, portArg, useTls))
                                       .build());
        }
    }
    return builder.build();
}
} | class CommandLineArguments {
/**
* Creates a CommandLineArguments instance and populates it with data.
*
* @param args array of arguments.
* @return null on failure or if help option is set to true.
*/
private static boolean checkPriorityFlag(String priorityArg) {
switch (priorityArg) {
case "HIGHEST":
case "VERY_HIGH":
case "HIGH_1":
case "HIGH_2":
case "HIGH_3":
case "NORMAL_1":
case "NORMAL_2":
case "NORMAL_3":
case "NORMAL_4":
case "NORMAL_5":
case "NORMAL_6":
case "LOW_1":
case "LOW_2":
case "LOW_3":
case "VERY_LOW":
case "LOWEST":
return true;
default:
System.err.println("Not valid value for priority. Allowed values are HIGHEST, VERY_HIGH, HIGH_[1-3], " +
"NORMAL_[1-6], LOW_[1-3], VERY_LOW, and LOWEST.");
return false;
}
}
@Inject
private HelpOption helpOption;
@Option(name = {"--useV3Protocol"}, description = "Use V3 protocol to gateway. This is the default protocol.")
private boolean enableV3Protocol = true;
@Option(name = {"--file"},
description = "The name of the input file to read.")
private String fileArg = null;
@Option(name = {"--add-root-element-to-xml"},
description = "Add <vespafeed> tag to XML document, makes it easier to feed raw data.")
private boolean addRootElementToXml = false;
@Option(name = {"--route"},
description = "(=default)The route to send the data to.")
private String routeArg = "default";
@Option(name = {"--endpoint"},
description = "Vespa endpoint.")
private String endpointArg;
@Option(name = {"--host"},
description = "The host(s) for the gateway. If using several, use comma to separate them.")
private String hostArg;
@Option(name = {"--port"},
description = "The port for the host of the gateway.")
private int portArg = 4080;
@Option(name = {"--timeout"},
description = "(=180) The time (in seconds) allowed for sending operations.")
private long timeoutArg = 180;
@Option(name = {"--useCompression"},
description = "Use compression over network.")
private boolean useCompressionArg = false;
@Option(name = {"--useDynamicThrottling"},
description = "Try to maximize throughput by using dynamic throttling.")
private boolean useDynamicThrottlingArg = false;
@Option(name = {"--maxpending"},
description = "The maximum number of operations that are allowed " +
"to be pending at any given time.")
private int maxPendingOperationCountArg = 10000;
@Option(name = {"-v", "--verbose"},
description = "Enable verbose output of progress.")
private boolean verboseArg = false;
@Option(name = {"--noretry"},
description = "Turns off retries of recoverable failures..")
private boolean noRetryArg = false;
@Option(name = {"--retrydelay"},
description = "The time (in seconds) to wait between retries of a failed operation.")
private int retrydelayArg = 1;
@Option(name = {"--trace"},
description = "(=0 (=off)) The trace level of network traffic.")
private int traceArg = 0;
@Option(name = {"--printTraceEveryXOperation"},
description = "(=1) How often to to tracing.")
private int traceEveryXOperation = 1;
@Option(name = {"--validate"},
description = "Run validation tool on input files instead of feeding them.")
private boolean validateArg = false;
@Option(name = {"--priority"},
description = "Specify priority of sent messages, see documentation ")
private String priorityArg = null;
@Option(name = {"--numPersistentConnectionsPerEndpoint"},
description = "How many tcp connections to establish per endoint.)")
private int numPersistentConnectionsPerEndpoint = 4;
@Option(name = {"--maxChunkSizeBytes"},
description = "How much data to send to gateway in each message.")
private int maxChunkSizeBytes = 20 * 1024;
@Option(name = {"--whenVerboseEnabledPrintMessageForEveryXDocuments"},
description = "How often to print verbose message.)")
private int whenVerboseEnabledPrintMessageForEveryXDocuments = 1000;
@Option(name = {"--useTls"},
description = "Use TLS when connecting to endpoint")
private boolean useTls = false;
@Option(name = {"--insecure"},
description = "Skip hostname verification when using TLS")
private boolean insecure = false;
@Option(name = {"--header"},
description = "Add http header to every request. Header must have the format '<Name>: <Value>'. Use this parameter multiple times for multiple headers")
private List<String> headers = new ArrayList<>();
private final List<Header> parsedHeaders = new ArrayList<>();
int getWhenVerboseEnabledPrintMessageForEveryXDocuments() {
return whenVerboseEnabledPrintMessageForEveryXDocuments;
}
public String getFile() { return fileArg; };
public boolean getVerbose() { return verboseArg; }
public boolean getAddRootElementToXml() { return addRootElementToXml; }
SessionParams createSessionParams(boolean useJson) {
final int minThrottleValue = useDynamicThrottlingArg ? 10 : 0;
ConnectionParams.Builder connectionParamsBuilder = new ConnectionParams.Builder();
parsedHeaders.forEach(header -> connectionParamsBuilder.addHeader(header.getName(), header.getValue()));
SessionParams.Builder builder = new SessionParams.Builder()
.setFeedParams(
new FeedParams.Builder()
.setDataFormat(useJson
? FeedParams.DataFormat.JSON_UTF8
: FeedParams.DataFormat.XML_UTF8)
.setRoute(routeArg)
.setMaxInFlightRequests(maxPendingOperationCountArg)
.setClientTimeout(timeoutArg, TimeUnit.SECONDS)
.setServerTimeout(timeoutArg, TimeUnit.SECONDS)
.setLocalQueueTimeOut(timeoutArg * 1000)
.setPriority(priorityArg)
.setMaxChunkSizeBytes(maxChunkSizeBytes)
.build()
)
.setConnectionParams(
connectionParamsBuilder
.setHostnameVerifier(insecure ? NoopHostnameVerifier.INSTANCE :
SSLConnectionSocketFactory.getDefaultHostnameVerifier())
.setUseCompression(useCompressionArg)
.setMaxRetries(noRetryArg ? 0 : 100)
.setMinTimeBetweenRetries(retrydelayArg, TimeUnit.SECONDS)
.setDryRun(validateArg)
.setTraceLevel(traceArg)
.setTraceEveryXOperation(traceEveryXOperation)
.setPrintTraceToStdErr(traceArg > 0)
.setNumPersistentConnectionsPerEndpoint(numPersistentConnectionsPerEndpoint)
.build()
)
.setThrottlerMinSize(minThrottleValue)
.setClientQueueSize(maxPendingOperationCountArg);
if (endpointArg != null) {
try {
builder.addCluster(new Cluster.Builder()
.addEndpoint(Endpoint.create(new URL(endpointArg)))
.build());
}
catch (MalformedURLException e) {}
}
else {
Iterable<String> hosts = Splitter.on(',').trimResults().split(hostArg);
for (String host : hosts) {
builder.addCluster(new Cluster.Builder()
.addEndpoint(Endpoint.create(host, portArg, useTls))
.build());
}
}
return builder.build();
}
} |
This is not synchronized. I suggest using an AtomicInt as locking does not seem necessary. | private void run() {
while (state == OPEN) {
try {
selector.select(100);
} catch (IOException e) {
log.log(Level.WARNING, "error during select", e);
}
handleEvents();
Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
keys.remove();
if (!handleIOEvents(conn, key)) {
handleCloseConnection(conn);
}
}
scheduler.checkTasks(System.currentTimeMillis());
}
synchronized (this) {
state = CLOSED;
}
handleEvents();
Iterator<SelectionKey> keys = selector.keys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
handleCloseConnection(conn);
}
try { selector.close(); } catch (Exception e) {}
parent.notifyDone(this);
} | while (state == OPEN) { | private void run() {
while (state == OPEN) {
try {
selector.select(100);
} catch (IOException e) {
log.log(Level.WARNING, "error during select", e);
}
handleEvents();
Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
keys.remove();
if (!handleIOEvents(conn, key)) {
handleCloseConnection(conn);
}
}
scheduler.checkTasks(System.currentTimeMillis());
}
synchronized (this) {
state = CLOSED;
}
handleEvents();
Iterator<SelectionKey> keys = selector.keys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
handleCloseConnection(conn);
}
try { selector.close(); } catch (Exception e) {}
parent.notifyDone(this);
} | class SyncCmd implements Runnable {
boolean done = false;
public synchronized void waitDone() {
while (!done) {
try { wait(); } catch (InterruptedException e) {}
}
}
public synchronized void run() {
done = true;
notify();
}
} | class SyncCmd implements Runnable {
boolean done = false;
public synchronized void waitDone() {
while (!done) {
try { wait(); } catch (InterruptedException e) {}
}
}
public synchronized void run() {
done = true;
notify();
}
} |
now done in transport thread | private void run() {
while (state == OPEN) {
try {
selector.select(100);
} catch (IOException e) {
log.log(Level.WARNING, "error during select", e);
}
handleEvents();
Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
keys.remove();
if (!handleIOEvents(conn, key)) {
handleCloseConnection(conn);
}
}
scheduler.checkTasks(System.currentTimeMillis());
}
synchronized (this) {
state = CLOSED;
}
handleEvents();
Iterator<SelectionKey> keys = selector.keys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
handleCloseConnection(conn);
}
try { selector.close(); } catch (Exception e) {}
parent.notifyDone(this);
} | while (state == OPEN) { | private void run() {
while (state == OPEN) {
try {
selector.select(100);
} catch (IOException e) {
log.log(Level.WARNING, "error during select", e);
}
handleEvents();
Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
keys.remove();
if (!handleIOEvents(conn, key)) {
handleCloseConnection(conn);
}
}
scheduler.checkTasks(System.currentTimeMillis());
}
synchronized (this) {
state = CLOSED;
}
handleEvents();
Iterator<SelectionKey> keys = selector.keys().iterator();
while (keys.hasNext()) {
SelectionKey key = keys.next();
Connection conn = (Connection) key.attachment();
handleCloseConnection(conn);
}
try { selector.close(); } catch (Exception e) {}
parent.notifyDone(this);
} | class SyncCmd implements Runnable {
boolean done = false;
public synchronized void waitDone() {
while (!done) {
try { wait(); } catch (InterruptedException e) {}
}
}
public synchronized void run() {
done = true;
notify();
}
} | class SyncCmd implements Runnable {
boolean done = false;
public synchronized void waitDone() {
while (!done) {
try { wait(); } catch (InterruptedException e) {}
}
}
public synchronized void run() {
done = true;
notify();
}
} |
Wont this command fail when run inside a container that does not have the new metrics proxy? F.ex. 6.330? | private void pushMetricsToContainer(NodeAgentContext context, List<DimensionMetrics> metrics) {
StringBuilder params = new StringBuilder();
try {
for (DimensionMetrics dimensionMetrics : metrics) {
params.append(dimensionMetrics.toSecretAgentReport());
}
String wrappedMetrics = "s:" + params.toString();
runPushMetricsCommand(context, wrappedMetrics, true);
runPushMetricsCommand(context, wrappedMetrics, false);
} catch (DockerExecTimeoutException | JsonProcessingException e) {
context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
}
} | runPushMetricsCommand(context, wrappedMetrics, true); | private void pushMetricsToContainer(NodeAgentContext context, List<DimensionMetrics> metrics) {
StringBuilder params = new StringBuilder();
try {
for (DimensionMetrics dimensionMetrics : metrics) {
params.append(dimensionMetrics.toSecretAgentReport());
}
} catch (JsonProcessingException e) {
context.log(logger, LogLevel.WARNING, "Failed to wrap metrics in secret agent report", e);
return;
}
String wrappedMetrics = "s:" + params.toString();
runPushMetricsCommand(context, wrappedMetrics, true);
runPushMetricsCommand(context, wrappedMetrics, false);
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean hasResumedNode = false;
private boolean hasStartedServices = true;
private final NodeAgentContextSupplier contextSupplier;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Optional<CredentialsMaintainer> credentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
private final DoubleFlag containerCpuCap;
private int numberOfUnhandledException = 0;
private DockerImage imageBeingDownloaded = null;
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();
private final Thread loopThread;
/**
* ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
* NodeAgent explicitly starting it.
* STARTING state is set just before we attempt to start a container, if successful we move to the next state.
* Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
* NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
* to get updated state of the container.
*/
enum ContainerState {
ABSENT,
STARTING,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
private NodeSpec lastNode = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
public NodeAgentImpl(
final NodeAgentContextSupplier contextSupplier,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final StorageMaintainer storageMaintainer,
final FlagSource flagSource,
final Optional<CredentialsMaintainer> credentialsMaintainer,
final Optional<AclMaintainer> aclMaintainer,
final Optional<HealthChecker> healthChecker) {
this.contextSupplier = contextSupplier;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.credentialsMaintainer = credentialsMaintainer;
this.aclMaintainer = aclMaintainer;
this.healthChecker = healthChecker;
this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource)
.with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname());
this.loopThread = new Thread(() -> {
while (!terminated.get()) {
try {
NodeAgentContext context = contextSupplier.nextContext();
converge(context);
} catch (InterruptedException ignored) { }
}
});
this.loopThread.setName("tick-" + contextSupplier.currentContext().hostname());
}
@Override
public void start() {
loopThread.start();
}
@Override
public void stopForRemoval() {
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
contextSupplier.interrupt();
do {
try {
loopThread.join();
} catch (InterruptedException ignored) { }
} while (loopThread.isAlive());
contextSupplier.currentContext().log(logger, "Stopped");
}
void startServicesIfNeeded(NodeAgentContext context) {
if (!hasStartedServices) {
context.log(logger, "Starting services");
dockerOperations.startServices(context);
hasStartedServices = true;
}
}
void resumeNodeIfNeeded(NodeAgentContext context) {
if (!hasResumedNode) {
context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
dockerOperations.resumeNode(context);
hasResumedNode = true;
}
}
private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) {
final NodeAttributes currentNodeAttributes = new NodeAttributes();
final NodeAttributes newNodeAttributes = new NodeAttributes();
if (context.node().getWantedRestartGeneration().isPresent() &&
!Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) {
currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration());
newNodeAttributes.withRestartGeneration(currentRestartGeneration);
}
if (!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) {
currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration());
newNodeAttributes.withRebootGeneration(currentRebootGeneration);
}
Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN);
if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) {
DockerImage currentImage = context.node().getCurrentDockerImage().orElse(DockerImage.EMPTY);
DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY);
currentNodeAttributes.withDockerImage(currentImage);
currentNodeAttributes.withVespaVersion(currentImage.tagAsVersion());
newNodeAttributes.withDockerImage(newImage);
newNodeAttributes.withVespaVersion(newImage.tagAsVersion());
}
publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes);
}
private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) {
if (!currentAttributes.equals(newAttributes)) {
context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
currentAttributes, newAttributes);
nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
}
private void startContainer(NodeAgentContext context) {
ContainerData containerData = createContainerData(context);
dockerOperations.createContainer(context, containerData, getContainerResources(context.node()));
dockerOperations.startContainer(context);
lastCpuMetric = new CpuUsageReporter();
hasStartedServices = true;
hasResumedNode = false;
context.log(logger, "Container successfully started, new containerState is " + containerState);
}
private Optional<Container> removeContainerIfNeededUpdateContainerState(
NodeAgentContext context, Optional<Container> existingContainer) {
if (existingContainer.isPresent()) {
Optional<String> reason = shouldRemoveContainer(context.node(), existingContainer.get());
if (reason.isPresent()) {
removeContainer(context, existingContainer.get(), reason.get(), false);
return Optional.empty();
}
shouldRestartServices(context.node()).ifPresent(restartReason -> {
context.log(logger, "Will restart services: " + restartReason);
restartServices(context, existingContainer.get());
currentRestartGeneration = context.node().getWantedRestartGeneration();
});
}
return existingContainer;
}
private Optional<String> shouldRestartServices(NodeSpec node) {
if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) {
return Optional.of("Restart requested - wanted restart generation has been bumped: "
+ currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get());
}
return Optional.empty();
}
private void restartServices(NodeAgentContext context, Container existingContainer) {
if (existingContainer.state.isRunning() && context.node().getState() == NodeState.active) {
context.log(logger, "Restarting services");
orchestratorSuspendNode(context);
dockerOperations.restartVespa(context);
}
}
private void stopServices() {
NodeAgentContext context = contextSupplier.currentContext();
context.log(logger, "Stopping services");
if (containerState == ABSENT) return;
try {
hasStartedServices = hasResumedNode = false;
dockerOperations.stopServices(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
}
}
@Override
public void stopForHostSuspension() {
NodeAgentContext context = contextSupplier.currentContext();
getContainer(context).ifPresent(container -> removeContainer(context, container, "suspending host", true));
}
public void suspend() {
NodeAgentContext context = contextSupplier.currentContext();
context.log(logger, "Suspending services on node");
if (containerState == ABSENT) return;
try {
hasResumedNode = false;
dockerOperations.suspendNode(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
} catch (RuntimeException e) {
context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
}
}
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
final NodeState nodeState = node.getState();
if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
if (currentRebootGeneration < node.getWantedRebootGeneration()) {
return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
currentRebootGeneration, node.getWantedRebootGeneration()));
}
ContainerResources wantedContainerResources = getContainerResources(node);
if (!wantedContainerResources.equalsMemory(existingContainer.resources)) {
return Optional.of("Container should be running with different memory allocation, wanted: " +
wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources.toStringMemory());
}
if (containerState == STARTING) return Optional.of("Container failed to start");
return Optional.empty();
}
private void removeContainer(NodeAgentContext context, Container existingContainer, String reason, boolean alreadySuspended) {
context.log(logger, "Will remove container: " + reason);
if (existingContainer.state.isRunning()) {
if (!alreadySuspended) {
orchestratorSuspendNode(context);
}
try {
if (context.node().getState() != NodeState.dirty) {
suspend();
}
stopServices();
} catch (Exception e) {
context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
}
}
storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
dockerOperations.removeContainer(context, existingContainer);
currentRebootGeneration = context.node().getWantedRebootGeneration();
containerState = ABSENT;
context.log(logger, "Container successfully removed, new containerState is " + containerState);
}
private void updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
ContainerResources wantedContainerResources = getContainerResources(context.node());
if (wantedContainerResources.equalsCpu(existingContainer.resources)) return;
context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s",
wantedContainerResources.toStringCpu(), existingContainer.resources.toStringCpu());
orchestratorSuspendNode(context);
dockerOperations.updateContainer(context, wantedContainerResources);
}
private ContainerResources getContainerResources(NodeSpec node) {
double cpuCap = node.getOwner()
.map(NodeOwner::asApplicationId)
.map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()))
.orElse(containerCpuCap)
.value() * node.getMinCpuCores();
return ContainerResources.from(cpuCap, node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
}
private void scheduleDownLoadIfNeeded(NodeSpec node, Optional<Container> container) {
if (node.getWantedDockerImage().equals(container.map(c -> c.image))) return;
if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
imageBeingDownloaded = node.getWantedDockerImage().get();
} else if (imageBeingDownloaded != null) {
imageBeingDownloaded = null;
}
}
public void converge(NodeAgentContext context) {
try {
doConverge(context);
} catch (OrchestratorException | ConvergenceException e) {
context.log(logger, e.getMessage());
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
} catch (DockerException e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
} catch (Throwable e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring", e);
}
}
void doConverge(NodeAgentContext context) {
NodeSpec node = context.node();
Optional<Container> container = getContainer(context);
if (!node.equals(lastNode)) {
logChangesToNodeSpec(context, lastNode, node);
if (currentRebootGeneration < node.getCurrentRebootGeneration())
currentRebootGeneration = node.getCurrentRebootGeneration();
if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() ||
currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false))
currentRestartGeneration = node.getCurrentRestartGeneration();
if (container.map(c -> c.state.isRunning()).orElse(false)) {
storageMaintainer.writeMetricsConfig(context);
}
lastNode = node;
}
switch (node.getState()) {
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(context, container);
updateNodeRepoWithCurrentAttributes(context);
break;
case active:
storageMaintainer.handleCoreDumpsForContainer(context, container);
storageMaintainer.getDiskUsageFor(context)
.map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
.filter(diskUtil -> diskUtil >= 0.8)
.ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
scheduleDownLoadIfNeeded(node, container);
if (isDownloadingImage()) {
context.log(logger, "Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(context, container);
credentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
if (! container.isPresent()) {
containerState = STARTING;
startContainer(context);
containerState = UNKNOWN;
} else {
updateContainerIfNeeded(context, container.get());
}
aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
startServicesIfNeeded(context);
resumeNodeIfNeeded(context);
healthChecker.ifPresent(checker -> checker.verifyHealth(context));
updateNodeRepoWithCurrentAttributes(context);
context.log(logger, "Call resume against Orchestrator");
orchestrator.resume(context.hostname().value());
break;
case inactive:
removeContainerIfNeededUpdateContainerState(context, container);
updateNodeRepoWithCurrentAttributes(context);
break;
case provisioned:
nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty);
break;
case dirty:
removeContainerIfNeededUpdateContainerState(context, container);
context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
credentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
storageMaintainer.archiveNodeStorage(context);
updateNodeRepoWithCurrentAttributes(context);
nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
}
}
private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
StringBuilder builder = new StringBuilder();
appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState);
if (builder.length() > 0) {
context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString());
}
}
private static <T> String fieldDescription(T value) {
return value == null ? "[absent]" : value.toString();
}
private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
T oldValue = oldNode == null ? null : getter.apply(oldNode);
T newValue = getter.apply(newNode);
if (!Objects.equals(oldValue, newValue)) {
if (builder.length() > 0) {
builder.append(", ");
}
builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
}
}
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
if (containerState != UNKNOWN) return;
final NodeAgentContext context = contextSupplier.currentContext();
final NodeSpec node = context.node();
Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
if (!containerStats.isPresent()) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", context.hostname().value())
.add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
.add("state", node.getState().toString());
node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
node.getAllowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
Dimensions dimensions = dimensionsBuilder.build();
ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final int totalNumCpuCores = stats.getCpuStats().getOnlineCpus();
final long cpuContainerKernelTime = stats.getCpuStats().getUsageInKernelMode();
final long cpuContainerTotalTime = stats.getCpuStats().getTotalUsage();
final long cpuSystemTotalTime = stats.getCpuStats().getSystemCpuUsage();
final long memoryTotalBytes = stats.getMemoryStats().getLimit();
final long memoryTotalBytesUsage = stats.getMemoryStats().getUsage();
final long memoryTotalBytesCache = stats.getMemoryStats().getCache();
final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
double memoryTotalUsageRatio = (double) memoryTotalBytesUsage / memoryTotalBytes;
Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
List<DimensionMetrics> metrics = new ArrayList<>();
DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
.withMetric("mem.limit", memoryTotalBytes)
.withMetric("mem.used", memoryTotalBytesUsed)
.withMetric("mem.util", 100 * memoryUsageRatio)
.withMetric("mem_total.used", memoryTotalBytesUsage)
.withMetric("mem_total.util", 100 * memoryTotalUsageRatio)
.withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
.withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
.withMetric("disk.limit", diskTotalBytes);
diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
metrics.add(systemMetricsBuilder.build());
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
.withMetric("net.in.bytes", interfaceStats.getRxBytes())
.withMetric("net.in.errors", interfaceStats.getRxErrors())
.withMetric("net.in.dropped", interfaceStats.getRxDropped())
.withMetric("net.out.bytes", interfaceStats.getTxBytes())
.withMetric("net.out.errors", interfaceStats.getTxErrors())
.withMetric("net.out.dropped", interfaceStats.getTxDropped())
.build();
metrics.add(networkMetrics);
});
pushMetricsToContainer(context, metrics);
}
private void runPushMetricsCommand(NodeAgentContext context, String wrappedMetrics, boolean newMetricsProxy) {
int port = newMetricsProxy ? 19094 : 19091;
long timeoutSeconds = newMetricsProxy ? 2L : 5L;
String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:" + port, "setExtraMetrics", wrappedMetrics};
dockerOperations.executeCommandInContainerAsRoot(context, timeoutSeconds, command);
}
private Optional<Container> getContainer(NodeAgentContext context) {
if (containerState == ABSENT) return Optional.empty();
Optional<Container> container = dockerOperations.getContainer(context);
if (! container.isPresent()) containerState = ABSENT;
return container;
}
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
@Override
public int getAndResetNumberOfUnhandledExceptions() {
int temp = numberOfUnhandledException;
numberOfUnhandledException = 0;
return temp;
}
class CpuUsageReporter {
private long containerKernelUsage = 0;
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
private long deltaContainerKernelUsage;
private long deltaContainerUsage;
private long deltaSystemUsage;
private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
this.totalSystemUsage = totalSystemUsage;
this.totalContainerUsage = totalContainerUsage;
this.containerKernelUsage = containerKernelUsage;
}
/**
* Returns the CPU usage ratio for the docker container that this NodeAgent is managing
* in the time between the last two times updateCpuDeltas() was called. This is calculated
* by dividing the CPU time used by the container with the CPU time used by the entire system.
*/
double getCpuUsageRatio() {
return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
}
double getCpuKernelUsageRatio() {
return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
}
}
    /**
     * Asks the Orchestrator for permission to suspend this node; no-op unless the node is active.
     * On rejection, re-converges ACLs (suspend requests may arrive over a network whose rules
     * have drifted) before rethrowing the OrchestratorException.
     */
    private void orchestratorSuspendNode(NodeAgentContext context) {
        if (context.node().getState() != NodeState.active) return;
        context.log(logger, "Ask Orchestrator for permission to suspend node");
        try {
            orchestrator.suspend(context.hostname().value());
        } catch (OrchestratorException e) {
            try {
                aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            } catch (RuntimeException suppressed) {
                // Keep the original orchestrator failure as the primary exception.
                logger.log(LogLevel.WARNING, "Suppressing ACL update failure: " + suppressed);
                e.addSuppressed(suppressed);
            }
            throw e;
        }
    }
    /**
     * Returns the data to populate a new container with; this default rejects all writes.
     * Intended to be overridden by subclasses that provision files into the container.
     */
    protected ContainerData createContainerData(NodeAgentContext context) {
        return (pathInContainer, data) -> {
            throw new UnsupportedOperationException("addFile not implemented");
        };
    }
} | class NodeAgentImpl implements NodeAgent {
    // Conversion factor from the node repo's GB-denominated resources to bytes.
    private static final long BYTES_IN_GB = 1_000_000_000L;
    private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
    // Set once by stopForRemoval(); makes the converge loop exit.
    private final AtomicBoolean terminated = new AtomicBoolean(false);
    // True once the optional node "resume" program has run for the current container.
    private boolean hasResumedNode = false;
    // True while we believe services inside the container are running.
    private boolean hasStartedServices = true;
    private final NodeAgentContextSupplier contextSupplier;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Optional<CredentialsMaintainer> credentialsMaintainer;
    private final Optional<AclMaintainer> aclMaintainer;
    private final Optional<HealthChecker> healthChecker;
    // Feature flag capping the container's CPU allocation, resolved per hostname (and application).
    private final DoubleFlag containerCpuCap;
    // Failure counter; read and cleared via getAndResetNumberOfUnhandledExceptions().
    private int numberOfUnhandledException = 0;
    // Non-null while an async docker image pull is in flight for this node.
    private DockerImage imageBeingDownloaded = null;
    // Reboot/restart generations this agent has acted on, compared against the node repo's wanted values.
    private long currentRebootGeneration = 0;
    private Optional<Long> currentRestartGeneration = Optional.empty();
    // Thread running the converge loop; created in the constructor, started by start(), joined by stopForRemoval().
    private final Thread loopThread;
    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        STARTING,
        UNKNOWN
    }
    private ContainerState containerState = UNKNOWN;
    // Last NodeSpec seen, used by doConverge() to log diffs and pick up generation changes.
    private NodeSpec lastNode = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
    /**
     * Creates a node agent for the node identified by the supplier's current context.
     * The converge-loop thread is created here but not started; call start() to begin ticking.
     */
    public NodeAgentImpl(
            final NodeAgentContextSupplier contextSupplier,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final FlagSource flagSource,
            final Optional<CredentialsMaintainer> credentialsMaintainer,
            final Optional<AclMaintainer> aclMaintainer,
            final Optional<HealthChecker> healthChecker) {
        this.contextSupplier = contextSupplier;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.credentialsMaintainer = credentialsMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.healthChecker = healthChecker;
        // Bind the CPU-cap flag to this node's hostname so flag lookups resolve per node.
        this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource)
                .with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname());
        // The converge loop: block for the next context, converge, repeat until terminated.
        // InterruptedException is how stopForRemoval() wakes this thread; the flag is re-checked.
        this.loopThread = new Thread(() -> {
            while (!terminated.get()) {
                try {
                    NodeAgentContext context = contextSupplier.nextContext();
                    converge(context);
                } catch (InterruptedException ignored) { }
            }
        });
        this.loopThread.setName("tick-" + contextSupplier.currentContext().hostname());
    }
    @Override
    public void start() {
        loopThread.start();
    }
    /**
     * Terminates the converge loop and blocks until it has fully exited.
     * May only be called once; a second call throws.
     */
    @Override
    public void stopForRemoval() {
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        // Wake the loop thread if it is blocked waiting for the next context.
        contextSupplier.interrupt();
        // join() can return spuriously via interruption of THIS thread; loop until truly dead.
        do {
            try {
                loopThread.join();
            } catch (InterruptedException ignored) { }
        } while (loopThread.isAlive());
        contextSupplier.currentContext().log(logger, "Stopped");
    }
void startServicesIfNeeded(NodeAgentContext context) {
if (!hasStartedServices) {
context.log(logger, "Starting services");
dockerOperations.startServices(context);
hasStartedServices = true;
}
}
void resumeNodeIfNeeded(NodeAgentContext context) {
if (!hasResumedNode) {
context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
dockerOperations.resumeNode(context);
hasResumedNode = true;
}
}
    /**
     * Compares the generations/image this agent has acted on against what the node repo
     * currently records, and publishes the agent's values if they differ.
     * currentNodeAttributes = what the repo has now; newNodeAttributes = what we want it to record.
     */
    private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes();
        final NodeAttributes newNodeAttributes = new NodeAttributes();
        // Restart generation is only reported while a restart is actually wanted.
        if (context.node().getWantedRestartGeneration().isPresent() &&
                !Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) {
            currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration());
            newNodeAttributes.withRestartGeneration(currentRestartGeneration);
        }
        if (!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) {
            currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration());
            newNodeAttributes.withRebootGeneration(currentRebootGeneration);
        }
        // Only report the wanted image as current once the container is known to be up (UNKNOWN state).
        Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN);
        if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) {
            DockerImage currentImage = context.node().getCurrentDockerImage().orElse(DockerImage.EMPTY);
            DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY);
            currentNodeAttributes.withDockerImage(currentImage);
            currentNodeAttributes.withVespaVersion(currentImage.tagAsVersion());
            newNodeAttributes.withDockerImage(newImage);
            newNodeAttributes.withVespaVersion(newImage.tagAsVersion());
        }
        publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes);
    }
    /** Writes newAttributes to the node repo, but only when they differ from what the repo holds. */
    private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) {
        if (!currentAttributes.equals(newAttributes)) {
            context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                    currentAttributes, newAttributes);
            nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
        }
    }
    /** Creates and starts a container for this node, resetting per-container bookkeeping. */
    private void startContainer(NodeAgentContext context) {
        ContainerData containerData = createContainerData(context);
        dockerOperations.createContainer(context, containerData, getContainerResources(context.node()));
        dockerOperations.startContainer(context);
        // Fresh container means fresh CPU counters and a pending resume.
        lastCpuMetric = new CpuUsageReporter();
        hasStartedServices = true;
        hasResumedNode = false;
        context.log(logger, "Container successfully started, new containerState is " + containerState);
    }
    /**
     * Removes the container if any removal reason applies (returning empty), otherwise
     * restarts services if the restart generation was bumped, and returns the container unchanged.
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(
            NodeAgentContext context, Optional<Container> existingContainer) {
        if (existingContainer.isPresent()) {
            Optional<String> reason = shouldRemoveContainer(context.node(), existingContainer.get());
            if (reason.isPresent()) {
                removeContainer(context, existingContainer.get(), reason.get(), false);
                return Optional.empty();
            }
            shouldRestartServices(context.node()).ifPresent(restartReason -> {
                context.log(logger, "Will restart services: " + restartReason);
                restartServices(context, existingContainer.get());
                // Record that we have acted on this restart generation.
                currentRestartGeneration = context.node().getWantedRestartGeneration();
            });
        }
        return existingContainer;
    }
    /**
     * Returns a reason to restart services if the node repo's wanted restart generation is
     * ahead of the one we last acted on, otherwise empty.
     * NOTE(review): currentRestartGeneration.get() is called without a presence check; this
     * relies on doConverge() having synced it whenever a wanted generation exists — confirm,
     * otherwise this can throw NoSuchElementException.
     */
    private Optional<String> shouldRestartServices(NodeSpec node) {
        if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
        if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get());
        }
        return Optional.empty();
    }
    /** Restarts Vespa in the container (after orchestrator suspend); only for running, active nodes. */
    private void restartServices(NodeAgentContext context, Container existingContainer) {
        if (existingContainer.state.isRunning() && context.node().getState() == NodeState.active) {
            context.log(logger, "Restarting services");
            orchestratorSuspendNode(context);
            dockerOperations.restartVespa(context);
        }
    }
    /** Stops services inside the container; treats a vanished container as already stopped. */
    private void stopServices() {
        NodeAgentContext context = contextSupplier.currentContext();
        context.log(logger, "Stopping services");
        if (containerState == ABSENT) return;
        try {
            // Both flags cleared: services are down and a resume will be needed after restart.
            hasStartedServices = hasResumedNode = false;
            dockerOperations.stopServices(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        }
    }
    @Override
    public void stopForHostSuspension() {
        NodeAgentContext context = contextSupplier.currentContext();
        // alreadySuspended=true: the host-level suspension already covers this node.
        getContainer(context).ifPresent(container -> removeContainer(context, container, "suspending host", true));
    }
    /** Suspends services in the container; best-effort — failures are logged, not propagated. */
    public void suspend() {
        NodeAgentContext context = contextSupplier.currentContext();
        context.log(logger, "Suspending services on node");
        if (containerState == ABSENT) return;
        try {
            hasResumedNode = false;
            dockerOperations.suspendNode(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        } catch (RuntimeException e) {
            context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
        }
    }
    /**
     * Returns a human-readable reason to remove the existing container, or empty to keep it.
     * Checks, in order: node state, wanted image change, container not running, pending reboot,
     * memory allocation change, and a previous failed start attempt.
     */
    private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
        final NodeState nodeState = node.getState();
        if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }
        if (currentRebootGeneration < node.getWantedRebootGeneration()) {
            return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                    currentRebootGeneration, node.getWantedRebootGeneration()));
        }
        // Memory changes require recreating the container; CPU changes are applied in place elsewhere.
        ContainerResources wantedContainerResources = getContainerResources(node);
        if (!wantedContainerResources.equalsMemory(existingContainer.resources)) {
            return Optional.of("Container should be running with different memory allocation, wanted: " +
                    wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources.toStringMemory());
        }
        // STARTING here means a previous start attempt never completed.
        if (containerState == STARTING) return Optional.of("Container failed to start");
        return Optional.empty();
    }
    /**
     * Suspends (unless alreadySuspended), stops and removes the container, harvesting core dumps
     * first. Marks the wanted reboot generation as acted on and sets containerState to ABSENT.
     */
    private void removeContainer(NodeAgentContext context, Container existingContainer, String reason, boolean alreadySuspended) {
        context.log(logger, "Will remove container: " + reason);
        if (existingContainer.state.isRunning()) {
            if (!alreadySuspended) {
                orchestratorSuspendNode(context);
            }
            try {
                // Dirty nodes are being wiped anyway; skip the graceful suspend.
                if (context.node().getState() != NodeState.dirty) {
                    suspend();
                }
                stopServices();
            } catch (Exception e) {
                // Best-effort shutdown: removal proceeds even if graceful stop fails.
                context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
            }
        }
        storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
        dockerOperations.removeContainer(context, existingContainer);
        currentRebootGeneration = context.node().getWantedRebootGeneration();
        containerState = ABSENT;
        context.log(logger, "Container successfully removed, new containerState is " + containerState);
    }
    /** Applies a changed CPU allocation to the running container in place (after orchestrator suspend). */
    private void updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
        ContainerResources wantedContainerResources = getContainerResources(context.node());
        if (wantedContainerResources.equalsCpu(existingContainer.resources)) return;
        context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s",
                wantedContainerResources.toStringCpu(), existingContainer.resources.toStringCpu());
        orchestratorSuspendNode(context);
        dockerOperations.updateContainer(context, wantedContainerResources);
    }
    /**
     * Computes the container's resource allocation from the node spec, with the CPU cap
     * scaled by the feature flag (resolved per application when the node has an owner).
     */
    private ContainerResources getContainerResources(NodeSpec node) {
        double cpuCap = node.getOwner()
                .map(NodeOwner::asApplicationId)
                .map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()))
                .orElse(containerCpuCap)
                .value() * node.getMinCpuCores();
        return ContainerResources.from(cpuCap, node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
    }
private void scheduleDownLoadIfNeeded(NodeSpec node, Optional<Container> container) {
if (node.getWantedDockerImage().equals(container.map(c -> c.image))) return;
if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
imageBeingDownloaded = node.getWantedDockerImage().get();
} else if (imageBeingDownloaded != null) {
imageBeingDownloaded = null;
}
}
    /**
     * One tick of the agent: runs doConverge() and maps failures to the appropriate reaction.
     * Catch order matters — expected conditions are logged quietly, a vanished container resets
     * state, and everything else increments the unhandled-exception counter.
     */
    public void converge(NodeAgentContext context) {
        try {
            doConverge(context);
        } catch (OrchestratorException | ConvergenceException e) {
            // Expected, transient: e.g. orchestrator denied suspend. Retry next tick.
            context.log(logger, e.getMessage());
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
            context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
        } catch (DockerException e) {
            numberOfUnhandledException++;
            context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
        } catch (Throwable e) {
            numberOfUnhandledException++;
            context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring", e);
        }
    }
    /**
     * Drives the node toward the state the node repo wants: syncs generation bookkeeping when
     * the spec changes, then dispatches on node state. For active nodes this may download an
     * image, (re)create the container, converge credentials/ACLs, start/resume services and
     * finally resume the node in the orchestrator.
     */
    void doConverge(NodeAgentContext context) {
        NodeSpec node = context.node();
        Optional<Container> container = getContainer(context);
        if (!node.equals(lastNode)) {
            logChangesToNodeSpec(context, lastNode, node);
            // Adopt the repo's current generations so we don't re-do reboots/restarts already done.
            if (currentRebootGeneration < node.getCurrentRebootGeneration())
                currentRebootGeneration = node.getCurrentRebootGeneration();
            if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() ||
                    currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false))
                currentRestartGeneration = node.getCurrentRestartGeneration();
            // A changed spec may change what metrics config the container should have.
            if (container.map(c -> c.state.isRunning()).orElse(false)) {
                storageMaintainer.writeMetricsConfig(context);
            }
            lastNode = node;
        }
        switch (node.getState()) {
            case ready:
            case reserved:
            case parked:
            case failed:
                removeContainerIfNeededUpdateContainerState(context, container);
                updateNodeRepoWithCurrentAttributes(context);
                break;
            case active:
                storageMaintainer.handleCoreDumpsForContainer(context, container);
                // Trigger cleanup when disk usage crosses 80% of the allocation.
                storageMaintainer.getDiskUsageFor(context)
                        .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                        .filter(diskUtil -> diskUtil >= 0.8)
                        .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
                scheduleDownLoadIfNeeded(node, container);
                // Wait for the image before (re)creating the container; retry next tick.
                if (isDownloadingImage()) {
                    context.log(logger, "Waiting for image to download " + imageBeingDownloaded.asString());
                    return;
                }
                container = removeContainerIfNeededUpdateContainerState(context, container);
                credentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
                if (! container.isPresent()) {
                    // STARTING marks an in-flight start; left as STARTING it means the start failed.
                    containerState = STARTING;
                    startContainer(context);
                    containerState = UNKNOWN;
                } else {
                    updateContainerIfNeeded(context, container.get());
                }
                aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
                startServicesIfNeeded(context);
                resumeNodeIfNeeded(context);
                healthChecker.ifPresent(checker -> checker.verifyHealth(context));
                updateNodeRepoWithCurrentAttributes(context);
                context.log(logger, "Call resume against Orchestrator");
                orchestrator.resume(context.hostname().value());
                break;
            case inactive:
                removeContainerIfNeededUpdateContainerState(context, container);
                updateNodeRepoWithCurrentAttributes(context);
                break;
            case provisioned:
                nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty);
                break;
            case dirty:
                removeContainerIfNeededUpdateContainerState(context, container);
                context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
                credentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
                storageMaintainer.archiveNodeStorage(context);
                updateNodeRepoWithCurrentAttributes(context);
                nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
                break;
            default:
                throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
        }
    }
private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
StringBuilder builder = new StringBuilder();
appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState);
if (builder.length() > 0) {
context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString());
}
}
private static <T> String fieldDescription(T value) {
return value == null ? "[absent]" : value.toString();
}
private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
T oldValue = oldNode == null ? null : getter.apply(oldNode);
T newValue = getter.apply(newNode);
if (!Objects.equals(oldValue, newValue)) {
if (builder.length() > 0) {
builder.append(", ");
}
builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
}
}
    /**
     * Samples docker container stats and pushes CPU, memory, disk and per-interface network
     * metrics for this node into the container's metrics proxy. No-op unless a container is
     * believed to be running (containerState == UNKNOWN) and stats are available.
     */
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        if (containerState != UNKNOWN) return;
        final NodeAgentContext context = contextSupplier.currentContext();
        final NodeSpec node = context.node();
        Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
        if (!containerStats.isPresent()) return;
        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", context.hostname().value())
                .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
                .add("state", node.getState().toString());
        node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
        node.getAllowedToBeDown().ifPresent(allowed ->
                dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
        Dimensions dimensions = dimensionsBuilder.build();
        ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        final int totalNumCpuCores = stats.getCpuStats().getOnlineCpus();
        final long cpuContainerKernelTime = stats.getCpuStats().getUsageInKernelMode();
        final long cpuContainerTotalTime = stats.getCpuStats().getTotalUsage();
        final long cpuSystemTotalTime = stats.getCpuStats().getSystemCpuUsage();
        final long memoryTotalBytes = stats.getMemoryStats().getLimit();
        final long memoryTotalBytesUsage = stats.getMemoryStats().getUsage();
        final long memoryTotalBytesCache = stats.getMemoryStats().getCache();
        final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
        // Scale CPU usage so 100% means "all of this node's allocated cores", not all host cores.
        final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
        // "used" excludes the page cache; "total.used" includes it.
        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        double memoryTotalUsageRatio = (double) memoryTotalBytesUsage / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("mem_total.used", memoryTotalBytesUsage)
                .withMetric("mem_total.util", 100 * memoryTotalUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);
        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());
        // One metrics packet per network interface, tagged with the interface name.
        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", interfaceStats.getRxBytes())
                    .withMetric("net.in.errors", interfaceStats.getRxErrors())
                    .withMetric("net.in.dropped", interfaceStats.getRxDropped())
                    .withMetric("net.out.bytes", interfaceStats.getTxBytes())
                    .withMetric("net.out.errors", interfaceStats.getTxErrors())
                    .withMetric("net.out.dropped", interfaceStats.getTxDropped())
                    .build();
            metrics.add(networkMetrics);
        });
        pushMetricsToContainer(context, metrics);
    }
    /**
     * Pushes one secret-agent-wrapped metrics blob into the container via vespa-rpc-invoke.
     * The new metrics proxy listens on 19094, the legacy one on 19091. Timeouts against the
     * new proxy are logged at DEBUG only, since it may legitimately not be running yet.
     */
    private void runPushMetricsCommand(NodeAgentContext context, String wrappedMetrics, boolean newMetricsProxy) {
        int port = newMetricsProxy ? 19094 : 19091;
        String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:" + port, "setExtraMetrics", wrappedMetrics};
        try {
            dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
        } catch (DockerExecTimeoutException e) {
            Level level = newMetricsProxy ? LogLevel.DEBUG : LogLevel.WARNING;
            context.log(logger, level, "Failed to push metrics to container", e);
        }
    }
private Optional<Container> getContainer(NodeAgentContext context) {
if (containerState == ABSENT) return Optional.empty();
Optional<Container> container = dockerOperations.getContainer(context);
if (! container.isPresent()) containerState = ABSENT;
return container;
}
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
@Override
public int getAndResetNumberOfUnhandledExceptions() {
int temp = numberOfUnhandledException;
numberOfUnhandledException = 0;
return temp;
}
class CpuUsageReporter {
private long containerKernelUsage = 0;
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
private long deltaContainerKernelUsage;
private long deltaContainerUsage;
private long deltaSystemUsage;
private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
this.totalSystemUsage = totalSystemUsage;
this.totalContainerUsage = totalContainerUsage;
this.containerKernelUsage = containerKernelUsage;
}
/**
* Returns the CPU usage ratio for the docker container that this NodeAgent is managing
* in the time between the last two times updateCpuDeltas() was called. This is calculated
* by dividing the CPU time used by the container with the CPU time used by the entire system.
*/
double getCpuUsageRatio() {
return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
}
double getCpuKernelUsageRatio() {
return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
}
}
    /**
     * Asks the Orchestrator for permission to suspend this node; no-op unless the node is active.
     * On rejection, re-converges ACLs before rethrowing the OrchestratorException.
     */
    private void orchestratorSuspendNode(NodeAgentContext context) {
        if (context.node().getState() != NodeState.active) return;
        context.log(logger, "Ask Orchestrator for permission to suspend node");
        try {
            orchestrator.suspend(context.hostname().value());
        } catch (OrchestratorException e) {
            try {
                aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            } catch (RuntimeException suppressed) {
                // Keep the original orchestrator failure as the primary exception.
                logger.log(LogLevel.WARNING, "Suppressing ACL update failure: " + suppressed);
                e.addSuppressed(suppressed);
            }
            throw e;
        }
    }
    /**
     * Returns the data to populate a new container with; this default rejects all writes.
     * Intended to be overridden by subclasses that provision files into the container.
     */
    protected ContainerData createContainerData(NodeAgentContext context) {
        return (pathInContainer, data) -> {
            throw new UnsupportedOperationException("addFile not implemented");
        };
    }
} |
Ok, so it does fail, but it looks like this method does not throw on exit code != 0, so should be ok... | private void pushMetricsToContainer(NodeAgentContext context, List<DimensionMetrics> metrics) {
StringBuilder params = new StringBuilder();
try {
for (DimensionMetrics dimensionMetrics : metrics) {
params.append(dimensionMetrics.toSecretAgentReport());
}
String wrappedMetrics = "s:" + params.toString();
runPushMetricsCommand(context, wrappedMetrics, true);
runPushMetricsCommand(context, wrappedMetrics, false);
} catch (DockerExecTimeoutException | JsonProcessingException e) {
context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
}
} | runPushMetricsCommand(context, wrappedMetrics, true); | private void pushMetricsToContainer(NodeAgentContext context, List<DimensionMetrics> metrics) {
StringBuilder params = new StringBuilder();
try {
for (DimensionMetrics dimensionMetrics : metrics) {
params.append(dimensionMetrics.toSecretAgentReport());
}
} catch (JsonProcessingException e) {
context.log(logger, LogLevel.WARNING, "Failed to wrap metrics in secret agent report", e);
return;
}
String wrappedMetrics = "s:" + params.toString();
runPushMetricsCommand(context, wrappedMetrics, true);
runPushMetricsCommand(context, wrappedMetrics, false);
} | class NodeAgentImpl implements NodeAgent {
    // Conversion factor from the node repo's GB-denominated resources to bytes.
    private static final long BYTES_IN_GB = 1_000_000_000L;
    private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
    // Set once by stopForRemoval(); makes the converge loop exit.
    private final AtomicBoolean terminated = new AtomicBoolean(false);
    // True once the optional node "resume" program has run for the current container.
    private boolean hasResumedNode = false;
    // True while we believe services inside the container are running.
    private boolean hasStartedServices = true;
    private final NodeAgentContextSupplier contextSupplier;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Optional<CredentialsMaintainer> credentialsMaintainer;
    private final Optional<AclMaintainer> aclMaintainer;
    private final Optional<HealthChecker> healthChecker;
    // Feature flag capping the container's CPU allocation, resolved per hostname (and application).
    private final DoubleFlag containerCpuCap;
    // Failure counter; read and cleared via getAndResetNumberOfUnhandledExceptions().
    private int numberOfUnhandledException = 0;
    // Non-null while an async docker image pull is in flight for this node.
    private DockerImage imageBeingDownloaded = null;
    // Reboot/restart generations this agent has acted on, compared against the node repo's wanted values.
    private long currentRebootGeneration = 0;
    private Optional<Long> currentRestartGeneration = Optional.empty();
    // Thread running the converge loop; created in the constructor, started by start(), joined by stopForRemoval().
    private final Thread loopThread;
    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        STARTING,
        UNKNOWN
    }
    private ContainerState containerState = UNKNOWN;
    // Last NodeSpec seen, used by doConverge() to log diffs and pick up generation changes.
    private NodeSpec lastNode = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
    /**
     * Creates a node agent for the node identified by the supplier's current context.
     * The converge-loop thread is created here but not started; call start() to begin ticking.
     */
    public NodeAgentImpl(
            final NodeAgentContextSupplier contextSupplier,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final FlagSource flagSource,
            final Optional<CredentialsMaintainer> credentialsMaintainer,
            final Optional<AclMaintainer> aclMaintainer,
            final Optional<HealthChecker> healthChecker) {
        this.contextSupplier = contextSupplier;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.credentialsMaintainer = credentialsMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.healthChecker = healthChecker;
        // Bind the CPU-cap flag to this node's hostname so flag lookups resolve per node.
        this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource)
                .with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname());
        // The converge loop: block for the next context, converge, repeat until terminated.
        // InterruptedException is how stopForRemoval() wakes this thread; the flag is re-checked.
        this.loopThread = new Thread(() -> {
            while (!terminated.get()) {
                try {
                    NodeAgentContext context = contextSupplier.nextContext();
                    converge(context);
                } catch (InterruptedException ignored) { }
            }
        });
        this.loopThread.setName("tick-" + contextSupplier.currentContext().hostname());
    }
    @Override
    public void start() {
        loopThread.start();
    }
    /**
     * Terminates the converge loop and blocks until it has fully exited.
     * May only be called once; a second call throws.
     */
    @Override
    public void stopForRemoval() {
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        // Wake the loop thread if it is blocked waiting for the next context.
        contextSupplier.interrupt();
        // join() can return spuriously via interruption of THIS thread; loop until truly dead.
        do {
            try {
                loopThread.join();
            } catch (InterruptedException ignored) { }
        } while (loopThread.isAlive());
        contextSupplier.currentContext().log(logger, "Stopped");
    }
void startServicesIfNeeded(NodeAgentContext context) {
if (!hasStartedServices) {
context.log(logger, "Starting services");
dockerOperations.startServices(context);
hasStartedServices = true;
}
}
void resumeNodeIfNeeded(NodeAgentContext context) {
if (!hasResumedNode) {
context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
dockerOperations.resumeNode(context);
hasResumedNode = true;
}
}
    /**
     * Compares the generations/image this agent has acted on against what the node repo
     * currently records, and publishes the agent's values if they differ.
     */
    private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes();
        final NodeAttributes newNodeAttributes = new NodeAttributes();
        // Restart generation is only reported while a restart is actually wanted.
        if (context.node().getWantedRestartGeneration().isPresent() &&
                !Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) {
            currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration());
            newNodeAttributes.withRestartGeneration(currentRestartGeneration);
        }
        if (!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) {
            currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration());
            newNodeAttributes.withRebootGeneration(currentRebootGeneration);
        }
        // Only report the wanted image as current once the container is known to be up (UNKNOWN state).
        Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN);
        if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) {
            DockerImage currentImage = context.node().getCurrentDockerImage().orElse(DockerImage.EMPTY);
            DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY);
            currentNodeAttributes.withDockerImage(currentImage);
            currentNodeAttributes.withVespaVersion(currentImage.tagAsVersion());
            newNodeAttributes.withDockerImage(newImage);
            newNodeAttributes.withVespaVersion(newImage.tagAsVersion());
        }
        publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes);
    }
    /** Writes newAttributes to the node repo, but only when they differ from what the repo holds. */
    private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) {
        if (!currentAttributes.equals(newAttributes)) {
            context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                    currentAttributes, newAttributes);
            nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
        }
    }
private void startContainer(NodeAgentContext context) {
ContainerData containerData = createContainerData(context);
dockerOperations.createContainer(context, containerData, getContainerResources(context.node()));
dockerOperations.startContainer(context);
lastCpuMetric = new CpuUsageReporter();
hasStartedServices = true;
hasResumedNode = false;
context.log(logger, "Container successfully started, new containerState is " + containerState);
}
/**
 * Removes the existing container if shouldRemoveContainer finds a reason to,
 * otherwise restarts services inside it when the restart generation was bumped.
 *
 * @return empty if the container was removed, otherwise the container passed in
 */
private Optional<Container> removeContainerIfNeededUpdateContainerState(
        NodeAgentContext context, Optional<Container> existingContainer) {
    if (existingContainer.isPresent()) {
        Optional<String> reason = shouldRemoveContainer(context.node(), existingContainer.get());
        if (reason.isPresent()) {
            removeContainer(context, existingContainer.get(), reason.get(), false);
            return Optional.empty();
        }

        shouldRestartServices(context.node()).ifPresent(restartReason -> {
            context.log(logger, "Will restart services: " + restartReason);
            restartServices(context, existingContainer.get());
            // Record that we have acted on this restart generation
            currentRestartGeneration = context.node().getWantedRestartGeneration();
        });
    }
    return existingContainer;
}
// Returns a human-readable reason to restart services when the wanted restart
// generation has moved past the one we last acted on, otherwise empty.
private Optional<String> shouldRestartServices(NodeSpec node) {
    Optional<Long> wanted = node.getWantedRestartGeneration();
    if (!wanted.isPresent()) return Optional.empty();

    if (currentRestartGeneration.get() < wanted.get()) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + currentRestartGeneration.get() + " -> " + wanted.get());
    }
    return Optional.empty();
}
// Restarts Vespa services in the container. Only applies to a running
// container on an active node; the node is suspended in the orchestrator
// first so the restart does not happen behind its back.
private void restartServices(NodeAgentContext context, Container existingContainer) {
    if (existingContainer.state.isRunning() && context.node().getState() == NodeState.active) {
        context.log(logger, "Restarting services");
        // Suspend the node before taking its services down
        orchestratorSuspendNode(context);
        dockerOperations.restartVespa(context);
    }
}
// Stops Vespa services in the container (no-op if the container is known to
// be absent). Clears the started/resumed flags first so a later converge
// will bring services back up.
private void stopServices() {
    NodeAgentContext context = contextSupplier.currentContext();
    context.log(logger, "Stopping services");
    if (containerState == ABSENT) return;
    try {
        hasStartedServices = hasResumedNode = false;
        dockerOperations.stopServices(context);
    } catch (ContainerNotFoundException e) {
        // Container disappeared underneath us; remember that so later calls short-circuit
        containerState = ABSENT;
    }
}
// Removes the container as part of suspending the whole host. The caller has
// already handled orchestrator suspension, so removeContainer is told not to
// suspend this node again (alreadySuspended = true).
@Override
public void stopForHostSuspension() {
    NodeAgentContext context = contextSupplier.currentContext();
    getContainer(context).ifPresent(container -> removeContainer(context, container, "suspending host", true));
}
// Suspends Vespa services on the node (no-op if the container is known to be
// absent). Suspension is best-effort: runtime failures are logged and
// swallowed so callers such as removeContainer can proceed.
public void suspend() {
    NodeAgentContext context = contextSupplier.currentContext();
    context.log(logger, "Suspending services on node");
    if (containerState == ABSENT) return;
    try {
        hasResumedNode = false;
        dockerOperations.suspendNode(context);
    } catch (ContainerNotFoundException e) {
        // Container vanished — record that so later calls short-circuit
        containerState = ABSENT;
    } catch (RuntimeException e) {
        // Deliberate swallow: failing to suspend should not block the rest of
        // the teardown/converge flow.
        context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
    }
}
/**
 * Decides whether the existing container must be torn down (and later
 * recreated). Checks, in order: node state no longer warrants a container,
 * wanted docker image changed, container not running, reboot wanted, memory
 * allocation changed, and a previous start attempt that never completed.
 * CPU-only resource changes are applied in place (see updateContainerIfNeeded)
 * and do not trigger removal.
 *
 * @return a human-readable removal reason, or empty to keep the container
 */
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
    final NodeState nodeState = node.getState();
    if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) {
        return Optional.of("Node in state " + nodeState + ", container should no longer be running");
    }
    if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
    }
    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }

    if (currentRebootGeneration < node.getWantedRebootGeneration()) {
        return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                currentRebootGeneration, node.getWantedRebootGeneration()));
    }

    // Memory changes require recreating the container
    ContainerResources wantedContainerResources = getContainerResources(node);
    if (!wantedContainerResources.equalsMemory(existingContainer.resources)) {
        return Optional.of("Container should be running with different memory allocation, wanted: " +
                wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources.toStringMemory());
    }

    // containerState is still STARTING only if a previous start attempt threw mid-way
    if (containerState == STARTING) return Optional.of("Container failed to start");
    return Optional.empty();
}
// Tears down the container: suspends the node in the orchestrator (unless the
// caller already did), suspends and stops services (best effort), salvages
// core dumps, and removes the container. Records that the wanted reboot
// generation has been satisfied, since recreating the container is this
// agent's implementation of a "reboot".
private void removeContainer(NodeAgentContext context, Container existingContainer, String reason, boolean alreadySuspended) {
    context.log(logger, "Will remove container: " + reason);

    if (existingContainer.state.isRunning()) {
        if (!alreadySuspended) {
            orchestratorSuspendNode(context);
        }
        try {
            // A dirty node is being deprovisioned; no need to suspend services gracefully
            if (context.node().getState() != NodeState.dirty) {
                suspend();
            }
            stopServices();
        } catch (Exception e) {
            // Best effort — removal must proceed even if graceful stop failed
            context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
        }
    }

    // Salvage core dumps while the container's file system is still around
    storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
    dockerOperations.removeContainer(context, existingContainer);
    currentRebootGeneration = context.node().getWantedRebootGeneration();
    containerState = ABSENT;
    context.log(logger, "Container successfully removed, new containerState is " + containerState);
}
// Applies a changed CPU allocation to the running container in place.
// Memory changes are handled by recreating the container instead (see
// shouldRemoveContainer); this method only reacts to CPU differences.
private void updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
    ContainerResources wanted = getContainerResources(context.node());
    if (wanted.equalsCpu(existingContainer.resources)) return;

    context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s",
            wanted.toStringCpu(), existingContainer.resources.toStringCpu());
    orchestratorSuspendNode(context);
    dockerOperations.updateContainer(context, wanted);
}
// Computes the docker container resource limits for the given node. The CPU
// cap feature flag may carry a per-application override keyed on the node's
// owner; the flag value scales the node's minimum core count.
private ContainerResources getContainerResources(NodeSpec node) {
    double cpuCapFactor = node.getOwner()
            .map(NodeOwner::asApplicationId)
            .map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()))
            .orElse(containerCpuCap)
            .value();
    double cpuCap = cpuCapFactor * node.getMinCpuCores();
    return ContainerResources.from(cpuCap, node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
}
// Starts an async pull of the node's wanted docker image unless the container
// already runs it. Tracks the in-flight download in imageBeingDownloaded so
// converge can wait for it (see isDownloadingImage).
private void scheduleDownLoadIfNeeded(NodeSpec node, Optional<Container> container) {
    Optional<DockerImage> wantedImage = node.getWantedDockerImage();
    // Nothing to do when the container already runs the wanted image
    // (this also covers both being absent)
    if (wantedImage.equals(container.map(c -> c.image))) return;
    // Defensive guard: with no wanted image there is nothing to pull. The
    // previous code called Optional.get() unconditionally here and would throw
    // NoSuchElementException if a container existed but no image was wanted.
    if (!wantedImage.isPresent()) return;

    if (dockerOperations.pullImageAsyncIfNeeded(wantedImage.get())) {
        imageBeingDownloaded = wantedImage.get();
    } else if (imageBeingDownloaded != null) {
        // Pull finished (or was unnecessary) — clear the in-flight marker
        imageBeingDownloaded = null;
    }
}
// One converge pass with centralized error handling: orchestrator and
// convergence hiccups are expected and only logged; a vanished container
// resets our cached container state; docker errors and anything unexpected
// bump the unhandled-exception counter exposed via
// getAndResetNumberOfUnhandledExceptions().
public void converge(NodeAgentContext context) {
    try {
        doConverge(context);
    } catch (OrchestratorException | ConvergenceException e) {
        context.log(logger, e.getMessage());
    } catch (ContainerNotFoundException e) {
        containerState = ABSENT;
        context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
    } catch (DockerException e) {
        numberOfUnhandledException++;
        context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
    } catch (Throwable e) {
        // Catch everything so one bad tick cannot kill the converge loop thread
        numberOfUnhandledException++;
        context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring", e);
    }
}
// Drives the node towards the state wanted by the node repository.
// Dispatches on node state; for active nodes this may download an image,
// (re)create the container, start/resume services and finally resume the
// node in the orchestrator.
void doConverge(NodeAgentContext context) {
    NodeSpec node = context.node();
    Optional<Container> container = getContainer(context);

    // On a changed node spec: log the diff and catch up the local
    // reboot/restart generation counters (they only ever move forward).
    if (!node.equals(lastNode)) {
        logChangesToNodeSpec(context, lastNode, node);

        if (currentRebootGeneration < node.getCurrentRebootGeneration())
            currentRebootGeneration = node.getCurrentRebootGeneration();

        // Adopt the node's restart generation if its presence changed or it advanced
        if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() ||
                currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false))
            currentRestartGeneration = node.getCurrentRestartGeneration();

        // Refresh the metrics config for a running container when the spec changes
        if (container.map(c -> c.state.isRunning()).orElse(false)) {
            storageMaintainer.writeMetricsConfig(context);
        }

        lastNode = node;
    }

    switch (node.getState()) {
        case ready:
        case reserved:
        case parked:
        case failed:
            // No container should run in these states
            removeContainerIfNeededUpdateContainerState(context, container);
            updateNodeRepoWithCurrentAttributes(context);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(context, container);

            // Trigger cleanup when disk utilization reaches 80% of the node's allocation
            storageMaintainer.getDiskUsageFor(context)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));

            scheduleDownLoadIfNeeded(node, container);
            if (isDownloadingImage()) {
                // Wait for the image pull to finish before (re)creating the container
                context.log(logger, "Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(context, container);
            credentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            if (! container.isPresent()) {
                // STARTING marks an in-flight start: if startContainer throws, the
                // next converge sees STARTING and removes the half-started container
                // (see shouldRemoveContainer).
                containerState = STARTING;
                startContainer(context);
                containerState = UNKNOWN;
            } else {
                updateContainerIfNeeded(context, container.get());
            }

            aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));

            startServicesIfNeeded(context);
            resumeNodeIfNeeded(context);
            healthChecker.ifPresent(checker -> checker.verifyHealth(context));

            // Publish current attributes before resuming so the node repo is up to date
            updateNodeRepoWithCurrentAttributes(context);
            context.log(logger, "Call resume against Orchestrator");
            orchestrator.resume(context.hostname().value());
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(context, container);
            updateNodeRepoWithCurrentAttributes(context);
            break;
        case provisioned:
            nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(context, container);
            context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
            credentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
            storageMaintainer.archiveNodeStorage(context);
            updateNodeRepoWithCurrentAttributes(context);
            nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
    }
}
// Logs a diff-style summary of NodeSpec fields that changed since the
// previous tick. Currently only the node state is compared.
private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
    StringBuilder changes = new StringBuilder();
    appendIfDifferent(changes, "state", lastNode, node, NodeSpec::getState);
    if (changes.length() == 0) return;
    context.log(logger, LogLevel.INFO, "Changes to node: " + changes.toString());
}
// Renders a possibly-null field value for log output; null becomes "[absent]".
private static <T> String fieldDescription(T value) {
    if (value == null) {
        return "[absent]";
    }
    return value.toString();
}
// Appends "name old -> new" to builder (comma-separated with earlier entries)
// when the extracted field differs between the old and new spec. A null
// oldNode is treated as a spec with every field absent.
private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
    T oldValue = oldNode == null ? null : getter.apply(oldNode);
    T newValue = getter.apply(newNode);
    if (Objects.equals(oldValue, newValue)) return;

    if (builder.length() > 0) {
        builder.append(", ");
    }
    builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
}
// Collects container stats from docker and publishes system- and
// network-level metrics for this node. Only runs in containerState UNKNOWN,
// i.e. when the container is not known to be absent or mid-start.
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
    if (containerState != UNKNOWN) return;
    final NodeAgentContext context = contextSupplier.currentContext();
    final NodeSpec node = context.node();

    Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
    if (!containerStats.isPresent()) return;

    // Common dimensions attached to all metrics from this node
    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
            .add("host", context.hostname().value())
            .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
            .add("state", node.getState().toString());
    node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
    node.getAllowedToBeDown().ifPresent(allowed ->
            dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
    Dimensions dimensions = dimensionsBuilder.build();

    ContainerStats stats = containerStats.get();
    final String APP = MetricReceiverWrapper.APPLICATION_NODE;
    final int totalNumCpuCores = stats.getCpuStats().getOnlineCpus();
    final long cpuContainerKernelTime = stats.getCpuStats().getUsageInKernelMode();
    final long cpuContainerTotalTime = stats.getCpuStats().getTotalUsage();
    final long cpuSystemTotalTime = stats.getCpuStats().getSystemCpuUsage();
    final long memoryTotalBytes = stats.getMemoryStats().getLimit();
    final long memoryTotalBytesUsage = stats.getMemoryStats().getUsage();
    final long memoryTotalBytesCache = stats.getMemoryStats().getCache();
    final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
    final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);

    lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

    // Normalize CPU usage to the fraction of the host's cores this node is allocated
    final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
    double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
    double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

    // "mem.used" excludes the page cache; "mem_total.used" includes it
    long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
    double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
    double memoryTotalUsageRatio = (double) memoryTotalBytesUsage / memoryTotalBytes;
    Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

    List<DimensionMetrics> metrics = new ArrayList<>();
    DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
            .withMetric("mem.limit", memoryTotalBytes)
            .withMetric("mem.used", memoryTotalBytesUsed)
            .withMetric("mem.util", 100 * memoryUsageRatio)
            .withMetric("mem_total.used", memoryTotalBytesUsage)
            .withMetric("mem_total.util", 100 * memoryTotalUsageRatio)
            .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
            .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
            .withMetric("disk.limit", diskTotalBytes);
    // Disk usage metrics are only reported when the usage could be determined
    diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
    diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
    metrics.add(systemMetricsBuilder.build());

    // One metrics set per network interface, with the interface as an extra dimension
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                .withMetric("net.in.bytes", interfaceStats.getRxBytes())
                .withMetric("net.in.errors", interfaceStats.getRxErrors())
                .withMetric("net.in.dropped", interfaceStats.getRxDropped())
                .withMetric("net.out.bytes", interfaceStats.getTxBytes())
                .withMetric("net.out.errors", interfaceStats.getTxErrors())
                .withMetric("net.out.dropped", interfaceStats.getTxDropped())
                .build();
        metrics.add(networkMetrics);
    });

    pushMetricsToContainer(context, metrics);
}
// Pushes the serialized metrics into the metrics proxy inside the container
// via vespa-rpc-invoke. The new metrics proxy listens on a different port and
// gets a shorter overall command timeout.
// NOTE(review): the rpc-invoke timeout is hard-coded to "-t 2" regardless of
// timeoutSeconds — confirm whether it should track timeoutSeconds instead.
private void runPushMetricsCommand(NodeAgentContext context, String wrappedMetrics, boolean newMetricsProxy) {
    int port = newMetricsProxy ? 19094 : 19091;
    long timeoutSeconds = newMetricsProxy ? 2L : 5L;
    String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:" + port, "setExtraMetrics", wrappedMetrics};
    dockerOperations.executeCommandInContainerAsRoot(context, timeoutSeconds, command);
}
// Fetches the container from the docker daemon, short-circuiting when we
// already know it is absent. Updates containerState to ABSENT when the
// daemon reports no container.
private Optional<Container> getContainer(NodeAgentContext context) {
    if (containerState == ABSENT) {
        return Optional.empty();
    }
    Optional<Container> fromDaemon = dockerOperations.getContainer(context);
    if (!fromDaemon.isPresent()) {
        containerState = ABSENT;
    }
    return fromDaemon;
}
// Returns whether an async docker image pull (scheduled by
// scheduleDownLoadIfNeeded) is still in flight.
@Override
public boolean isDownloadingImage() {
    return imageBeingDownloaded != null;
}
// Returns the number of unhandled exceptions counted since the previous call
// and resets the counter to zero.
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    int unhandled = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return unhandled;
}
// Tracks cumulative CPU counters sampled from docker and derives the deltas
// between the two most recent samples, from which usage ratios are computed.
class CpuUsageReporter {
    // Latest cumulative counters as reported by docker
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    // Deltas between the two most recent samples
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // A zero previous system reading means this is the first sample; use a
        // zero delta so the ratio getters return NaN instead of a bogus value.
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }

    // Same ratio as getCpuUsageRatio, but counting only CPU time the container
    // spent in kernel mode.
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
// Asks the Orchestrator for permission to suspend this node; no-op unless the
// node is active. If suspension fails, ACLs are re-converged (best effort —
// a failure there is logged and attached as a suppressed exception) before
// the OrchestratorException is rethrown to the caller.
private void orchestratorSuspendNode(NodeAgentContext context) {
    if (context.node().getState() != NodeState.active) return;

    context.log(logger, "Ask Orchestrator for permission to suspend node");
    try {
        orchestrator.suspend(context.hostname().value());
    } catch (OrchestratorException e) {
        try {
            aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
        } catch (RuntimeException suppressed) {
            logger.log(LogLevel.WARNING, "Suppressing ACL update failure: " + suppressed);
            e.addSuppressed(suppressed);
        }
        throw e;
    }
}
// Hook for subclasses to supply files to be written into the container before
// it is started (used from startContainer). The default implementation
// rejects all writes.
protected ContainerData createContainerData(NodeAgentContext context) {
    return (pathInContainer, data) -> {
        throw new UnsupportedOperationException("addFile not implemented");
    };
}
} | class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());

// Set once by stopForRemoval(); the converge loop exits when it becomes true
private final AtomicBoolean terminated = new AtomicBoolean(false);

// Whether the optional node "resume" program has been run for the current container
private boolean hasResumedNode = false;
// Whether Vespa services are believed to be running in the current container
private boolean hasStartedServices = true;

private final NodeAgentContextSupplier contextSupplier;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Optional<CredentialsMaintainer> credentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private final Optional<HealthChecker> healthChecker;
// Feature flag for the container CPU cap, bound to this host (see constructor)
private final DoubleFlag containerCpuCap;

// Count of unexpected exceptions; drained via getAndResetNumberOfUnhandledExceptions()
private int numberOfUnhandledException = 0;
// Non-null while an async docker image pull is in flight
private DockerImage imageBeingDownloaded = null;

// Reboot/restart generations we have last acted on (see doConverge)
private long currentRebootGeneration = 0;
private Optional<Long> currentRestartGeneration = Optional.empty();

// The converge loop thread; created in the constructor, started by start()
private final Thread loopThread;

/**
 * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
 * NodeAgent explicitly starting it.
 * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
 * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
 * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
 * to get updated state of the container.
 */
enum ContainerState {
    ABSENT,
    STARTING,
    UNKNOWN
}

private ContainerState containerState = UNKNOWN;

// The node spec seen on the previous tick, used to detect and log changes
private NodeSpec lastNode = null;
// CPU counters for the current container; replaced when a new container starts
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
// Creates a node agent. The converge loop thread is created here but not
// started until start() is called.
public NodeAgentImpl(
        final NodeAgentContextSupplier contextSupplier,
        final NodeRepository nodeRepository,
        final Orchestrator orchestrator,
        final DockerOperations dockerOperations,
        final StorageMaintainer storageMaintainer,
        final FlagSource flagSource,
        final Optional<CredentialsMaintainer> credentialsMaintainer,
        final Optional<AclMaintainer> aclMaintainer,
        final Optional<HealthChecker> healthChecker) {
    this.contextSupplier = contextSupplier;
    this.nodeRepository = nodeRepository;
    this.orchestrator = orchestrator;
    this.dockerOperations = dockerOperations;
    this.storageMaintainer = storageMaintainer;
    this.credentialsMaintainer = credentialsMaintainer;
    this.aclMaintainer = aclMaintainer;
    this.healthChecker = healthChecker;

    // Bind the CPU-cap feature flag to this node's hostname so per-host overrides apply
    this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource)
            .with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname());

    // Converge loop: block until the next context is available, then run one
    // converge pass. An interrupt simply re-checks the termination flag.
    this.loopThread = new Thread(() -> {
        while (!terminated.get()) {
            try {
                NodeAgentContext context = contextSupplier.nextContext();
                converge(context);
            } catch (InterruptedException ignored) { }
        }
    });
    this.loopThread.setName("tick-" + contextSupplier.currentContext().hostname());
}
// Starts the converge loop thread created in the constructor.
@Override
public void start() {
    loopThread.start();
}
// Permanently stops this agent: flags termination, interrupts the converge
// loop and joins it until it has actually exited. May only be called once.
@Override
public void stopForRemoval() {
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    contextSupplier.interrupt();

    // Keep joining — our own join() may itself be interrupted
    do {
        try {
            loopThread.join();
        } catch (InterruptedException ignored) { }
    } while (loopThread.isAlive());

    contextSupplier.currentContext().log(logger, "Stopped");
}
// Starts Vespa services inside the container unless they are already believed
// to be running.
void startServicesIfNeeded(NodeAgentContext context) {
    if (hasStartedServices) return;

    context.log(logger, "Starting services");
    dockerOperations.startServices(context);
    hasStartedServices = true;
}
// Runs the optional node "resume" program unless it has already been run for
// the current container.
void resumeNodeIfNeeded(NodeAgentContext context) {
    if (hasResumedNode) return;

    context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
    dockerOperations.resumeNode(context);
    hasResumedNode = true;
}
// Compares the node repo's view of restart/reboot generation and docker
// image/Vespa version with this agent's local view, and publishes the local
// view to the node repository when anything differs.
private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) {
    final NodeAttributes currentNodeAttributes = new NodeAttributes();
    final NodeAttributes newNodeAttributes = new NodeAttributes();

    if (context.node().getWantedRestartGeneration().isPresent() &&
            !Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) {
        currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration());
        newNodeAttributes.withRestartGeneration(currentRestartGeneration);
    }

    if (!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) {
        currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration());
        newNodeAttributes.withRebootGeneration(currentRebootGeneration);
    }

    // Only report the wanted image as current when the container is actually up
    // (containerState UNKNOWN, i.e. not absent and not mid-start)
    Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN);
    if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) {
        DockerImage currentImage = context.node().getCurrentDockerImage().orElse(DockerImage.EMPTY);
        DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY);

        currentNodeAttributes.withDockerImage(currentImage);
        currentNodeAttributes.withVespaVersion(currentImage.tagAsVersion());
        newNodeAttributes.withDockerImage(newImage);
        newNodeAttributes.withVespaVersion(newImage.tagAsVersion());
    }

    publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes);
}
// Pushes the new node attributes to the node repository, but only when they
// differ from the currently registered ones, to avoid redundant writes.
private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) {
    if (currentAttributes.equals(newAttributes)) return;

    context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
            currentAttributes, newAttributes);
    nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
private void startContainer(NodeAgentContext context) {
ContainerData containerData = createContainerData(context);
dockerOperations.createContainer(context, containerData, getContainerResources(context.node()));
dockerOperations.startContainer(context);
lastCpuMetric = new CpuUsageReporter();
hasStartedServices = true;
hasResumedNode = false;
context.log(logger, "Container successfully started, new containerState is " + containerState);
}
private Optional<Container> removeContainerIfNeededUpdateContainerState(
NodeAgentContext context, Optional<Container> existingContainer) {
if (existingContainer.isPresent()) {
Optional<String> reason = shouldRemoveContainer(context.node(), existingContainer.get());
if (reason.isPresent()) {
removeContainer(context, existingContainer.get(), reason.get(), false);
return Optional.empty();
}
shouldRestartServices(context.node()).ifPresent(restartReason -> {
context.log(logger, "Will restart services: " + restartReason);
restartServices(context, existingContainer.get());
currentRestartGeneration = context.node().getWantedRestartGeneration();
});
}
return existingContainer;
}
// Returns a human-readable reason to restart services when the wanted restart
// generation has moved past the one we last acted on, otherwise empty.
private Optional<String> shouldRestartServices(NodeSpec node) {
    Optional<Long> wanted = node.getWantedRestartGeneration();
    if (!wanted.isPresent()) return Optional.empty();

    if (currentRestartGeneration.get() < wanted.get()) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + currentRestartGeneration.get() + " -> " + wanted.get());
    }
    return Optional.empty();
}
private void restartServices(NodeAgentContext context, Container existingContainer) {
if (existingContainer.state.isRunning() && context.node().getState() == NodeState.active) {
context.log(logger, "Restarting services");
orchestratorSuspendNode(context);
dockerOperations.restartVespa(context);
}
}
private void stopServices() {
NodeAgentContext context = contextSupplier.currentContext();
context.log(logger, "Stopping services");
if (containerState == ABSENT) return;
try {
hasStartedServices = hasResumedNode = false;
dockerOperations.stopServices(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
}
}
@Override
public void stopForHostSuspension() {
NodeAgentContext context = contextSupplier.currentContext();
getContainer(context).ifPresent(container -> removeContainer(context, container, "suspending host", true));
}
public void suspend() {
NodeAgentContext context = contextSupplier.currentContext();
context.log(logger, "Suspending services on node");
if (containerState == ABSENT) return;
try {
hasResumedNode = false;
dockerOperations.suspendNode(context);
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
} catch (RuntimeException e) {
context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
}
}
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
final NodeState nodeState = node.getState();
if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
if (currentRebootGeneration < node.getWantedRebootGeneration()) {
return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
currentRebootGeneration, node.getWantedRebootGeneration()));
}
ContainerResources wantedContainerResources = getContainerResources(node);
if (!wantedContainerResources.equalsMemory(existingContainer.resources)) {
return Optional.of("Container should be running with different memory allocation, wanted: " +
wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources.toStringMemory());
}
if (containerState == STARTING) return Optional.of("Container failed to start");
return Optional.empty();
}
private void removeContainer(NodeAgentContext context, Container existingContainer, String reason, boolean alreadySuspended) {
context.log(logger, "Will remove container: " + reason);
if (existingContainer.state.isRunning()) {
if (!alreadySuspended) {
orchestratorSuspendNode(context);
}
try {
if (context.node().getState() != NodeState.dirty) {
suspend();
}
stopServices();
} catch (Exception e) {
context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
}
}
storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
dockerOperations.removeContainer(context, existingContainer);
currentRebootGeneration = context.node().getWantedRebootGeneration();
containerState = ABSENT;
context.log(logger, "Container successfully removed, new containerState is " + containerState);
}
private void updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
ContainerResources wantedContainerResources = getContainerResources(context.node());
if (wantedContainerResources.equalsCpu(existingContainer.resources)) return;
context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s",
wantedContainerResources.toStringCpu(), existingContainer.resources.toStringCpu());
orchestratorSuspendNode(context);
dockerOperations.updateContainer(context, wantedContainerResources);
}
// Computes the docker container resource limits for the given node. The CPU
// cap feature flag may carry a per-application override keyed on the node's
// owner; the flag value scales the node's minimum core count.
private ContainerResources getContainerResources(NodeSpec node) {
    double cpuCapFactor = node.getOwner()
            .map(NodeOwner::asApplicationId)
            .map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()))
            .orElse(containerCpuCap)
            .value();
    double cpuCap = cpuCapFactor * node.getMinCpuCores();
    return ContainerResources.from(cpuCap, node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
}
// Starts an async pull of the node's wanted docker image unless the container
// already runs it. Tracks the in-flight download in imageBeingDownloaded so
// converge can wait for it (see isDownloadingImage).
private void scheduleDownLoadIfNeeded(NodeSpec node, Optional<Container> container) {
    Optional<DockerImage> wantedImage = node.getWantedDockerImage();
    // Nothing to do when the container already runs the wanted image
    // (this also covers both being absent)
    if (wantedImage.equals(container.map(c -> c.image))) return;
    // Defensive guard: with no wanted image there is nothing to pull. The
    // previous code called Optional.get() unconditionally here and would throw
    // NoSuchElementException if a container existed but no image was wanted.
    if (!wantedImage.isPresent()) return;

    if (dockerOperations.pullImageAsyncIfNeeded(wantedImage.get())) {
        imageBeingDownloaded = wantedImage.get();
    } else if (imageBeingDownloaded != null) {
        // Pull finished (or was unnecessary) — clear the in-flight marker
        imageBeingDownloaded = null;
    }
}
public void converge(NodeAgentContext context) {
try {
doConverge(context);
} catch (OrchestratorException | ConvergenceException e) {
context.log(logger, e.getMessage());
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
} catch (DockerException e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
} catch (Throwable e) {
numberOfUnhandledException++;
context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring", e);
}
}
void doConverge(NodeAgentContext context) {
NodeSpec node = context.node();
Optional<Container> container = getContainer(context);
if (!node.equals(lastNode)) {
logChangesToNodeSpec(context, lastNode, node);
if (currentRebootGeneration < node.getCurrentRebootGeneration())
currentRebootGeneration = node.getCurrentRebootGeneration();
if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() ||
currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false))
currentRestartGeneration = node.getCurrentRestartGeneration();
if (container.map(c -> c.state.isRunning()).orElse(false)) {
storageMaintainer.writeMetricsConfig(context);
}
lastNode = node;
}
switch (node.getState()) {
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(context, container);
updateNodeRepoWithCurrentAttributes(context);
break;
case active:
storageMaintainer.handleCoreDumpsForContainer(context, container);
storageMaintainer.getDiskUsageFor(context)
.map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
.filter(diskUtil -> diskUtil >= 0.8)
.ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
scheduleDownLoadIfNeeded(node, container);
if (isDownloadingImage()) {
context.log(logger, "Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(context, container);
credentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
if (! container.isPresent()) {
containerState = STARTING;
startContainer(context);
containerState = UNKNOWN;
} else {
updateContainerIfNeeded(context, container.get());
}
aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
startServicesIfNeeded(context);
resumeNodeIfNeeded(context);
healthChecker.ifPresent(checker -> checker.verifyHealth(context));
updateNodeRepoWithCurrentAttributes(context);
context.log(logger, "Call resume against Orchestrator");
orchestrator.resume(context.hostname().value());
break;
case inactive:
removeContainerIfNeededUpdateContainerState(context, container);
updateNodeRepoWithCurrentAttributes(context);
break;
case provisioned:
nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty);
break;
case dirty:
removeContainerIfNeededUpdateContainerState(context, container);
context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
credentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
storageMaintainer.archiveNodeStorage(context);
updateNodeRepoWithCurrentAttributes(context);
nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
}
}
/** Logs an INFO line describing which tracked node-spec fields changed since the last spec (currently only "state"). */
private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
    StringBuilder changes = new StringBuilder();
    appendIfDifferent(changes, "state", lastNode, node, NodeSpec::getState);
    if (changes.length() == 0) return; // nothing changed, stay quiet
    context.log(logger, LogLevel.INFO, "Changes to node: " + changes);
}
/** Renders a possibly-null field value for log output; null is shown as "[absent]". */
private static <T> String fieldDescription(T value) {
    return Objects.toString(value, "[absent]");
}
/**
 * Appends "name oldValue -> newValue" to the builder if the field extracted by getter
 * differs between the two specs; entries are comma-separated. A null oldNode is
 * treated as "no previous value".
 */
private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
    T before = oldNode == null ? null : getter.apply(oldNode);
    T after = getter.apply(newNode);
    if (Objects.equals(before, after)) return; // unchanged, append nothing
    if (builder.length() > 0) builder.append(", ");
    builder.append(name).append(" ").append(fieldDescription(before)).append(" -> ").append(fieldDescription(after));
}
/**
 * Samples the container's docker stats and pushes derived CPU, memory, disk and
 * per-network-interface metrics to the container. No-op when the container state is
 * not UNKNOWN or when docker returns no stats for the container.
 */
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
    if (containerState != UNKNOWN) return;
    final NodeAgentContext context = contextSupplier.currentContext();
    final NodeSpec node = context.node();

    Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
    if (!containerStats.isPresent()) return;

    // Dimensions shared by every metric set emitted below.
    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
            .add("host", context.hostname().value())
            .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
            .add("state", node.getState().toString());
    node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
    node.getAllowedToBeDown().ifPresent(allowed ->
            dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
    Dimensions dimensions = dimensionsBuilder.build();

    ContainerStats stats = containerStats.get();

    final String APP = MetricReceiverWrapper.APPLICATION_NODE;
    final int totalNumCpuCores = stats.getCpuStats().getOnlineCpus();
    final long cpuContainerKernelTime = stats.getCpuStats().getUsageInKernelMode();
    final long cpuContainerTotalTime = stats.getCpuStats().getTotalUsage();
    final long cpuSystemTotalTime = stats.getCpuStats().getSystemCpuUsage();
    final long memoryTotalBytes = stats.getMemoryStats().getLimit();
    final long memoryTotalBytesUsage = stats.getMemoryStats().getUsage();
    final long memoryTotalBytesCache = stats.getMemoryStats().getCache();
    final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
    final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);

    lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

    // Scale CPU usage to this node's allocated share of host cores, so 100% means the node
    // used its full allocation. NOTE(review): if getOnlineCpus() can be 0 this divides by
    // zero and the ratios become Infinity/NaN — confirm upstream guarantees.
    final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
    double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
    double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

    // "used" excludes the page cache (usage - cache); "mem_total" figures include it.
    long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
    double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
    double memoryTotalUsageRatio = (double) memoryTotalBytesUsage / memoryTotalBytes;
    Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

    List<DimensionMetrics> metrics = new ArrayList<>();
    DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
            .withMetric("mem.limit", memoryTotalBytes)
            .withMetric("mem.used", memoryTotalBytesUsed)
            .withMetric("mem.util", 100 * memoryUsageRatio)
            .withMetric("mem_total.used", memoryTotalBytesUsage)
            .withMetric("mem_total.util", 100 * memoryTotalUsageRatio)
            .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
            .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
            .withMetric("disk.limit", diskTotalBytes);
    // Disk usage may be unavailable; only emit disk.used/disk.util when we have a sample.
    diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
    diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
    metrics.add(systemMetricsBuilder.build());

    // One metric set per network interface, tagged with the interface name.
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                .withMetric("net.in.bytes", interfaceStats.getRxBytes())
                .withMetric("net.in.errors", interfaceStats.getRxErrors())
                .withMetric("net.in.dropped", interfaceStats.getRxDropped())
                .withMetric("net.out.bytes", interfaceStats.getTxBytes())
                .withMetric("net.out.errors", interfaceStats.getTxErrors())
                .withMetric("net.out.dropped", interfaceStats.getTxDropped())
                .build();
        metrics.add(networkMetrics);
    });

    pushMetricsToContainer(context, metrics);
}
/**
 * Pushes the wrapped metrics blob into the container's metrics proxy via vespa-rpc-invoke.
 * Timeouts against the new proxy are logged at DEBUG; against the old proxy at WARNING.
 */
private void runPushMetricsCommand(NodeAgentContext context, String wrappedMetrics, boolean newMetricsProxy) {
    int rpcPort = newMetricsProxy ? 19094 : 19091;
    String[] rpcCommand = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:" + rpcPort, "setExtraMetrics", wrappedMetrics};
    try {
        dockerOperations.executeCommandInContainerAsRoot(context, 5L, rpcCommand);
    } catch (DockerExecTimeoutException e) {
        Level severity = newMetricsProxy ? LogLevel.DEBUG : LogLevel.WARNING;
        context.log(logger, severity, "Failed to push metrics to container", e);
    }
}
/**
 * Returns the docker container for this node, caching absence: once the container is
 * known (or found) to be ABSENT, docker is not queried again until the state changes.
 */
private Optional<Container> getContainer(NodeAgentContext context) {
    if (containerState == ABSENT) {
        return Optional.empty();
    }
    Optional<Container> existing = dockerOperations.getContainer(context);
    if (existing.isPresent()) {
        return existing;
    }
    containerState = ABSENT; // remember the miss so we skip the docker lookup next time
    return existing;
}
/**
 * Returns whether a docker image download is currently in progress for this node.
 * Assumes imageBeingDownloaded is set when a pull is scheduled and cleared when it
 * completes — both happen elsewhere in this class (not visible here); TODO confirm.
 */
@Override
public boolean isDownloadingImage() {
    return imageBeingDownloaded != null;
}
/**
 * Returns the number of unhandled exceptions counted since the previous call and
 * resets the counter to zero.
 * NOTE(review): the read-then-clear is not atomic; assumes calls are confined to a
 * single thread — confirm.
 */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    int count = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return count;
}
/** Tracks cumulative CPU counters between samples and exposes per-interval usage ratios. */
class CpuUsageReporter {
    // Cumulative counters as of the previous sample (0 before the first sample).
    private long prevContainerKernelUsage = 0;
    private long prevTotalContainerUsage = 0;
    private long prevTotalSystemUsage = 0;
    // Differences between the two most recent samples.
    private long deltaKernel;
    private long deltaContainer;
    private long deltaSystem;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // No previous system sample yet => report a zero system delta (ratios become NaN).
        deltaSystem = prevTotalSystemUsage == 0 ? 0 : totalSystemUsage - prevTotalSystemUsage;
        deltaContainer = totalContainerUsage - prevTotalContainerUsage;
        deltaKernel = containerKernelUsage - prevContainerKernelUsage;
        prevTotalSystemUsage = totalSystemUsage;
        prevTotalContainerUsage = totalContainerUsage;
        prevContainerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing,
     * over the interval between the last two calls to updateCpuDeltas(): container CPU time
     * divided by total system CPU time, or NaN when no interval is available yet.
     */
    double getCpuUsageRatio() {
        if (deltaSystem == 0) return Double.NaN;
        return (double) deltaContainer / deltaSystem;
    }

    /** Same as getCpuUsageRatio(), but counting only CPU time spent in kernel mode. */
    double getCpuKernelUsageRatio() {
        if (deltaSystem == 0) return Double.NaN;
        return (double) deltaKernel / deltaSystem;
    }
}
/**
 * Asks the Orchestrator for permission to suspend this node; only relevant when the
 * node is active. If the Orchestrator refuses, ACL convergence is still attempted
 * (best effort) before the refusal is rethrown.
 */
private void orchestratorSuspendNode(NodeAgentContext context) {
    if (context.node().getState() != NodeState.active) return;

    context.log(logger, "Ask Orchestrator for permission to suspend node");
    try {
        orchestrator.suspend(context.hostname().value());
    } catch (OrchestratorException e) {
        // Converge ACLs even on refusal; a failure here is logged, attached to the
        // original exception as suppressed, and the original exception is rethrown.
        try {
            aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
        } catch (RuntimeException suppressed) {
            logger.log(LogLevel.WARNING, "Suppressing ACL update failure: " + suppressed);
            e.addSuppressed(suppressed);
        }
        throw e;
    }
}
/** Returns a ContainerData stub whose file-adding operation is unsupported. */
protected ContainerData createContainerData(NodeAgentContext context) {
    return (path, content) -> {
        throw new UnsupportedOperationException("addFile not implemented");
    };
}
} |
Should this be `dev-aws-us-east-1a-runs.json`? It's not used anywhere | public void testResponses() {
InternalDeploymentTester tester = new InternalDeploymentTester();
tester.clock().setInstant(Instant.EPOCH);
ApplicationVersion revision1 = tester.deployNewSubmission();
assertEquals(2, tester.app().deploymentJobs().projectId().getAsLong());
tester.clock().advance(Duration.ofMillis(1000));
ApplicationVersion revision2 = tester.newSubmission();
tester.runJob(systemTest);
tester.runJob(stagingTest);
tester.runJob(productionUsCentral1);
tester.tester().readyJobTrigger().maintain();
tester.configServer().throwOnNextPrepare(new ConfigServerException(URI.create("url"), "ERROR!", INVALID_APPLICATION_PACKAGE, null));
tester.runner().run();
assertEquals(deploymentFailed, tester.jobs().last(appId, productionUsEast3).get().status());
ZoneId usWest1 = productionUsWest1.zone(tester.tester().controller().system());
tester.configServer().convergeServices(appId, usWest1);
tester.configServer().convergeServices(testerId.id(), usWest1);
tester.setEndpoints(appId, usWest1);
tester.setEndpoints(testerId.id(), usWest1);
tester.runner().run();
tester.cloud().set(FAILURE);
tester.runner().run();
assertEquals(testFailure, tester.jobs().last(appId, productionUsWest1).get().status());
assertEquals(revision2, tester.app().deployments().get(productionUsCentral1.zone(tester.tester().controller().system())).applicationVersion());
assertEquals(revision1, tester.app().deployments().get(productionUsEast3.zone(tester.tester().controller().system())).applicationVersion());
assertEquals(revision2, tester.app().deployments().get(productionUsWest1.zone(tester.tester().controller().system())).applicationVersion());
tester.clock().advance(Duration.ofMillis(1000));
tester.newSubmission();
tester.runJob(systemTest);
tester.runJob(stagingTest);
tester.tester().readyJobTrigger().maintain();
tester.tester().readyJobTrigger().maintain();
tester.runner().run();
assertEquals(running, tester.jobs().last(appId, productionUsCentral1).get().status());
assertEquals(running, tester.jobs().last(appId, stagingTest).get().status());
tester.tester().controller().applications().deactivate(appId, stagingTest.zone(tester.tester().controller().system()));
tester.runner().run();
assertEquals(installationFailed, tester.jobs().last(appId, stagingTest).get().status());
tester.clock().advance(Duration.ofMillis(100_000));
tester.tester().readyJobTrigger().maintain();
assertEquals(installationFailed, tester.jobs().last(appId, stagingTest).get().status());
Version platform = new Version("7.1");
tester.tester().upgradeSystem(platform);
assertResponse(JobControllerApiHandlerHelper.runResponse(tester.jobs().runs(appId, stagingTest), URI.create("https:
assertResponse(JobControllerApiHandlerHelper.runDetailsResponse(tester.jobs(), tester.jobs().last(appId, productionUsEast3).get().id(), "0"), "us-east-3-log-without-first.json");
assertResponse(JobControllerApiHandlerHelper.jobTypeResponse(tester.tester().controller(), appId, URI.create("https:
tester.jobs().deploy(appId, JobType.devAwsUsEast2a, Optional.empty(), applicationPackage);
tester.runJob(JobType.devAwsUsEast2a);
assertResponse(JobControllerApiHandlerHelper.runResponse(tester.jobs().runs(appId, stagingTest), URI.create("https:
} | assertResponse(JobControllerApiHandlerHelper.runResponse(tester.jobs().runs(appId, stagingTest), URI.create("https: | public void testResponses() {
InternalDeploymentTester tester = new InternalDeploymentTester();
tester.clock().setInstant(Instant.EPOCH);
ApplicationVersion revision1 = tester.deployNewSubmission();
assertEquals(2, tester.app().deploymentJobs().projectId().getAsLong());
tester.clock().advance(Duration.ofMillis(1000));
ApplicationVersion revision2 = tester.newSubmission();
tester.runJob(systemTest);
tester.runJob(stagingTest);
tester.runJob(productionUsCentral1);
tester.tester().readyJobTrigger().maintain();
tester.configServer().throwOnNextPrepare(new ConfigServerException(URI.create("url"), "ERROR!", INVALID_APPLICATION_PACKAGE, null));
tester.runner().run();
assertEquals(deploymentFailed, tester.jobs().last(appId, productionUsEast3).get().status());
ZoneId usWest1 = productionUsWest1.zone(tester.tester().controller().system());
tester.configServer().convergeServices(appId, usWest1);
tester.configServer().convergeServices(testerId.id(), usWest1);
tester.setEndpoints(appId, usWest1);
tester.setEndpoints(testerId.id(), usWest1);
tester.runner().run();
tester.cloud().set(FAILURE);
tester.runner().run();
assertEquals(testFailure, tester.jobs().last(appId, productionUsWest1).get().status());
assertEquals(revision2, tester.app().deployments().get(productionUsCentral1.zone(tester.tester().controller().system())).applicationVersion());
assertEquals(revision1, tester.app().deployments().get(productionUsEast3.zone(tester.tester().controller().system())).applicationVersion());
assertEquals(revision2, tester.app().deployments().get(productionUsWest1.zone(tester.tester().controller().system())).applicationVersion());
tester.clock().advance(Duration.ofMillis(1000));
tester.newSubmission();
tester.runJob(systemTest);
tester.runJob(stagingTest);
tester.tester().readyJobTrigger().maintain();
tester.tester().readyJobTrigger().maintain();
tester.runner().run();
assertEquals(running, tester.jobs().last(appId, productionUsCentral1).get().status());
assertEquals(running, tester.jobs().last(appId, stagingTest).get().status());
tester.tester().controller().applications().deactivate(appId, stagingTest.zone(tester.tester().controller().system()));
tester.runner().run();
assertEquals(installationFailed, tester.jobs().last(appId, stagingTest).get().status());
tester.clock().advance(Duration.ofMillis(100_000));
tester.tester().readyJobTrigger().maintain();
assertEquals(installationFailed, tester.jobs().last(appId, stagingTest).get().status());
Version platform = new Version("7.1");
tester.tester().upgradeSystem(platform);
assertResponse(JobControllerApiHandlerHelper.runResponse(tester.jobs().runs(appId, stagingTest), URI.create("https:
assertResponse(JobControllerApiHandlerHelper.runDetailsResponse(tester.jobs(), tester.jobs().last(appId, productionUsEast3).get().id(), "0"), "us-east-3-log-without-first.json");
assertResponse(JobControllerApiHandlerHelper.jobTypeResponse(tester.tester().controller(), appId, URI.create("https:
tester.jobs().deploy(appId, JobType.devAwsUsEast2a, Optional.empty(), applicationPackage);
tester.runJob(JobType.devAwsUsEast2a);
assertResponse(JobControllerApiHandlerHelper.runResponse(tester.jobs().runs(appId, stagingTest), URI.create("https:
} | class JobControllerApiHandlerHelperTest {
@Test
/**
 * Renders the HTTP response and asserts that its body is JSON-equivalent to the
 * expected JSON string (compared via canonical JSONObject.toString()).
 *
 * @throws JSONException if either side is not valid JSON
 * @throws IOException   if rendering the response fails
 */
private void compare(HttpResponse response, String expected) throws JSONException, IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    response.render(baos);
    System.err.println(baos); // debug aid: dump the rendered response on failure analysis
    // Decode explicitly as UTF-8: the no-arg String(byte[]) constructor uses the
    // platform default charset, which breaks non-ASCII payloads on non-UTF-8 machines.
    JSONObject actualJSON = new JSONObject(new String(baos.toByteArray(), java.nio.charset.StandardCharsets.UTF_8));
    JSONObject expectedJSON = new JSONObject(expected);
    assertEquals(expectedJSON.toString(), actualJSON.toString());
}
/**
 * Reads the expected JSON from the named file under the responses test-resource
 * directory and asserts the rendered response is JSON-equivalent to it.
 * Any failure (missing file, bad JSON, render error) is rethrown as RuntimeException.
 */
private void assertResponse(HttpResponse response, String fileName) {
    try {
        Path path = Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/").resolve(fileName);
        // Decode explicitly as UTF-8 instead of the platform default charset, so the
        // comparison is stable across machines regardless of their locale settings.
        String expected = new String(Files.readAllBytes(path), java.nio.charset.StandardCharsets.UTF_8);
        compare(response, expected);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
} | class JobControllerApiHandlerHelperTest {
@Test
private void compare(HttpResponse response, String expected) throws JSONException, IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
response.render(baos);
System.err.println(baos);
JSONObject actualJSON = new JSONObject(new String(baos.toByteArray()));
JSONObject expectedJSON = new JSONObject(expected);
assertEquals(expectedJSON.toString(), actualJSON.toString());
}
private void assertResponse(HttpResponse response, String fileName) {
try {
Path path = Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/").resolve(fileName);
String expected = new String(Files.readAllBytes(path));
compare(response, expected);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
} |
🤦♂ | public void testResponses() {
InternalDeploymentTester tester = new InternalDeploymentTester();
tester.clock().setInstant(Instant.EPOCH);
ApplicationVersion revision1 = tester.deployNewSubmission();
assertEquals(2, tester.app().deploymentJobs().projectId().getAsLong());
tester.clock().advance(Duration.ofMillis(1000));
ApplicationVersion revision2 = tester.newSubmission();
tester.runJob(systemTest);
tester.runJob(stagingTest);
tester.runJob(productionUsCentral1);
tester.tester().readyJobTrigger().maintain();
tester.configServer().throwOnNextPrepare(new ConfigServerException(URI.create("url"), "ERROR!", INVALID_APPLICATION_PACKAGE, null));
tester.runner().run();
assertEquals(deploymentFailed, tester.jobs().last(appId, productionUsEast3).get().status());
ZoneId usWest1 = productionUsWest1.zone(tester.tester().controller().system());
tester.configServer().convergeServices(appId, usWest1);
tester.configServer().convergeServices(testerId.id(), usWest1);
tester.setEndpoints(appId, usWest1);
tester.setEndpoints(testerId.id(), usWest1);
tester.runner().run();
tester.cloud().set(FAILURE);
tester.runner().run();
assertEquals(testFailure, tester.jobs().last(appId, productionUsWest1).get().status());
assertEquals(revision2, tester.app().deployments().get(productionUsCentral1.zone(tester.tester().controller().system())).applicationVersion());
assertEquals(revision1, tester.app().deployments().get(productionUsEast3.zone(tester.tester().controller().system())).applicationVersion());
assertEquals(revision2, tester.app().deployments().get(productionUsWest1.zone(tester.tester().controller().system())).applicationVersion());
tester.clock().advance(Duration.ofMillis(1000));
tester.newSubmission();
tester.runJob(systemTest);
tester.runJob(stagingTest);
tester.tester().readyJobTrigger().maintain();
tester.tester().readyJobTrigger().maintain();
tester.runner().run();
assertEquals(running, tester.jobs().last(appId, productionUsCentral1).get().status());
assertEquals(running, tester.jobs().last(appId, stagingTest).get().status());
tester.tester().controller().applications().deactivate(appId, stagingTest.zone(tester.tester().controller().system()));
tester.runner().run();
assertEquals(installationFailed, tester.jobs().last(appId, stagingTest).get().status());
tester.clock().advance(Duration.ofMillis(100_000));
tester.tester().readyJobTrigger().maintain();
assertEquals(installationFailed, tester.jobs().last(appId, stagingTest).get().status());
Version platform = new Version("7.1");
tester.tester().upgradeSystem(platform);
assertResponse(JobControllerApiHandlerHelper.runResponse(tester.jobs().runs(appId, stagingTest), URI.create("https:
assertResponse(JobControllerApiHandlerHelper.runDetailsResponse(tester.jobs(), tester.jobs().last(appId, productionUsEast3).get().id(), "0"), "us-east-3-log-without-first.json");
assertResponse(JobControllerApiHandlerHelper.jobTypeResponse(tester.tester().controller(), appId, URI.create("https:
tester.jobs().deploy(appId, JobType.devAwsUsEast2a, Optional.empty(), applicationPackage);
tester.runJob(JobType.devAwsUsEast2a);
assertResponse(JobControllerApiHandlerHelper.runResponse(tester.jobs().runs(appId, stagingTest), URI.create("https:
} | assertResponse(JobControllerApiHandlerHelper.runResponse(tester.jobs().runs(appId, stagingTest), URI.create("https: | public void testResponses() {
InternalDeploymentTester tester = new InternalDeploymentTester();
tester.clock().setInstant(Instant.EPOCH);
ApplicationVersion revision1 = tester.deployNewSubmission();
assertEquals(2, tester.app().deploymentJobs().projectId().getAsLong());
tester.clock().advance(Duration.ofMillis(1000));
ApplicationVersion revision2 = tester.newSubmission();
tester.runJob(systemTest);
tester.runJob(stagingTest);
tester.runJob(productionUsCentral1);
tester.tester().readyJobTrigger().maintain();
tester.configServer().throwOnNextPrepare(new ConfigServerException(URI.create("url"), "ERROR!", INVALID_APPLICATION_PACKAGE, null));
tester.runner().run();
assertEquals(deploymentFailed, tester.jobs().last(appId, productionUsEast3).get().status());
ZoneId usWest1 = productionUsWest1.zone(tester.tester().controller().system());
tester.configServer().convergeServices(appId, usWest1);
tester.configServer().convergeServices(testerId.id(), usWest1);
tester.setEndpoints(appId, usWest1);
tester.setEndpoints(testerId.id(), usWest1);
tester.runner().run();
tester.cloud().set(FAILURE);
tester.runner().run();
assertEquals(testFailure, tester.jobs().last(appId, productionUsWest1).get().status());
assertEquals(revision2, tester.app().deployments().get(productionUsCentral1.zone(tester.tester().controller().system())).applicationVersion());
assertEquals(revision1, tester.app().deployments().get(productionUsEast3.zone(tester.tester().controller().system())).applicationVersion());
assertEquals(revision2, tester.app().deployments().get(productionUsWest1.zone(tester.tester().controller().system())).applicationVersion());
tester.clock().advance(Duration.ofMillis(1000));
tester.newSubmission();
tester.runJob(systemTest);
tester.runJob(stagingTest);
tester.tester().readyJobTrigger().maintain();
tester.tester().readyJobTrigger().maintain();
tester.runner().run();
assertEquals(running, tester.jobs().last(appId, productionUsCentral1).get().status());
assertEquals(running, tester.jobs().last(appId, stagingTest).get().status());
tester.tester().controller().applications().deactivate(appId, stagingTest.zone(tester.tester().controller().system()));
tester.runner().run();
assertEquals(installationFailed, tester.jobs().last(appId, stagingTest).get().status());
tester.clock().advance(Duration.ofMillis(100_000));
tester.tester().readyJobTrigger().maintain();
assertEquals(installationFailed, tester.jobs().last(appId, stagingTest).get().status());
Version platform = new Version("7.1");
tester.tester().upgradeSystem(platform);
assertResponse(JobControllerApiHandlerHelper.runResponse(tester.jobs().runs(appId, stagingTest), URI.create("https:
assertResponse(JobControllerApiHandlerHelper.runDetailsResponse(tester.jobs(), tester.jobs().last(appId, productionUsEast3).get().id(), "0"), "us-east-3-log-without-first.json");
assertResponse(JobControllerApiHandlerHelper.jobTypeResponse(tester.tester().controller(), appId, URI.create("https:
tester.jobs().deploy(appId, JobType.devAwsUsEast2a, Optional.empty(), applicationPackage);
tester.runJob(JobType.devAwsUsEast2a);
assertResponse(JobControllerApiHandlerHelper.runResponse(tester.jobs().runs(appId, stagingTest), URI.create("https:
} | class JobControllerApiHandlerHelperTest {
@Test
private void compare(HttpResponse response, String expected) throws JSONException, IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
response.render(baos);
System.err.println(baos);
JSONObject actualJSON = new JSONObject(new String(baos.toByteArray()));
JSONObject expectedJSON = new JSONObject(expected);
assertEquals(expectedJSON.toString(), actualJSON.toString());
}
private void assertResponse(HttpResponse response, String fileName) {
try {
Path path = Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/").resolve(fileName);
String expected = new String(Files.readAllBytes(path));
compare(response, expected);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
} | class JobControllerApiHandlerHelperTest {
@Test
private void compare(HttpResponse response, String expected) throws JSONException, IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
response.render(baos);
System.err.println(baos);
JSONObject actualJSON = new JSONObject(new String(baos.toByteArray()));
JSONObject expectedJSON = new JSONObject(expected);
assertEquals(expectedJSON.toString(), actualJSON.toString());
}
private void assertResponse(HttpResponse response, String fileName) {
try {
Path path = Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/").resolve(fileName);
String expected = new String(Files.readAllBytes(path));
compare(response, expected);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
} |
That is the assumption, yes. | RPCTarget getTarget(long now) {
if (index >= targets.length) {
index = 0;
}
RPCTarget target = targets[index];
if (target.getJRTTarget().isValid()) {
target.addRef();
lastUse = now;
index++;
return target;
}
return null;
} | RPCTarget target = targets[index]; | RPCTarget getTarget(long now) {
if (index >= targets.length) {
index = 0;
}
RPCTarget target = targets[index];
if (target.getJRTTarget().isValid()) {
target.addRef();
lastUse = now;
index++;
return target;
}
return null;
} | class Entry implements Closeable {
// The pooled RPC targets for one spec; selection round-robins over this array.
private final RPCTarget [] targets;
// Next round-robin position into targets (wraps in getTarget).
private int index;
// Timestamp of the most recent successful getTarget call; used for expiry elsewhere.
long lastUse;

/** Creates an entry over a fixed set of targets, recording the initial last-use time. */
Entry(RPCTarget [] targets, long lastUse) {
    this.targets = targets;
    this.lastUse = lastUse;
}
/** Returns true only if every target in this entry still has a valid JRT connection. */
boolean isValid() {
    for (int i = 0; i < targets.length; i++) {
        if ( ! targets[i].getJRTTarget().isValid()) {
            return false;
        }
    }
    return true;
}
/** Returns the sum of the reference counts of all targets in this entry. */
int getRefCount() {
    int total = 0;
    for (RPCTarget target : targets) {
        total += target.getRefCount();
    }
    return total;
}
/** Releases this entry's reference on every pooled target. */
@Override
public void close() {
    for (RPCTarget t : targets) {
        t.subRef();
    }
}
} | class Entry implements Closeable {
private final RPCTarget [] targets;
private int index;
long lastUse;
Entry(RPCTarget [] targets, long lastUse) {
this.targets = targets;
this.lastUse = lastUse;
}
boolean isValid() {
for (RPCTarget target : targets) {
if ( ! target.getJRTTarget().isValid()) {
return false;
}
}
return true;
}
int getRefCount() {
int refCount = 0;
for (RPCTarget target : targets) {
refCount += target.getRefCount();
}
return refCount;
}
@Override
public void close() {
for (RPCTarget target : targets) {
target.subRef();
}
}
} |
Setting references to null is generally not recommended, but I assume this is one of the cases where it really has an impact. I suggest to leave a comment so that other developers don't start setting references to null unnecessarily. (https://stackoverflow.com/questions/449409/does-assigning-objects-to-null-in-java-impact-garbage-collection) | public void close() {
if (configSub!=null) {
configSub.close();
configSub = null;
}
configSubscriber = null;
} | configSub = null; | public void close() {
if (configSub!=null) {
configSub.close();
configSub = null;
}
configSubscriber = null;
} | class Distribution {
// distributionBitMasks[i] has the i lowest bits set; filled in by the constructors.
private int[] distributionBitMasks = new int[65];
// Root of the configured group tree; set by the config callback.
private Group nodeGraph;
// Top-level redundancy from config.
private int redundancy;
private boolean distributorAutoOwnershipTransferOnWholeGroupDown = false;
// Only set when constructed from a config id (live subscription); otherwise null.
private ConfigSubscriber configSub;
/** Returns the root of the configured group tree (null until configuration has arrived). */
public Group getRootGroup() {
    return nodeGraph;
}
/** Returns the configured top-level redundancy (0 until configuration has arrived). */
public int getRedundancy() {
    return redundancy;
}
/**
 * Config callback that (re)builds the group tree from stor-distribution config.
 * Groups arrive as a flat list; each group's position in the tree is encoded in its
 * dot-separated index path (e.g. "0.1").
 */
private ConfigSubscriber.SingleSubscriber<StorDistributionConfig> configSubscriber = new ConfigSubscriber.SingleSubscriber<>() {
    // Parses a dot-separated index path into its integer components.
    // The literal path "invalid" maps to the empty (root) path.
    private int[] getGroupPath(String path) {
        if (path.equals("invalid")) { return new int[0]; }
        StringTokenizer st = new StringTokenizer(path, ".");
        int[] p = new int[st.countTokens()];
        for (int i=0; i<p.length; ++i) {
            p[i] = Integer.valueOf(st.nextToken());
        }
        return p;
    }

    @Override
    public void configure(StorDistributionConfig config) {
        try{
            Group root = null;
            for (int i=0; i<config.group().size(); ++i) {
                StorDistributionConfig.Group cg = config.group().get(i);
                // The first group seen becomes the root; only later groups get a real path.
                int[] path = new int[0];
                if (root != null) {
                    path = getGroupPath(cg.index());
                }
                // A group with nodes is a leaf; otherwise it is an inner group with partitions.
                boolean isLeafGroup = (cg.nodes().size() > 0);
                Group group;
                int index = (path.length == 0 ? 0 : path[path.length - 1]);
                if (isLeafGroup) {
                    group = new Group(index, cg.name());
                    List<ConfiguredNode> nodes = new ArrayList<>();
                    for (StorDistributionConfig.Group.Nodes node : cg.nodes()) {
                        nodes.add(new ConfiguredNode(node.index(), node.retired()));
                    }
                    group.setNodes(nodes);
                } else {
                    group = new Group(index, cg.name(), new Group.Distribution(cg.partitions(), config.redundancy()));
                }
                group.setCapacity(cg.capacity());
                if (path.length == 0) {
                    root = group;
                } else {
                    // Walk down the tree to the parent identified by the path prefix.
                    // Assumes parents appear before their children in the config list.
                    Group parent = root;
                    for (int j=0; j<path.length - 1; ++j) {
                        parent = parent.getSubgroups().get(path[j]);
                    }
                    parent.addSubGroup(group);
                }
            }
            if (root == null) {
                throw new IllegalStateException("Got config that did not "
                        + "specify even a root group. Need a root group at"
                        + "\nminimum:\n" + config.toString());
            }
            root.calculateDistributionHashValues();
            // Publish the completed tree and settings to the enclosing Distribution.
            Distribution.this.nodeGraph = root;
            Distribution.this.redundancy = config.redundancy();
            distributorAutoOwnershipTransferOnWholeGroupDown = config.distributor_auto_ownership_transfer_on_whole_group_down();
        } catch (ParseException e) {
            throw (IllegalStateException) new IllegalStateException("Failed to parse config").initCause(e);
        }
    }
};
/**
 * Creates a distribution that subscribes to stor-distribution config under the given
 * config id; the subscription callback populates the group tree asynchronously.
 * If subscribing throws, the subscriber is closed again before rethrowing, to avoid a leak.
 */
public Distribution(String configId) {
    // Build the mask table: distributionBitMasks[i] has the i lowest bits set.
    int mask = 0;
    for (int i=0; i<=64; ++i) {
        distributionBitMasks[i] = mask;
        mask = (mask << 1) | 1;
    }
    try {
        configSub = new ConfigSubscriber();
        configSub.subscribe(configSubscriber, StorDistributionConfig.class, configId);
    } catch (Throwable e) {
        close();
        throw e;
    }
}
/**
 * Creates a distribution directly from an already-resolved config instance.
 * No subscription is set up, so configSub stays null.
 */
public Distribution(StorDistributionConfig config) {
    // Same mask table as the config-id constructor: mask[i] has the i lowest bits set.
    int mask = 0;
    for (int i=0; i<=64; ++i) {
        distributionBitMasks[i] = mask;
        mask = (mask << 1) | 1;
    }
    configSubscriber.configure(config);
}
/**
 * Returns a deterministic seed for selecting among a group's children: the low
 * distribution-bit-count bits of the bucket id, xored with the group's distribution hash.
 */
private int getGroupSeed(BucketId bucket, ClusterState state, Group group) {
    int maskedBucketBits = ((int) bucket.getRawId()) & distributionBitMasks[state.getDistributionBitCount()];
    return maskedBucketBits ^ group.getDistributionHash();
}
/** Returns the distributor seed: the low distribution-bit-count bits of the bucket id. */
private int getDistributorSeed(BucketId bucket, ClusterState state) {
    return ((int) bucket.getRawId()) & distributionBitMasks[state.getDistributionBitCount()];
}
/**
 * Returns the storage seed for a bucket: the low distribution-bit-count bits of the id.
 * Buckets using more than 33 bits additionally mix in the relevant portion of the high
 * 32 id bits (shifted by 6) so such buckets do not all produce identical seeds.
 */
private int getStorageSeed(BucketId bucket, ClusterState state) {
    int seed = ((int) bucket.getRawId()) & distributionBitMasks[state.getDistributionBitCount()];

    if (bucket.getUsedBits() > 33) {
        int usedBits = bucket.getUsedBits() - 1;
        seed ^= (distributionBitMasks[usedBits - 32]
                & (bucket.getRawId() >> 32)) << 6;
    }
    return seed;
}
/** Pairs a group with its weighted random score; ordered by descending score. */
private static class ScoredGroup implements Comparable<ScoredGroup> {
    Group group;
    double score;

    ScoredGroup(Group g, double score) { this.group = g; this.score = score; }

    @Override
    public int compareTo(ScoredGroup o) {
        // Descending by score. Double.compare gives the same total ordering as the
        // original Double.valueOf(o.score).compareTo(score) without boxing a Double
        // on every comparison.
        return Double.compare(o.score, score);
    }
}
/** Plain value holder: a node index with its reliability and computed weighted score. */
private static class ScoredNode {
    int index;
    int reliability;
    double score;
    ScoredNode(int index, int reliability, double score) { this.index = index; this.reliability = reliability; this.score = score; }
}
/**
 * Returns true if no distributor anywhere under the given group is in one of the
 * states "ui" according to the cluster state; inner groups recurse over all children.
 */
private static boolean allDistributorsDown(Group g, ClusterState clusterState) {
    if (g.isLeafGroup()) {
        for (ConfiguredNode node : g.getNodes()) {
            NodeState ns = clusterState.getNodeState(new Node(NodeType.DISTRIBUTOR, node.index()));
            if (ns.getState().oneOf("ui")) {
                return false;
            }
        }
        return true;
    }
    // An inner group is down only if every child group is down.
    for (Group child : g.getSubgroups().values()) {
        if ( ! allDistributorsDown(child, clusterState)) {
            return false;
        }
    }
    return true;
}
/**
 * Recursively picks the leaf group whose distributor should own the bucket, using
 * capacity-weighted random selection seeded deterministically from bucket and group.
 * Returns null when no candidate remains (all groups fully down with auto ownership
 * transfer enabled).
 */
private Group getIdealDistributorGroup(BucketId bucket, ClusterState clusterState, Group parent, int redundancy) {
    if (parent.isLeafGroup()) {
        return parent;
    }
    int[] redundancyArray = parent.getDistribution().getRedundancyArray(redundancy);
    TreeSet<ScoredGroup> results = new TreeSet<>();
    int seed = getGroupSeed(bucket, clusterState, parent);
    RandomGen random = new RandomGen(seed);
    int currentIndex = 0;
    for(Group g : parent.getSubgroups().values()) {
        // Burn one random number per skipped subgroup index so each index always
        // consumes the same position of the random sequence, keeping scores stable.
        while (g.getIndex() < currentIndex++) random.nextDouble();
        double score = random.nextDouble();
        if (Math.abs(g.getCapacity() - 1.0) > 0.0000001) {
            // Capacity weighting: score^(1/capacity) biases selection toward larger groups.
            score = Math.pow(score, 1.0 / g.getCapacity());
        }
        results.add(new ScoredGroup(g, score));
    }
    if (distributorAutoOwnershipTransferOnWholeGroupDown) {
        // Drop candidates whose distributors are all down, so ownership moves to a live group.
        while (!results.isEmpty() && allDistributorsDown(results.first().group, clusterState)) {
            results.remove(results.first());
        }
    }
    if (results.isEmpty()) {
        return null;
    }
    return getIdealDistributorGroup(bucket, clusterState, results.first().group, redundancyArray[0]);
}
/** Pairs a selected leaf group with the redundancy assigned to it; ordered by group. */
private static class ResultGroup implements Comparable<ResultGroup> {
    Group group;
    int redundancy;

    ResultGroup(Group group, int redundancy) {
        this.group = group;
        this.redundancy = redundancy;
    }

    @Override
    public int compareTo(ResultGroup o) {
        return group.compareTo(o.group);
    }
}
/**
 * Recursively selects, at each level of the group tree, the highest-scoring subgroups
 * and splits the redundancy among them, collecting the chosen leaf groups (with their
 * assigned redundancy) into results.
 */
private void getIdealGroups(BucketId bucketId, ClusterState clusterState, Group parent,
                            int redundancy, List<ResultGroup> results) {
    if (parent.isLeafGroup()) {
        results.add(new ResultGroup(parent, redundancy));
        return;
    }

    int[] redundancyArray = parent.getDistribution().getRedundancyArray(redundancy);
    // Seed the winner list with sentinel entries: one slot per redundancy share.
    List<ScoredGroup> tmpResults = new ArrayList<>();
    for (int i = 0; i < redundancyArray.length; ++i) {
        tmpResults.add(new ScoredGroup(null, 0.0));
    }

    int seed = getGroupSeed(bucketId, clusterState, parent);
    RandomGen random = new RandomGen(seed);
    int currentIndex = 0;
    Map<Integer, Group> subGroups = parent.getSubgroups();
    for (Map.Entry<Integer, Group> group : subGroups.entrySet()) {
        // Burn one random number per skipped subgroup index so each index always
        // consumes the same position of the random sequence, keeping scores stable.
        while (group.getKey() < currentIndex++) {
            random.nextDouble();
        }
        double score = random.nextDouble();
        if (group.getValue().getCapacity() != 1) {
            // Capacity weighting: score^(1/capacity) biases selection toward larger groups.
            score = Math.pow(score, 1.0 / group.getValue().getCapacity());
        }
        if (score > tmpResults.get(tmpResults.size() - 1).score) {
            // Insert, re-sort (descending by score), and drop the weakest to keep the
            // list bounded at redundancyArray.length entries.
            tmpResults.add(new ScoredGroup(group.getValue(), score));
            Collections.sort(tmpResults);
            tmpResults.remove(tmpResults.size() - 1);
        }
    }

    // Recurse into each winning subgroup with its share of the redundancy; sentinel
    // (null-group) slots mean fewer subgroups than shares and are skipped.
    for (int i = 0; i < tmpResults.size(); ++i) {
        Group group = tmpResults.get(i).group;
        if (group != null) {
            getIdealGroups(bucketId, clusterState, group, redundancyArray[i], results);
        }
    }
}
private int getDiskSeed(BucketId bucket, int nodeIndex) {
long currentid = bucket.withoutCountBits();
byte[] ordered = new byte[8];
ordered[0] = (byte)(currentid >> (0*8));
ordered[1] = (byte)(currentid >> (1*8));
ordered[2] = (byte)(currentid >> (2*8));
ordered[3] = (byte)(currentid >> (3*8));
ordered[4] = (byte)(currentid >> (4*8));
ordered[5] = (byte)(currentid >> (5*8));
ordered[6] = (byte)(currentid >> (6*8));
ordered[7] = (byte)(currentid >> (7*8));
int initval = (1664525 * nodeIndex + 0xdeadbeef);
return BobHash.hash(ordered, initval);
}
/**
* This function should only depend on disk distribution and node index. It is
* assumed that any other change, for instance in hierarchical grouping, does
* not change disk index on disk.
*/
int getIdealDisk(NodeState nodeState, int nodeIndex, BucketId bucket) {
if (nodeState.getDiskCount() < 2) {
if (nodeState.getDiskCount() == 1) {
return 0;
}
throw new IllegalArgumentException(
"Cannot pick ideal disk without knowing disk count.");
}
RandomGen randomizer = new RandomGen(getDiskSeed(bucket, nodeIndex));
double maxScore = 0.0;
int idealDisk = 0xffff;
for (int i=0, n=nodeState.getDiskCount(); i<n; ++i) {
double score = randomizer.nextDouble();
DiskState diskState = (nodeState.getDiskState(i));
if (diskState.getCapacity() != 1.0) {
score = Math.pow(score,
1.0 / diskState.getCapacity());
}
if (score > maxScore) {
maxScore = score;
idealDisk = i;
}
}
return idealDisk;
}
List<Integer> getIdealStorageNodes(ClusterState clusterState, BucketId bucket,
String upStates) throws TooFewBucketBitsInUseException {
List<Integer> resultNodes = new ArrayList<>();
if (bucket.getUsedBits() < clusterState.getDistributionBitCount()) {
String msg = "Cannot get ideal state for bucket " + bucket + " using "
+ bucket.getUsedBits() + " bits when cluster uses "
+ clusterState.getDistributionBitCount() + " distribution bits.";
throw new TooFewBucketBitsInUseException(msg);
}
List<ResultGroup> groupDistribution = new ArrayList<>();
getIdealGroups(bucket, clusterState, nodeGraph, redundancy, groupDistribution);
int seed = getStorageSeed(bucket, clusterState);
RandomGen random = new RandomGen(seed);
int randomIndex = 0;
for (ResultGroup group : groupDistribution) {
int redundancy = group.redundancy;
Collection<ConfiguredNode> nodes = group.group.getNodes();
LinkedList<ScoredNode> tmpResults = new LinkedList<>();
for (int i = 0; i < redundancy; ++i) {
tmpResults.add(new ScoredNode(0, 0, 0.0));
}
for (ConfiguredNode configuredNode : nodes) {
NodeState nodeState = clusterState.getNodeState(new Node(NodeType.STORAGE, configuredNode.index()));
if (!nodeState.getState().oneOf(upStates)) {
continue;
}
if (nodeState.isAnyDiskDown()) {
int idealDiskIndex = getIdealDisk(nodeState, configuredNode.index(), bucket);
if (nodeState.getDiskState(idealDiskIndex).getState() != State.UP) {
continue;
}
}
if (configuredNode.index() != randomIndex) {
if (configuredNode.index() < randomIndex) {
random.setSeed(seed);
randomIndex = 0;
}
for (int k = randomIndex; k < configuredNode.index(); ++k) {
random.nextDouble();
}
randomIndex = configuredNode.index();
}
double score = random.nextDouble();
++randomIndex;
if (nodeState.getCapacity() != 1.0) {
score = Math.pow(score, 1.0 / nodeState.getCapacity());
}
if (score > tmpResults.getLast().score) {
for (int i = 0; i < tmpResults.size(); ++i) {
if (score > tmpResults.get(i).score) {
tmpResults.add(i, new ScoredNode(configuredNode.index(), nodeState.getReliability(), score));
break;
}
}
tmpResults.removeLast();
}
}
for (ScoredNode node : tmpResults) {
resultNodes.add(node.index);
}
}
return resultNodes;
}
public static class TooFewBucketBitsInUseException extends Exception {
TooFewBucketBitsInUseException(String message) {
super(message);
}
}
public static class NoDistributorsAvailableException extends Exception {
NoDistributorsAvailableException(String message) {
super(message);
}
}
public int getIdealDistributorNode(ClusterState state, BucketId bucket, String upStates) throws TooFewBucketBitsInUseException, NoDistributorsAvailableException {
if (bucket.getUsedBits() < state.getDistributionBitCount()) {
throw new TooFewBucketBitsInUseException("Cannot get ideal state for bucket " + bucket + " using " + bucket.getUsedBits()
+ " bits when cluster uses " + state.getDistributionBitCount() + " distribution bits.");
}
Group idealGroup = getIdealDistributorGroup(bucket, state, nodeGraph, redundancy);
if (idealGroup == null) {
throw new NoDistributorsAvailableException("No distributors available in cluster state version " + state.getVersion());
}
int seed = getDistributorSeed(bucket, state);
RandomGen random = new RandomGen(seed);
int randomIndex = 0;
List<ConfiguredNode> configuredNodes = idealGroup.getNodes();
ScoredNode node = new ScoredNode(0, 0, 0);
for (ConfiguredNode configuredNode : configuredNodes) {
NodeState nodeState = state.getNodeState(new Node(NodeType.DISTRIBUTOR, configuredNode.index()));
if (!nodeState.getState().oneOf(upStates)) continue;
if (configuredNode.index() != randomIndex) {
if (configuredNode.index() < randomIndex) {
random.setSeed(seed);
randomIndex = 0;
}
for (int k=randomIndex; k < configuredNode.index(); ++k) {
random.nextDouble();
}
randomIndex = configuredNode.index();
}
double score = random.nextDouble();
++randomIndex;
if (Math.abs(nodeState.getCapacity() - 1.0) > 0.0000001) {
score = Math.pow(score, 1.0 / nodeState.getCapacity());
}
if (score > node.score) {
node = new ScoredNode(configuredNode.index(), 1, score);
}
}
if (node.reliability == 0) {
throw new NoDistributorsAvailableException(
"No available distributors in any of the given upstates '"
+ upStates + "'.");
}
return node.index;
}
private boolean visitGroups(GroupVisitor visitor, Map<Integer, Group> groups) {
for (Group g : groups.values()) {
if (!visitor.visitGroup(g)) return false;
if (!g.isLeafGroup()) {
if (!visitGroups(visitor, g.getSubgroups())) {
return false;
}
}
}
return true;
}
public void visitGroups(GroupVisitor visitor) {
Map<Integer, Group> groups = new TreeMap<>();
groups.put(nodeGraph.getIndex(), nodeGraph);
visitGroups(visitor, groups);
}
public Set<ConfiguredNode> getNodes() {
final Set<ConfiguredNode> nodes = new HashSet<>();
GroupVisitor visitor = new GroupVisitor() {
@Override
public boolean visitGroup(Group g) {
if (g.isLeafGroup()) {
nodes.addAll(g.getNodes());
}
return true;
}
};
visitGroups(visitor);
return nodes;
}
public static String getDefaultDistributionConfig(int redundancy, int nodeCount) {
return getDefaultDistributionConfig(redundancy, nodeCount, StorDistributionConfig.Disk_distribution.MODULO_BID);
}
public static String getDefaultDistributionConfig(int redundancy, int nodeCount, StorDistributionConfig.Disk_distribution.Enum diskDistribution) {
StringBuilder sb = new StringBuilder();
sb.append("raw:redundancy ").append(redundancy).append("\n")
.append("group[1]\n")
.append("group[0].index \"invalid\"\n")
.append("group[0].name \"invalid\"\n")
.append("group[0].partitions \"*\"\n")
.append("group[0].nodes[").append(nodeCount).append("]\n");
for (int i=0; i<nodeCount; ++i) {
sb.append("group[0].nodes[").append(i).append("].index ").append(i).append("\n");
}
sb.append("disk_distribution ").append(diskDistribution.toString()).append("\n");
return sb.toString();
}
public static String getSimpleGroupConfig(int redundancy, int nodeCount) {
return getSimpleGroupConfig(redundancy, nodeCount, StorDistributionConfig.Disk_distribution.Enum.MODULO_BID);
}
private static String getSimpleGroupConfig(int redundancy, int nodeCount, StorDistributionConfig.Disk_distribution.Enum diskDistribution) {
StringBuilder sb = new StringBuilder();
sb.append("raw:redundancy ").append(redundancy).append("\n").append("group[4]\n");
int group = 0;
sb.append("group[" + group + "].index \"invalid\"\n")
.append("group[" + group + "].name \"invalid\"\n")
.append("group[" + group + "].partitions \"1|*\"\n");
++group;
sb.append("group[" + group + "].index \"0\"\n")
.append("group[" + group + "].name \"east\"\n")
.append("group[" + group + "].partitions \"*\"\n");
++group;
sb.append("group[" + group + "].index \"0.0\"\n")
.append("group[" + group + "].name \"g1\"\n")
.append("group[" + group + "].partitions \"*\"\n")
.append("group[" + group + "].nodes[").append((nodeCount + 1) / 2).append("]\n");
for (int i=0; i<nodeCount; i += 2) {
sb.append("group[" + group + "].nodes[").append(i / 2).append("].index ").append(i).append("\n");
}
++group;
sb.append("group[" + group + "].index \"0.1\"\n")
.append("group[" + group + "].name \"g2\"\n")
.append("group[" + group + "].partitions \"*\"\n")
.append("group[" + group + "].nodes[").append(nodeCount / 2).append("]\n");
for (int i=1; i<nodeCount; i += 2) {
sb.append("group[" + group + "].nodes[").append(i / 2).append("].index ").append(i).append("\n");
}
sb.append("disk_distribution ").append(diskDistribution.toString()).append("\n");
return sb.toString();
}
} | class Distribution {
private int[] distributionBitMasks = new int[65];
private Group nodeGraph;
private int redundancy;
private boolean distributorAutoOwnershipTransferOnWholeGroupDown = false;
private ConfigSubscriber configSub;
public Group getRootGroup() {
return nodeGraph;
}
public int getRedundancy() {
return redundancy;
}
private ConfigSubscriber.SingleSubscriber<StorDistributionConfig> configSubscriber = new ConfigSubscriber.SingleSubscriber<>() {
private int[] getGroupPath(String path) {
if (path.equals("invalid")) { return new int[0]; }
StringTokenizer st = new StringTokenizer(path, ".");
int[] p = new int[st.countTokens()];
for (int i=0; i<p.length; ++i) {
p[i] = Integer.valueOf(st.nextToken());
}
return p;
}
@Override
public void configure(StorDistributionConfig config) {
try{
Group root = null;
for (int i=0; i<config.group().size(); ++i) {
StorDistributionConfig.Group cg = config.group().get(i);
int[] path = new int[0];
if (root != null) {
path = getGroupPath(cg.index());
}
boolean isLeafGroup = (cg.nodes().size() > 0);
Group group;
int index = (path.length == 0 ? 0 : path[path.length - 1]);
if (isLeafGroup) {
group = new Group(index, cg.name());
List<ConfiguredNode> nodes = new ArrayList<>();
for (StorDistributionConfig.Group.Nodes node : cg.nodes()) {
nodes.add(new ConfiguredNode(node.index(), node.retired()));
}
group.setNodes(nodes);
} else {
group = new Group(index, cg.name(), new Group.Distribution(cg.partitions(), config.redundancy()));
}
group.setCapacity(cg.capacity());
if (path.length == 0) {
root = group;
} else {
Group parent = root;
for (int j=0; j<path.length - 1; ++j) {
parent = parent.getSubgroups().get(path[j]);
}
parent.addSubGroup(group);
}
}
if (root == null) {
throw new IllegalStateException("Got config that did not "
+ "specify even a root group. Need a root group at"
+ "\nminimum:\n" + config.toString());
}
root.calculateDistributionHashValues();
Distribution.this.nodeGraph = root;
Distribution.this.redundancy = config.redundancy();
distributorAutoOwnershipTransferOnWholeGroupDown = config.distributor_auto_ownership_transfer_on_whole_group_down();
} catch (ParseException e) {
throw (IllegalStateException) new IllegalStateException("Failed to parse config").initCause(e);
}
}
};
public Distribution(String configId) {
int mask = 0;
for (int i=0; i<=64; ++i) {
distributionBitMasks[i] = mask;
mask = (mask << 1) | 1;
}
try {
configSub = new ConfigSubscriber();
configSub.subscribe(configSubscriber, StorDistributionConfig.class, configId);
} catch (Throwable e) {
close();
throw e;
}
}
public Distribution(StorDistributionConfig config) {
int mask = 0;
for (int i=0; i<=64; ++i) {
distributionBitMasks[i] = mask;
mask = (mask << 1) | 1;
}
configSubscriber.configure(config);
}
private int getGroupSeed(BucketId bucket, ClusterState state, Group group) {
int seed = ((int) bucket.getRawId()) & distributionBitMasks[state.getDistributionBitCount()];
seed ^= group.getDistributionHash();
return seed;
}
private int getDistributorSeed(BucketId bucket, ClusterState state) {
return ((int) bucket.getRawId()) & distributionBitMasks[state.getDistributionBitCount()];
}
private int getStorageSeed(BucketId bucket, ClusterState state) {
int seed = ((int) bucket.getRawId()) & distributionBitMasks[state.getDistributionBitCount()];
if (bucket.getUsedBits() > 33) {
int usedBits = bucket.getUsedBits() - 1;
seed ^= (distributionBitMasks[usedBits - 32]
& (bucket.getRawId() >> 32)) << 6;
}
return seed;
}
private static class ScoredGroup implements Comparable<ScoredGroup> {
Group group;
double score;
ScoredGroup(Group g, double score) { this.group = g; this.score = score; }
@Override
public int compareTo(ScoredGroup o) {
return Double.valueOf(o.score).compareTo(score);
}
}
private static class ScoredNode {
int index;
int reliability;
double score;
ScoredNode(int index, int reliability, double score) { this.index = index; this.reliability = reliability; this.score = score; }
}
private static boolean allDistributorsDown(Group g, ClusterState clusterState) {
if (g.isLeafGroup()) {
for (ConfiguredNode node : g.getNodes()) {
NodeState ns = clusterState.getNodeState(new Node(NodeType.DISTRIBUTOR, node.index()));
if (ns.getState().oneOf("ui")) return false;
}
} else {
for (Group childGroup : g.getSubgroups().values()) {
if (!allDistributorsDown(childGroup, clusterState)) return false;
}
}
return true;
}
private Group getIdealDistributorGroup(BucketId bucket, ClusterState clusterState, Group parent, int redundancy) {
if (parent.isLeafGroup()) {
return parent;
}
int[] redundancyArray = parent.getDistribution().getRedundancyArray(redundancy);
TreeSet<ScoredGroup> results = new TreeSet<>();
int seed = getGroupSeed(bucket, clusterState, parent);
RandomGen random = new RandomGen(seed);
int currentIndex = 0;
for(Group g : parent.getSubgroups().values()) {
while (g.getIndex() < currentIndex++) random.nextDouble();
double score = random.nextDouble();
if (Math.abs(g.getCapacity() - 1.0) > 0.0000001) {
score = Math.pow(score, 1.0 / g.getCapacity());
}
results.add(new ScoredGroup(g, score));
}
if (distributorAutoOwnershipTransferOnWholeGroupDown) {
while (!results.isEmpty() && allDistributorsDown(results.first().group, clusterState)) {
results.remove(results.first());
}
}
if (results.isEmpty()) {
return null;
}
return getIdealDistributorGroup(bucket, clusterState, results.first().group, redundancyArray[0]);
}
private static class ResultGroup implements Comparable<ResultGroup> {
Group group;
int redundancy;
ResultGroup(Group group, int redundancy) {
this.group = group;
this.redundancy = redundancy;
}
@Override
public int compareTo(ResultGroup o) {
return group.compareTo(o.group);
}
}
private void getIdealGroups(BucketId bucketId, ClusterState clusterState, Group parent,
int redundancy, List<ResultGroup> results) {
if (parent.isLeafGroup()) {
results.add(new ResultGroup(parent, redundancy));
return;
}
int[] redundancyArray = parent.getDistribution().getRedundancyArray(redundancy);
List<ScoredGroup> tmpResults = new ArrayList<>();
for (int i = 0; i < redundancyArray.length; ++i) {
tmpResults.add(new ScoredGroup(null, 0.0));
}
int seed = getGroupSeed(bucketId, clusterState, parent);
RandomGen random = new RandomGen(seed);
int currentIndex = 0;
Map<Integer, Group> subGroups = parent.getSubgroups();
for (Map.Entry<Integer, Group> group : subGroups.entrySet()) {
while (group.getKey() < currentIndex++) {
random.nextDouble();
}
double score = random.nextDouble();
if (group.getValue().getCapacity() != 1) {
score = Math.pow(score, 1.0 / group.getValue().getCapacity());
}
if (score > tmpResults.get(tmpResults.size() - 1).score) {
tmpResults.add(new ScoredGroup(group.getValue(), score));
Collections.sort(tmpResults);
tmpResults.remove(tmpResults.size() - 1);
}
}
for (int i = 0; i < tmpResults.size(); ++i) {
Group group = tmpResults.get(i).group;
if (group != null) {
getIdealGroups(bucketId, clusterState, group, redundancyArray[i], results);
}
}
}
private int getDiskSeed(BucketId bucket, int nodeIndex) {
long currentid = bucket.withoutCountBits();
byte[] ordered = new byte[8];
ordered[0] = (byte)(currentid >> (0*8));
ordered[1] = (byte)(currentid >> (1*8));
ordered[2] = (byte)(currentid >> (2*8));
ordered[3] = (byte)(currentid >> (3*8));
ordered[4] = (byte)(currentid >> (4*8));
ordered[5] = (byte)(currentid >> (5*8));
ordered[6] = (byte)(currentid >> (6*8));
ordered[7] = (byte)(currentid >> (7*8));
int initval = (1664525 * nodeIndex + 0xdeadbeef);
return BobHash.hash(ordered, initval);
}
/**
* This function should only depend on disk distribution and node index. It is
* assumed that any other change, for instance in hierarchical grouping, does
* not change disk index on disk.
*/
int getIdealDisk(NodeState nodeState, int nodeIndex, BucketId bucket) {
if (nodeState.getDiskCount() < 2) {
if (nodeState.getDiskCount() == 1) {
return 0;
}
throw new IllegalArgumentException(
"Cannot pick ideal disk without knowing disk count.");
}
RandomGen randomizer = new RandomGen(getDiskSeed(bucket, nodeIndex));
double maxScore = 0.0;
int idealDisk = 0xffff;
for (int i=0, n=nodeState.getDiskCount(); i<n; ++i) {
double score = randomizer.nextDouble();
DiskState diskState = (nodeState.getDiskState(i));
if (diskState.getCapacity() != 1.0) {
score = Math.pow(score,
1.0 / diskState.getCapacity());
}
if (score > maxScore) {
maxScore = score;
idealDisk = i;
}
}
return idealDisk;
}
List<Integer> getIdealStorageNodes(ClusterState clusterState, BucketId bucket,
String upStates) throws TooFewBucketBitsInUseException {
List<Integer> resultNodes = new ArrayList<>();
if (bucket.getUsedBits() < clusterState.getDistributionBitCount()) {
String msg = "Cannot get ideal state for bucket " + bucket + " using "
+ bucket.getUsedBits() + " bits when cluster uses "
+ clusterState.getDistributionBitCount() + " distribution bits.";
throw new TooFewBucketBitsInUseException(msg);
}
List<ResultGroup> groupDistribution = new ArrayList<>();
getIdealGroups(bucket, clusterState, nodeGraph, redundancy, groupDistribution);
int seed = getStorageSeed(bucket, clusterState);
RandomGen random = new RandomGen(seed);
int randomIndex = 0;
for (ResultGroup group : groupDistribution) {
int redundancy = group.redundancy;
Collection<ConfiguredNode> nodes = group.group.getNodes();
LinkedList<ScoredNode> tmpResults = new LinkedList<>();
for (int i = 0; i < redundancy; ++i) {
tmpResults.add(new ScoredNode(0, 0, 0.0));
}
for (ConfiguredNode configuredNode : nodes) {
NodeState nodeState = clusterState.getNodeState(new Node(NodeType.STORAGE, configuredNode.index()));
if (!nodeState.getState().oneOf(upStates)) {
continue;
}
if (nodeState.isAnyDiskDown()) {
int idealDiskIndex = getIdealDisk(nodeState, configuredNode.index(), bucket);
if (nodeState.getDiskState(idealDiskIndex).getState() != State.UP) {
continue;
}
}
if (configuredNode.index() != randomIndex) {
if (configuredNode.index() < randomIndex) {
random.setSeed(seed);
randomIndex = 0;
}
for (int k = randomIndex; k < configuredNode.index(); ++k) {
random.nextDouble();
}
randomIndex = configuredNode.index();
}
double score = random.nextDouble();
++randomIndex;
if (nodeState.getCapacity() != 1.0) {
score = Math.pow(score, 1.0 / nodeState.getCapacity());
}
if (score > tmpResults.getLast().score) {
for (int i = 0; i < tmpResults.size(); ++i) {
if (score > tmpResults.get(i).score) {
tmpResults.add(i, new ScoredNode(configuredNode.index(), nodeState.getReliability(), score));
break;
}
}
tmpResults.removeLast();
}
}
for (ScoredNode node : tmpResults) {
resultNodes.add(node.index);
}
}
return resultNodes;
}
public static class TooFewBucketBitsInUseException extends Exception {
TooFewBucketBitsInUseException(String message) {
super(message);
}
}
public static class NoDistributorsAvailableException extends Exception {
NoDistributorsAvailableException(String message) {
super(message);
}
}
public int getIdealDistributorNode(ClusterState state, BucketId bucket, String upStates) throws TooFewBucketBitsInUseException, NoDistributorsAvailableException {
if (bucket.getUsedBits() < state.getDistributionBitCount()) {
throw new TooFewBucketBitsInUseException("Cannot get ideal state for bucket " + bucket + " using " + bucket.getUsedBits()
+ " bits when cluster uses " + state.getDistributionBitCount() + " distribution bits.");
}
Group idealGroup = getIdealDistributorGroup(bucket, state, nodeGraph, redundancy);
if (idealGroup == null) {
throw new NoDistributorsAvailableException("No distributors available in cluster state version " + state.getVersion());
}
int seed = getDistributorSeed(bucket, state);
RandomGen random = new RandomGen(seed);
int randomIndex = 0;
List<ConfiguredNode> configuredNodes = idealGroup.getNodes();
ScoredNode node = new ScoredNode(0, 0, 0);
for (ConfiguredNode configuredNode : configuredNodes) {
NodeState nodeState = state.getNodeState(new Node(NodeType.DISTRIBUTOR, configuredNode.index()));
if (!nodeState.getState().oneOf(upStates)) continue;
if (configuredNode.index() != randomIndex) {
if (configuredNode.index() < randomIndex) {
random.setSeed(seed);
randomIndex = 0;
}
for (int k=randomIndex; k < configuredNode.index(); ++k) {
random.nextDouble();
}
randomIndex = configuredNode.index();
}
double score = random.nextDouble();
++randomIndex;
if (Math.abs(nodeState.getCapacity() - 1.0) > 0.0000001) {
score = Math.pow(score, 1.0 / nodeState.getCapacity());
}
if (score > node.score) {
node = new ScoredNode(configuredNode.index(), 1, score);
}
}
if (node.reliability == 0) {
throw new NoDistributorsAvailableException(
"No available distributors in any of the given upstates '"
+ upStates + "'.");
}
return node.index;
}
private boolean visitGroups(GroupVisitor visitor, Map<Integer, Group> groups) {
for (Group g : groups.values()) {
if (!visitor.visitGroup(g)) return false;
if (!g.isLeafGroup()) {
if (!visitGroups(visitor, g.getSubgroups())) {
return false;
}
}
}
return true;
}
public void visitGroups(GroupVisitor visitor) {
Map<Integer, Group> groups = new TreeMap<>();
groups.put(nodeGraph.getIndex(), nodeGraph);
visitGroups(visitor, groups);
}
public Set<ConfiguredNode> getNodes() {
final Set<ConfiguredNode> nodes = new HashSet<>();
GroupVisitor visitor = new GroupVisitor() {
@Override
public boolean visitGroup(Group g) {
if (g.isLeafGroup()) {
nodes.addAll(g.getNodes());
}
return true;
}
};
visitGroups(visitor);
return nodes;
}
public static String getDefaultDistributionConfig(int redundancy, int nodeCount) {
return getDefaultDistributionConfig(redundancy, nodeCount, StorDistributionConfig.Disk_distribution.MODULO_BID);
}
public static String getDefaultDistributionConfig(int redundancy, int nodeCount, StorDistributionConfig.Disk_distribution.Enum diskDistribution) {
StringBuilder sb = new StringBuilder();
sb.append("raw:redundancy ").append(redundancy).append("\n")
.append("group[1]\n")
.append("group[0].index \"invalid\"\n")
.append("group[0].name \"invalid\"\n")
.append("group[0].partitions \"*\"\n")
.append("group[0].nodes[").append(nodeCount).append("]\n");
for (int i=0; i<nodeCount; ++i) {
sb.append("group[0].nodes[").append(i).append("].index ").append(i).append("\n");
}
sb.append("disk_distribution ").append(diskDistribution.toString()).append("\n");
return sb.toString();
}
public static String getSimpleGroupConfig(int redundancy, int nodeCount) {
return getSimpleGroupConfig(redundancy, nodeCount, StorDistributionConfig.Disk_distribution.Enum.MODULO_BID);
}
private static String getSimpleGroupConfig(int redundancy, int nodeCount, StorDistributionConfig.Disk_distribution.Enum diskDistribution) {
StringBuilder sb = new StringBuilder();
sb.append("raw:redundancy ").append(redundancy).append("\n").append("group[4]\n");
int group = 0;
sb.append("group[" + group + "].index \"invalid\"\n")
.append("group[" + group + "].name \"invalid\"\n")
.append("group[" + group + "].partitions \"1|*\"\n");
++group;
sb.append("group[" + group + "].index \"0\"\n")
.append("group[" + group + "].name \"east\"\n")
.append("group[" + group + "].partitions \"*\"\n");
++group;
sb.append("group[" + group + "].index \"0.0\"\n")
.append("group[" + group + "].name \"g1\"\n")
.append("group[" + group + "].partitions \"*\"\n")
.append("group[" + group + "].nodes[").append((nodeCount + 1) / 2).append("]\n");
for (int i=0; i<nodeCount; i += 2) {
sb.append("group[" + group + "].nodes[").append(i / 2).append("].index ").append(i).append("\n");
}
++group;
sb.append("group[" + group + "].index \"0.1\"\n")
.append("group[" + group + "].name \"g2\"\n")
.append("group[" + group + "].partitions \"*\"\n")
.append("group[" + group + "].nodes[").append(nodeCount / 2).append("]\n");
for (int i=1; i<nodeCount; i += 2) {
sb.append("group[" + group + "].nodes[").append(i / 2).append("].index ").append(i).append("\n");
}
sb.append("disk_distribution ").append(diskDistribution.toString()).append("\n");
return sb.toString();
}
} |
Could consider `getAcquire()` instead of just `get` on these, with matching `setRelease()` instead of `set`. Since these are immutable we shouldn't need a full sequentially consistent memory barrier. | public Group getRootGroup() {
return config.get().nodeGraph;
} | return config.get().nodeGraph; | public Group getRootGroup() {
return config.getAcquire().nodeGraph;
} | class Config {
Config(Group nodeGraph, int redundancy, boolean distributorAutoOwnershipTransferOnWholeGroupDown) {
this.nodeGraph = nodeGraph;
this.redundancy = redundancy;
this.distributorAutoOwnershipTransferOnWholeGroupDown = distributorAutoOwnershipTransferOnWholeGroupDown;
}
private final Group nodeGraph;
private final int redundancy;
private final boolean distributorAutoOwnershipTransferOnWholeGroupDown;
} | class Config {
Config(Group nodeGraph, int redundancy, boolean distributorAutoOwnershipTransferOnWholeGroupDown) {
this.nodeGraph = nodeGraph;
this.redundancy = redundancy;
this.distributorAutoOwnershipTransferOnWholeGroupDown = distributorAutoOwnershipTransferOnWholeGroupDown;
}
private final Group nodeGraph;
private final int redundancy;
private final boolean distributorAutoOwnershipTransferOnWholeGroupDown;
} |
Fixed | public Group getRootGroup() {
return config.get().nodeGraph;
} | return config.get().nodeGraph; | public Group getRootGroup() {
return config.getAcquire().nodeGraph;
} | class Config {
Config(Group nodeGraph, int redundancy, boolean distributorAutoOwnershipTransferOnWholeGroupDown) {
this.nodeGraph = nodeGraph;
this.redundancy = redundancy;
this.distributorAutoOwnershipTransferOnWholeGroupDown = distributorAutoOwnershipTransferOnWholeGroupDown;
}
private final Group nodeGraph;
private final int redundancy;
private final boolean distributorAutoOwnershipTransferOnWholeGroupDown;
} | class Config {
Config(Group nodeGraph, int redundancy, boolean distributorAutoOwnershipTransferOnWholeGroupDown) {
this.nodeGraph = nodeGraph;
this.redundancy = redundancy;
this.distributorAutoOwnershipTransferOnWholeGroupDown = distributorAutoOwnershipTransferOnWholeGroupDown;
}
private final Group nodeGraph;
private final int redundancy;
private final boolean distributorAutoOwnershipTransferOnWholeGroupDown;
} |
Pure round-robin is presumably a good enough choice here since we're assuming that load to the same target is pretty uniformly distributed across individual requests? | RPCTarget getTarget(long now) {
if (index >= targets.length) {
index = 0;
}
RPCTarget target = targets[index];
if (target.getJRTTarget().isValid()) {
target.addRef();
lastUse = now;
index++;
return target;
}
return null;
} | RPCTarget target = targets[index]; | RPCTarget getTarget(long now) {
if (index >= targets.length) {
index = 0;
}
RPCTarget target = targets[index];
if (target.getJRTTarget().isValid()) {
target.addRef();
lastUse = now;
index++;
return target;
}
return null;
} | class Entry implements Closeable {
private final RPCTarget [] targets;
private int index;
long lastUse;
Entry(RPCTarget [] targets, long lastUse) {
this.targets = targets;
this.lastUse = lastUse;
}
boolean isValid() {
for (RPCTarget target : targets) {
if ( ! target.getJRTTarget().isValid()) {
return false;
}
}
return true;
}
int getRefCount() {
int refCount = 0;
for (RPCTarget target : targets) {
refCount += target.getRefCount();
}
return refCount;
}
@Override
public void close() {
for (RPCTarget target : targets) {
target.subRef();
}
}
} | class Entry implements Closeable {
private final RPCTarget [] targets;
private int index;
long lastUse;
Entry(RPCTarget [] targets, long lastUse) {
this.targets = targets;
this.lastUse = lastUse;
}
boolean isValid() {
for (RPCTarget target : targets) {
if ( ! target.getJRTTarget().isValid()) {
return false;
}
}
return true;
}
int getRefCount() {
int refCount = 0;
for (RPCTarget target : targets) {
refCount += target.getRefCount();
}
return refCount;
}
@Override
public void close() {
for (RPCTarget target : targets) {
target.subRef();
}
}
} |
Can the port count be extracted from `portsMeta`? This method and the getters below seems to duplicate the logic/info already stored in `portsMeta`. | public int getPortCount() {
int httpPorts = (getHttp() != null) ? 0 : numHttpServerPorts + 2;
return httpPorts + numMessageBusPorts() + numRpcPorts();
} | return httpPorts + numMessageBusPorts() + numRpcPorts(); | public int getPortCount() {
int httpPorts = (getHttp() != null) ? 0 : numHttpServerPorts + 2;
return httpPorts + numMessageBusPorts() + numRpcPorts();
} | class Container extends AbstractService implements
QrConfig.Producer,
ComponentsConfig.Producer,
JdiscBindingsConfig.Producer,
ContainerHttpConfig.Producer,
ContainerMbusConfig.Producer {
public static final int BASEPORT = Defaults.getDefaults().vespaWebServicePort();
public static final String SINGLENODE_CONTAINER_SERVICESPEC = "default_singlenode_container";
protected final AbstractConfigProducer parent;
private final String name;
private boolean requireSpecificPorts = true;
private String clusterName = null;
private Optional<String> hostResponseHeaderKey = Optional.empty();
/** Whether this node has been marked as retired (e.g, will be removed) */
private final boolean retired;
/** The unique index of this node */
private final int index;
private final ComponentGroup<Handler<?>> handlers = new ComponentGroup<>(this, "handler");
private final ComponentGroup<Component<?, ?>> components = new ComponentGroup<>(this, "components");
private final JettyHttpServer defaultHttpServer = new JettyHttpServer(new ComponentId("DefaultHttpServer"));
protected final int numHttpServerPorts;
protected Container(AbstractConfigProducer parent, String name, int index) {
this(parent, name, false, index);
}
protected Container(AbstractConfigProducer parent, String name, boolean retired, int index) {
super(parent, name);
this.name = name;
this.parent = parent;
this.retired = retired;
this.index = index;
if (getHttp() == null) {
numHttpServerPorts = 2;
addChild(defaultHttpServer);
} else if (getHttp().getHttpServer() == null) {
numHttpServerPorts = 0;
} else {
numHttpServerPorts = getHttp().getHttpServer().getConnectorFactories().size();
}
addBuiltinHandlers();
addChild(new SimpleComponent("com.yahoo.container.jdisc.ConfiguredApplication$ApplicationContext"));
}
/** True if this container is retired (slated for removal) */
public boolean isRetired() { return retired; }
public ComponentGroup<Handler<?>> getHandlers() {
return handlers;
}
public ComponentGroup getComponents() {
return components;
}
public final void addComponent(Component c) {
components.addComponent(c);
}
public final void addSimpleComponent(String idSpec, String classSpec, String bundleSpec) {
addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec)));
}
public final void addHandler(Handler h) {
handlers.addComponent(h);
}
/**
 * Sets the header key this container should emit in HTTP responses, with the value set to
 * the local hostname. Pass {@code Optional.empty()} to emit no such header (the default).
 * The argument itself must not be {@code null}.
 */
@SuppressWarnings("unused")
public void setHostResponseHeaderKey(Optional<String> hostResponseheaderKey) {
    // Optional-typed parameter kept for caller compatibility; the Optional itself must be non-null.
    Objects.requireNonNull(hostResponseheaderKey, "HostResponseheaderKey cannot be null");
    this.hostResponseHeaderKey = hostResponseheaderKey;
}
public Http getHttp() {
return (parent instanceof ContainerCluster) ? ((ContainerCluster) parent).getHttp() : null;
}
public JettyHttpServer getDefaultHttpServer() {
return defaultHttpServer;
}
/** Returns the index of this node. The index of a given node is stable through changes with best effort. */
public final int index() { return index; }
public void addBuiltinHandlers() { }
@Override
public void initService(DeployLogger deployLogger) {
    // Idempotent: calls after the first successful initialization are no-ops.
    if (isInitialized()) return;
    super.initService(deployLogger);
    if (getHttp() == null) {
        // No explicit http section: wire the built-in Jetty connector to the default search port.
        initDefaultJettyConnector();
    } else {
        // Explicit http section: reserve its configured connector ports ahead of dynamic allocation.
        reserveHttpPortsPrepended();
    }
    tagServers();
}
/**
 * Tags this service's ports in {@code portsMeta}. Layout: http ports first, then the
 * messaging port (if messagebus is enabled), then the admin rpc port (if rpc is enabled).
 */
protected void tagServers() {
    if (numHttpServerPorts > 0) {
        // The first http port also serves queries and the state API.
        portsMeta.on(0).tag("http").tag("query").tag("external").tag("state");
    }
    for (int i = 1; i < numHttpServerPorts; i++)
        portsMeta.on(i).tag("http").tag("external");
    if (messageBusEnabled()) {
        portsMeta.on(numHttpServerPorts).tag("rpc").tag("messaging");
    }
    // NOTE(review): getRpcPort() computes the rpc port as numHttpServerPorts + numMessageBusPorts(),
    // so when messagebus is disabled this tags index numHttpServerPorts + 1 while the rpc port is
    // actually at numHttpServerPorts — confirm whether that combination can occur.
    if (rpcServerEnabled()) {
        portsMeta.on(numHttpServerPorts + 1).tag("rpc").tag("admin");
    }
}
/** Reserves the ports of all explicitly configured http connectors, ahead of dynamically allocated ports. */
private void reserveHttpPortsPrepended() {
    // Nothing to reserve unless an explicit http server with connectors is configured.
    if (getHttp() == null) return;
    if (getHttp().getHttpServer() == null) return;
    for (ConnectorFactory connector : getHttp().getHttpServer().getConnectorFactories())
        reservePortPrepended(getPort(connector), "http/" + connector.getName());
}
private int getPort(ConnectorFactory connectorFactory) {
return connectorFactory.getListenPort();
}
private void initDefaultJettyConnector() {
defaultHttpServer.addConnector(new ConnectorFactory("SearchServer", getSearchPort()));
}
// Cached result of myServiceType(); resolved lazily on first use.
private ContainerServiceType myServiceType = null;
/** Returns the service type name supplied by the subclass via {@link #myServiceType()}, cached after first resolution. */
@Override
public final String getServiceType() {
    if (myServiceType == null) {
        myServiceType = myServiceType();
    }
    return myServiceType.serviceName;
}
/** Subclasses must implement this for a custom service name. */
protected abstract ContainerServiceType myServiceType();
public void setClusterName(String name) {
this.clusterName = name;
}
@Override
public int getWantedPort() {
return requiresWantedPort() ? BASEPORT: 0;
}
/** instance can use any port number for its default HTTP server */
public void useDynamicPorts() {
requireSpecificPorts = false;
}
/**
* First Qrserver or container must run on ports familiar to the user.
*/
@Override
public boolean requiresWantedPort() {
return requireSpecificPorts && (getHttp() == null);
}
public boolean requiresConsecutivePorts() {
return false;
}
/**
 * Returns the name (suffix) of each port this container needs, indexed by relative port.
 * Ports reserved manually (reservePortPrepended) are not named here.
 */
@Override
public String[] getPortSuffixes() {
    int n = getPortCount();
    String[] suffixes = new String[n];
    int off = 0;
    // With an explicit http section the connector ports are reserved up front (prepended),
    // so no http suffixes are produced here.
    int httpPorts = (getHttp() != null) ? 0 : numHttpServerPorts;
    if (httpPorts > 0) {
        suffixes[off++] = "http";
    }
    for (int i = 1; i < httpPorts; i++) {
        suffixes[off++] = "http/" + i;
    }
    if (messageBusEnabled()) {
        suffixes[off++] = "messaging";
    }
    if (rpcServerEnabled()) {
        suffixes[off++] = "rpc";
    }
    // getPortCount() may report more ports than are named above (it adds two extra when
    // there is no http section); pad the remainder with placeholder names.
    while (off < n) {
        suffixes[off] = "unused/" + off;
        ++off;
    }
    assert (off == n);
    return suffixes;
}
/**
* @return the actual search port
* TODO: Remove. Use {@link
*/
public int getSearchPort() {
if (getHttp() != null)
throw new AssertionError("getSearchPort must not be used when http section is present.");
return getRelativePort(0);
}
private int getRpcPort() {
return rpcServerEnabled() ? getRelativePort(numHttpServerPorts + numMessageBusPorts()) : 0;
}
private int numRpcPorts() { return rpcServerEnabled() ? 1 : 0; }
private int getMessagingPort() {
return messageBusEnabled() ? getRelativePort(numHttpServerPorts) : 0;
}
private int numMessageBusPorts() { return messageBusEnabled() ? 1 : 0; }
/**
 * Returns the port serving health checks: the first http connector when an explicit http
 * section is present (or -1 if it configures no server), otherwise the default search port.
 */
@Override
public int getHealthPort() {
    final Http http = getHttp();
    if (http != null)
        return (http.getHttpServer() == null) ? -1 : getRelativePort(0);
    // Default setup: the search (default http) port doubles as the health port.
    return httpServerEnabled() ? getSearchPort() : -1;
}
public String getStartupCommand() {
return "PRELOAD=" + getPreLoad() + " exec vespa-start-container-daemon " + getJvmOptions() + " ";
}
/** Fills in rpc, file distribution and discriminator settings for this container's QrConfig. */
@Override
public void getConfig(QrConfig.Builder builder) {
    // The discriminator uniquely identifies this container, qualified by cluster when one is set.
    String discriminator = (clusterName != null) ? clusterName + "." + name : name;
    Rpc.Builder rpc = new Rpc.Builder()
            .enabled(rpcServerEnabled())
            .port(getRpcPort())
            .slobrokId(serviceSlobrokId());
    builder.rpc(rpc).filedistributor(filedistributorConfig());
    builder.discriminator(discriminator);
}
/** Returns the jvm args set explicitly for this node */
public String getAssignedJvmOptions() { return super.getJvmOptions(); }
private String serviceSlobrokId() {
return "vespa/service/" + getConfigId();
}
// Builds the file distribution config for this container, pointing it at the
// file distribution config producer for this container's host.
private Filedistributor.Builder filedistributorConfig() {
    Filedistributor.Builder builder = new Filedistributor.Builder();
    FileDistributionConfigProducer fileDistribution = getRoot().getFileDistributionConfigProducer();
    // May be null — presumably when file distribution is not set up for this model; verify with callers.
    if (fileDistribution != null) {
        builder.configid(fileDistribution.getConfigProducer(getHost()).getConfigId());
    }
    return builder;
}
@Override
public void getConfig(ComponentsConfig.Builder builder) {
builder.components.addAll(ComponentsConfigGenerator.generate(allEnabledComponents()));
}
private Collection<Component<?, ?>> allEnabledComponents() {
Collection<Component<?, ?>> allComponents = new ArrayList<>();
addAllEnabledComponents(allComponents, this);
return Collections.unmodifiableCollection(allComponents);
}
// Depth-first walk of the config producer subtree rooted at 'current', collecting every
// Component into 'allComponents'. Http servers (and everything beneath them) are skipped
// entirely when the http server is disabled for this cluster.
private void addAllEnabledComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) {
    for (AbstractConfigProducer<?> child: current.getChildren().values()) {
        if ( ! httpServerEnabled() && isHttpServer(child)) continue;
        if (child instanceof Component)
            allComponents.add((Component<?, ?>) child);
        addAllEnabledComponents(allComponents, child);
    }
}
private boolean isHttpServer(AbstractConfigProducer<?> component) {
return component instanceof JettyHttpServer;
}
@Override
public final void getConfig(JdiscBindingsConfig.Builder builder) {
builder.handlers(DiscBindingsConfigGenerator.generate(handlers.getComponents()));
}
/** Emits the host-response-header key into the config, when one has been configured. */
@Override
public void getConfig(ContainerHttpConfig.Builder builder) {
    // Idiomatic Optional use: ifPresent instead of isPresent()/get().
    hostResponseHeaderKey.ifPresent(builder::hostResponseHeaderKey);
}
@Override
public void getConfig(ContainerMbusConfig.Builder builder) {
builder.enabled(messageBusEnabled()).port(getMessagingPort());
}
@Override
public HashMap<String,String> getDefaultMetricDimensions(){
HashMap<String, String> dimensions = new HashMap<>();
if (clusterName != null)
dimensions.put("clustername", clusterName);
return dimensions;
}
private boolean messageBusEnabled() {
return containerCluster().isPresent() && containerCluster().get().messageBusEnabled();
}
private boolean httpServerEnabled() {
return containerCluster().isPresent() && containerCluster().get().httpServerEnabled();
}
private boolean rpcServerEnabled() {
return containerCluster().isPresent() && containerCluster().get().rpcServerEnabled();
}
private Optional<ContainerCluster> containerCluster() {
return (parent instanceof ContainerCluster) ? Optional.of((ContainerCluster) parent) : Optional.empty();
}
} | class Container extends AbstractService implements
QrConfig.Producer,
ComponentsConfig.Producer,
JdiscBindingsConfig.Producer,
ContainerHttpConfig.Producer,
ContainerMbusConfig.Producer {
public static final int BASEPORT = Defaults.getDefaults().vespaWebServicePort();
public static final String SINGLENODE_CONTAINER_SERVICESPEC = "default_singlenode_container";
protected final AbstractConfigProducer parent;
private final String name;
private boolean requireSpecificPorts = true;
private String clusterName = null;
private Optional<String> hostResponseHeaderKey = Optional.empty();
/** Whether this node has been marked as retired (e.g, will be removed) */
private final boolean retired;
/** The unique index of this node */
private final int index;
private final ComponentGroup<Handler<?>> handlers = new ComponentGroup<>(this, "handler");
private final ComponentGroup<Component<?, ?>> components = new ComponentGroup<>(this, "components");
private final JettyHttpServer defaultHttpServer = new JettyHttpServer(new ComponentId("DefaultHttpServer"));
protected final int numHttpServerPorts;
protected Container(AbstractConfigProducer parent, String name, int index) {
this(parent, name, false, index);
}
protected Container(AbstractConfigProducer parent, String name, boolean retired, int index) {
super(parent, name);
this.name = name;
this.parent = parent;
this.retired = retired;
this.index = index;
if (getHttp() == null) {
numHttpServerPorts = 2;
addChild(defaultHttpServer);
} else if (getHttp().getHttpServer() == null) {
numHttpServerPorts = 0;
} else {
numHttpServerPorts = getHttp().getHttpServer().getConnectorFactories().size();
}
addBuiltinHandlers();
addChild(new SimpleComponent("com.yahoo.container.jdisc.ConfiguredApplication$ApplicationContext"));
}
/** True if this container is retired (slated for removal) */
public boolean isRetired() { return retired; }
public ComponentGroup<Handler<?>> getHandlers() {
return handlers;
}
public ComponentGroup getComponents() {
return components;
}
public final void addComponent(Component c) {
components.addComponent(c);
}
public final void addSimpleComponent(String idSpec, String classSpec, String bundleSpec) {
addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec)));
}
public final void addHandler(Handler h) {
handlers.addComponent(h);
}
/**
* If present, this container should emit this header key with the value set to the local hostname
* in HTTP responses
*/
@SuppressWarnings("unused")
public void setHostResponseHeaderKey(Optional<String> hostResponseheaderKey) {
Objects.requireNonNull(hostResponseheaderKey, "HostResponseheaderKey cannot be null");
this.hostResponseHeaderKey = hostResponseheaderKey;
}
public Http getHttp() {
return (parent instanceof ContainerCluster) ? ((ContainerCluster) parent).getHttp() : null;
}
public JettyHttpServer getDefaultHttpServer() {
return defaultHttpServer;
}
/** Returns the index of this node. The index of a given node is stable through changes with best effort. */
public final int index() { return index; }
public void addBuiltinHandlers() { }
@Override
public void initService(DeployLogger deployLogger) {
if (isInitialized()) return;
super.initService(deployLogger);
if (getHttp() == null) {
initDefaultJettyConnector();
} else {
reserveHttpPortsPrepended();
}
tagServers();
}
protected void tagServers() {
if (numHttpServerPorts > 0) {
portsMeta.on(0).tag("http").tag("query").tag("external").tag("state");
}
for (int i = 1; i < numHttpServerPorts; i++)
portsMeta.on(i).tag("http").tag("external");
if (messageBusEnabled()) {
portsMeta.on(numHttpServerPorts).tag("rpc").tag("messaging");
}
if (rpcServerEnabled()) {
portsMeta.on(numHttpServerPorts + 1).tag("rpc").tag("admin");
}
}
private void reserveHttpPortsPrepended() {
if (getHttp() != null && getHttp().getHttpServer() != null) {
for (ConnectorFactory connectorFactory : getHttp().getHttpServer().getConnectorFactories()) {
reservePortPrepended(getPort(connectorFactory), "http/" + connectorFactory.getName());
}
}
}
private int getPort(ConnectorFactory connectorFactory) {
return connectorFactory.getListenPort();
}
private void initDefaultJettyConnector() {
defaultHttpServer.addConnector(new ConnectorFactory("SearchServer", getSearchPort()));
}
private ContainerServiceType myServiceType = null;
/** Cached service type; subclasses supply it via {@link #myServiceType()}, resolved lazily by getServiceType(). */
@Override
public final String getServiceType() {
if (myServiceType == null) {
myServiceType = myServiceType();
}
return myServiceType.serviceName;
}
/** Subclasses must implement this for a custom service name. */
protected abstract ContainerServiceType myServiceType();
public void setClusterName(String name) {
this.clusterName = name;
}
@Override
public int getWantedPort() {
return requiresWantedPort() ? BASEPORT: 0;
}
/** instance can use any port number for its default HTTP server */
public void useDynamicPorts() {
requireSpecificPorts = false;
}
/**
* First Qrserver or container must run on ports familiar to the user.
*/
@Override
public boolean requiresWantedPort() {
return requireSpecificPorts && (getHttp() == null);
}
public boolean requiresConsecutivePorts() {
return false;
}
/**
* @return the number of ports needed by the Container except those reserved manually(reservePortPrepended)
*/
@Override
public String[] getPortSuffixes() {
int n = getPortCount();
String[] suffixes = new String[n];
int off = 0;
int httpPorts = (getHttp() != null) ? 0 : numHttpServerPorts;
if (httpPorts > 0) {
suffixes[off++] = "http";
}
for (int i = 1; i < httpPorts; i++) {
suffixes[off++] = "http/" + i;
}
if (messageBusEnabled()) {
suffixes[off++] = "messaging";
}
if (rpcServerEnabled()) {
suffixes[off++] = "rpc";
}
while (off < n) {
suffixes[off] = "unused/" + off;
++off;
}
assert (off == n);
return suffixes;
}
/**
* @return the actual search port
* TODO: Remove. Use {@link
*/
public int getSearchPort() {
if (getHttp() != null)
throw new AssertionError("getSearchPort must not be used when http section is present.");
return getRelativePort(0);
}
private int getRpcPort() {
return rpcServerEnabled() ? getRelativePort(numHttpServerPorts + numMessageBusPorts()) : 0;
}
private int numRpcPorts() { return rpcServerEnabled() ? 1 : 0; }
private int getMessagingPort() {
return messageBusEnabled() ? getRelativePort(numHttpServerPorts) : 0;
}
private int numMessageBusPorts() { return messageBusEnabled() ? 1 : 0; }
@Override
public int getHealthPort() {
final Http http = getHttp();
if (http != null) {
if (http.getHttpServer() == null) {
return -1;
} else {
return getRelativePort(0);
}
} else {
return httpServerEnabled() ? getSearchPort() : -1;
}
}
public String getStartupCommand() {
return "PRELOAD=" + getPreLoad() + " exec vespa-start-container-daemon " + getJvmOptions() + " ";
}
@Override
public void getConfig(QrConfig.Builder builder) {
builder.
rpc(new Rpc.Builder()
.enabled(rpcServerEnabled())
.port(getRpcPort())
.slobrokId(serviceSlobrokId())).
filedistributor(filedistributorConfig());
if (clusterName != null) {
builder.discriminator(clusterName+"."+name);
} else {
builder.discriminator(name);
}
}
/** Returns the jvm args set explicitly for this node */
public String getAssignedJvmOptions() { return super.getJvmOptions(); }
private String serviceSlobrokId() {
return "vespa/service/" + getConfigId();
}
private Filedistributor.Builder filedistributorConfig() {
Filedistributor.Builder builder = new Filedistributor.Builder();
FileDistributionConfigProducer fileDistribution = getRoot().getFileDistributionConfigProducer();
if (fileDistribution != null) {
builder.configid(fileDistribution.getConfigProducer(getHost()).getConfigId());
}
return builder;
}
@Override
public void getConfig(ComponentsConfig.Builder builder) {
builder.components.addAll(ComponentsConfigGenerator.generate(allEnabledComponents()));
}
private Collection<Component<?, ?>> allEnabledComponents() {
Collection<Component<?, ?>> allComponents = new ArrayList<>();
addAllEnabledComponents(allComponents, this);
return Collections.unmodifiableCollection(allComponents);
}
private void addAllEnabledComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) {
for (AbstractConfigProducer<?> child: current.getChildren().values()) {
if ( ! httpServerEnabled() && isHttpServer(child)) continue;
if (child instanceof Component)
allComponents.add((Component<?, ?>) child);
addAllEnabledComponents(allComponents, child);
}
}
private boolean isHttpServer(AbstractConfigProducer<?> component) {
return component instanceof JettyHttpServer;
}
@Override
public final void getConfig(JdiscBindingsConfig.Builder builder) {
builder.handlers(DiscBindingsConfigGenerator.generate(handlers.getComponents()));
}
@Override
public void getConfig(ContainerHttpConfig.Builder builder) {
if (hostResponseHeaderKey.isPresent())
builder.hostResponseHeaderKey(hostResponseHeaderKey.get());
}
@Override
public void getConfig(ContainerMbusConfig.Builder builder) {
builder.enabled(messageBusEnabled()).port(getMessagingPort());
}
@Override
public HashMap<String,String> getDefaultMetricDimensions(){
HashMap<String, String> dimensions = new HashMap<>();
if (clusterName != null)
dimensions.put("clustername", clusterName);
return dimensions;
}
private boolean messageBusEnabled() {
return containerCluster().isPresent() && containerCluster().get().messageBusEnabled();
}
private boolean httpServerEnabled() {
return containerCluster().isPresent() && containerCluster().get().httpServerEnabled();
}
private boolean rpcServerEnabled() {
return containerCluster().isPresent() && containerCluster().get().rpcServerEnabled();
}
private Optional<ContainerCluster> containerCluster() {
return (parent instanceof ContainerCluster) ? Optional.of((ContainerCluster) parent) : Optional.empty();
}
} |
No, because getPortCount is called before the `portsMeta` is populated. | public int getPortCount() {
int httpPorts = (getHttp() != null) ? 0 : numHttpServerPorts + 2;
return httpPorts + numMessageBusPorts() + numRpcPorts();
} | return httpPorts + numMessageBusPorts() + numRpcPorts(); | public int getPortCount() {
int httpPorts = (getHttp() != null) ? 0 : numHttpServerPorts + 2;
return httpPorts + numMessageBusPorts() + numRpcPorts();
} | class Container extends AbstractService implements
QrConfig.Producer,
ComponentsConfig.Producer,
JdiscBindingsConfig.Producer,
ContainerHttpConfig.Producer,
ContainerMbusConfig.Producer {
public static final int BASEPORT = Defaults.getDefaults().vespaWebServicePort();
public static final String SINGLENODE_CONTAINER_SERVICESPEC = "default_singlenode_container";
protected final AbstractConfigProducer parent;
private final String name;
private boolean requireSpecificPorts = true;
private String clusterName = null;
private Optional<String> hostResponseHeaderKey = Optional.empty();
/** Whether this node has been marked as retired (e.g, will be removed) */
private final boolean retired;
/** The unique index of this node */
private final int index;
private final ComponentGroup<Handler<?>> handlers = new ComponentGroup<>(this, "handler");
private final ComponentGroup<Component<?, ?>> components = new ComponentGroup<>(this, "components");
private final JettyHttpServer defaultHttpServer = new JettyHttpServer(new ComponentId("DefaultHttpServer"));
protected final int numHttpServerPorts;
protected Container(AbstractConfigProducer parent, String name, int index) {
this(parent, name, false, index);
}
protected Container(AbstractConfigProducer parent, String name, boolean retired, int index) {
super(parent, name);
this.name = name;
this.parent = parent;
this.retired = retired;
this.index = index;
if (getHttp() == null) {
numHttpServerPorts = 2;
addChild(defaultHttpServer);
} else if (getHttp().getHttpServer() == null) {
numHttpServerPorts = 0;
} else {
numHttpServerPorts = getHttp().getHttpServer().getConnectorFactories().size();
}
addBuiltinHandlers();
addChild(new SimpleComponent("com.yahoo.container.jdisc.ConfiguredApplication$ApplicationContext"));
}
/** True if this container is retired (slated for removal) */
public boolean isRetired() { return retired; }
public ComponentGroup<Handler<?>> getHandlers() {
return handlers;
}
public ComponentGroup getComponents() {
return components;
}
public final void addComponent(Component c) {
components.addComponent(c);
}
public final void addSimpleComponent(String idSpec, String classSpec, String bundleSpec) {
addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec)));
}
public final void addHandler(Handler h) {
handlers.addComponent(h);
}
/**
* If present, this container should emit this header key with the value set to the local hostname
* in HTTP responses
*/
@SuppressWarnings("unused")
public void setHostResponseHeaderKey(Optional<String> hostResponseheaderKey) {
Objects.requireNonNull(hostResponseheaderKey, "HostResponseheaderKey cannot be null");
this.hostResponseHeaderKey = hostResponseheaderKey;
}
public Http getHttp() {
return (parent instanceof ContainerCluster) ? ((ContainerCluster) parent).getHttp() : null;
}
public JettyHttpServer getDefaultHttpServer() {
return defaultHttpServer;
}
/** Returns the index of this node. The index of a given node is stable through changes with best effort. */
public final int index() { return index; }
public void addBuiltinHandlers() { }
@Override
public void initService(DeployLogger deployLogger) {
if (isInitialized()) return;
super.initService(deployLogger);
if (getHttp() == null) {
initDefaultJettyConnector();
} else {
reserveHttpPortsPrepended();
}
tagServers();
}
protected void tagServers() {
if (numHttpServerPorts > 0) {
portsMeta.on(0).tag("http").tag("query").tag("external").tag("state");
}
for (int i = 1; i < numHttpServerPorts; i++)
portsMeta.on(i).tag("http").tag("external");
if (messageBusEnabled()) {
portsMeta.on(numHttpServerPorts).tag("rpc").tag("messaging");
}
if (rpcServerEnabled()) {
portsMeta.on(numHttpServerPorts + 1).tag("rpc").tag("admin");
}
}
private void reserveHttpPortsPrepended() {
if (getHttp() != null && getHttp().getHttpServer() != null) {
for (ConnectorFactory connectorFactory : getHttp().getHttpServer().getConnectorFactories()) {
reservePortPrepended(getPort(connectorFactory), "http/" + connectorFactory.getName());
}
}
}
private int getPort(ConnectorFactory connectorFactory) {
return connectorFactory.getListenPort();
}
private void initDefaultJettyConnector() {
defaultHttpServer.addConnector(new ConnectorFactory("SearchServer", getSearchPort()));
}
private ContainerServiceType myServiceType = null;
/** Subclasses must implement {@link
@Override
public final String getServiceType() {
if (myServiceType == null) {
myServiceType = myServiceType();
}
return myServiceType.serviceName;
}
/** Subclasses must implement this for a custom service name. */
protected abstract ContainerServiceType myServiceType();
public void setClusterName(String name) {
this.clusterName = name;
}
@Override
public int getWantedPort() {
return requiresWantedPort() ? BASEPORT: 0;
}
/** instance can use any port number for its default HTTP server */
public void useDynamicPorts() {
requireSpecificPorts = false;
}
/**
* First Qrserver or container must run on ports familiar to the user.
*/
@Override
public boolean requiresWantedPort() {
return requireSpecificPorts && (getHttp() == null);
}
public boolean requiresConsecutivePorts() {
return false;
}
/**
* @return the number of ports needed by the Container, except those reserved manually (reservePortPrepended)
*/
@Override
public String[] getPortSuffixes() {
int n = getPortCount();
String[] suffixes = new String[n];
int off = 0;
int httpPorts = (getHttp() != null) ? 0 : numHttpServerPorts;
if (httpPorts > 0) {
suffixes[off++] = "http";
}
for (int i = 1; i < httpPorts; i++) {
suffixes[off++] = "http/" + i;
}
if (messageBusEnabled()) {
suffixes[off++] = "messaging";
}
if (rpcServerEnabled()) {
suffixes[off++] = "rpc";
}
while (off < n) {
suffixes[off] = "unused/" + off;
++off;
}
assert (off == n);
return suffixes;
}
/**
* @return the actual search port
* TODO: Remove. Use {@link
*/
public int getSearchPort() {
if (getHttp() != null)
throw new AssertionError("getSearchPort must not be used when http section is present.");
return getRelativePort(0);
}
private int getRpcPort() {
return rpcServerEnabled() ? getRelativePort(numHttpServerPorts + numMessageBusPorts()) : 0;
}
private int numRpcPorts() { return rpcServerEnabled() ? 1 : 0; }
private int getMessagingPort() {
return messageBusEnabled() ? getRelativePort(numHttpServerPorts) : 0;
}
private int numMessageBusPorts() { return messageBusEnabled() ? 1 : 0; }
@Override
public int getHealthPort() {
final Http http = getHttp();
if (http != null) {
if (http.getHttpServer() == null) {
return -1;
} else {
return getRelativePort(0);
}
} else {
return httpServerEnabled() ? getSearchPort() : -1;
}
}
public String getStartupCommand() {
return "PRELOAD=" + getPreLoad() + " exec vespa-start-container-daemon " + getJvmOptions() + " ";
}
@Override
public void getConfig(QrConfig.Builder builder) {
builder.
rpc(new Rpc.Builder()
.enabled(rpcServerEnabled())
.port(getRpcPort())
.slobrokId(serviceSlobrokId())).
filedistributor(filedistributorConfig());
if (clusterName != null) {
builder.discriminator(clusterName+"."+name);
} else {
builder.discriminator(name);
}
}
/** Returns the jvm args set explicitly for this node */
public String getAssignedJvmOptions() { return super.getJvmOptions(); }
private String serviceSlobrokId() {
return "vespa/service/" + getConfigId();
}
private Filedistributor.Builder filedistributorConfig() {
Filedistributor.Builder builder = new Filedistributor.Builder();
FileDistributionConfigProducer fileDistribution = getRoot().getFileDistributionConfigProducer();
if (fileDistribution != null) {
builder.configid(fileDistribution.getConfigProducer(getHost()).getConfigId());
}
return builder;
}
@Override
public void getConfig(ComponentsConfig.Builder builder) {
builder.components.addAll(ComponentsConfigGenerator.generate(allEnabledComponents()));
}
private Collection<Component<?, ?>> allEnabledComponents() {
Collection<Component<?, ?>> allComponents = new ArrayList<>();
addAllEnabledComponents(allComponents, this);
return Collections.unmodifiableCollection(allComponents);
}
private void addAllEnabledComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) {
for (AbstractConfigProducer<?> child: current.getChildren().values()) {
if ( ! httpServerEnabled() && isHttpServer(child)) continue;
if (child instanceof Component)
allComponents.add((Component<?, ?>) child);
addAllEnabledComponents(allComponents, child);
}
}
private boolean isHttpServer(AbstractConfigProducer<?> component) {
return component instanceof JettyHttpServer;
}
@Override
public final void getConfig(JdiscBindingsConfig.Builder builder) {
builder.handlers(DiscBindingsConfigGenerator.generate(handlers.getComponents()));
}
@Override
public void getConfig(ContainerHttpConfig.Builder builder) {
if (hostResponseHeaderKey.isPresent())
builder.hostResponseHeaderKey(hostResponseHeaderKey.get());
}
@Override
public void getConfig(ContainerMbusConfig.Builder builder) {
builder.enabled(messageBusEnabled()).port(getMessagingPort());
}
@Override
public HashMap<String,String> getDefaultMetricDimensions(){
HashMap<String, String> dimensions = new HashMap<>();
if (clusterName != null)
dimensions.put("clustername", clusterName);
return dimensions;
}
private boolean messageBusEnabled() {
return containerCluster().isPresent() && containerCluster().get().messageBusEnabled();
}
private boolean httpServerEnabled() {
return containerCluster().isPresent() && containerCluster().get().httpServerEnabled();
}
private boolean rpcServerEnabled() {
return containerCluster().isPresent() && containerCluster().get().rpcServerEnabled();
}
private Optional<ContainerCluster> containerCluster() {
return (parent instanceof ContainerCluster) ? Optional.of((ContainerCluster) parent) : Optional.empty();
}
} | class Container extends AbstractService implements
QrConfig.Producer,
ComponentsConfig.Producer,
JdiscBindingsConfig.Producer,
ContainerHttpConfig.Producer,
ContainerMbusConfig.Producer {
public static final int BASEPORT = Defaults.getDefaults().vespaWebServicePort();
public static final String SINGLENODE_CONTAINER_SERVICESPEC = "default_singlenode_container";
protected final AbstractConfigProducer parent;
private final String name;
private boolean requireSpecificPorts = true;
private String clusterName = null;
private Optional<String> hostResponseHeaderKey = Optional.empty();
/** Whether this node has been marked as retired (e.g, will be removed) */
private final boolean retired;
/** The unique index of this node */
private final int index;
private final ComponentGroup<Handler<?>> handlers = new ComponentGroup<>(this, "handler");
private final ComponentGroup<Component<?, ?>> components = new ComponentGroup<>(this, "components");
private final JettyHttpServer defaultHttpServer = new JettyHttpServer(new ComponentId("DefaultHttpServer"));
protected final int numHttpServerPorts;
protected Container(AbstractConfigProducer parent, String name, int index) {
this(parent, name, false, index);
}
protected Container(AbstractConfigProducer parent, String name, boolean retired, int index) {
super(parent, name);
this.name = name;
this.parent = parent;
this.retired = retired;
this.index = index;
if (getHttp() == null) {
numHttpServerPorts = 2;
addChild(defaultHttpServer);
} else if (getHttp().getHttpServer() == null) {
numHttpServerPorts = 0;
} else {
numHttpServerPorts = getHttp().getHttpServer().getConnectorFactories().size();
}
addBuiltinHandlers();
addChild(new SimpleComponent("com.yahoo.container.jdisc.ConfiguredApplication$ApplicationContext"));
}
/** True if this container is retired (slated for removal) */
public boolean isRetired() { return retired; }
public ComponentGroup<Handler<?>> getHandlers() {
return handlers;
}
public ComponentGroup getComponents() {
return components;
}
public final void addComponent(Component c) {
components.addComponent(c);
}
public final void addSimpleComponent(String idSpec, String classSpec, String bundleSpec) {
addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec)));
}
public final void addHandler(Handler h) {
handlers.addComponent(h);
}
/**
* If present, this container should emit this header key with the value set to the local hostname
* in HTTP responses
*/
@SuppressWarnings("unused")
public void setHostResponseHeaderKey(Optional<String> hostResponseheaderKey) {
Objects.requireNonNull(hostResponseheaderKey, "HostResponseheaderKey cannot be null");
this.hostResponseHeaderKey = hostResponseheaderKey;
}
public Http getHttp() {
return (parent instanceof ContainerCluster) ? ((ContainerCluster) parent).getHttp() : null;
}
public JettyHttpServer getDefaultHttpServer() {
return defaultHttpServer;
}
/** Returns the index of this node. The index of a given node is stable through changes with best effort. */
public final int index() { return index; }
public void addBuiltinHandlers() { }
@Override
public void initService(DeployLogger deployLogger) {
if (isInitialized()) return;
super.initService(deployLogger);
if (getHttp() == null) {
initDefaultJettyConnector();
} else {
reserveHttpPortsPrepended();
}
tagServers();
}
protected void tagServers() {
if (numHttpServerPorts > 0) {
portsMeta.on(0).tag("http").tag("query").tag("external").tag("state");
}
for (int i = 1; i < numHttpServerPorts; i++)
portsMeta.on(i).tag("http").tag("external");
if (messageBusEnabled()) {
portsMeta.on(numHttpServerPorts).tag("rpc").tag("messaging");
}
if (rpcServerEnabled()) {
portsMeta.on(numHttpServerPorts + 1).tag("rpc").tag("admin");
}
}
private void reserveHttpPortsPrepended() {
if (getHttp() != null && getHttp().getHttpServer() != null) {
for (ConnectorFactory connectorFactory : getHttp().getHttpServer().getConnectorFactories()) {
reservePortPrepended(getPort(connectorFactory), "http/" + connectorFactory.getName());
}
}
}
private int getPort(ConnectorFactory connectorFactory) {
return connectorFactory.getListenPort();
}
private void initDefaultJettyConnector() {
defaultHttpServer.addConnector(new ConnectorFactory("SearchServer", getSearchPort()));
}
// Cached result of myServiceType(); computed lazily on first getServiceType() call.
// NOTE(review): lazy init is not synchronized — presumably model building is single-threaded; verify.
private ContainerServiceType myServiceType = null;

/** Returns the name of this container's service type, as determined by {@link #myServiceType()}. */
@Override
public final String getServiceType() {
if (myServiceType == null) {
myServiceType = myServiceType();
}
return myServiceType.serviceName;
}
/**
 * Subclasses must implement this for a custom service name.
 *
 * @return the service type of this container
 */
protected abstract ContainerServiceType myServiceType();
/** Sets the name of the cluster this container belongs to (used in metric dimensions and the config discriminator). */
public void setClusterName(String name) {
this.clusterName = name;
}
/** Returns the base port to request when a specific port is required, otherwise 0 (any port). */
@Override
public int getWantedPort() {
    if (requiresWantedPort())
        return BASEPORT;
    return 0;
}
/** After calling this, the instance can use any port number for its default HTTP server. */
public void useDynamicPorts() {
requireSpecificPorts = false;
}
/**
 * First Qrserver or container must run on ports familiar to the user.
 *
 * @return true when specific ports are required and no explicit http configuration overrides them
 */
@Override
public boolean requiresWantedPort() {
return requireSpecificPorts && (getHttp() == null);
}
/** Returns whether this service must be allocated consecutive port numbers; always false for containers. */
public boolean requiresConsecutivePorts() {
return false;
}
/**
 * Returns a descriptive suffix for each port this container allocates, except those
 * reserved manually through reservePortPrepended. Slots not matched to a role are
 * named "unused/&lt;index&gt;".
 */
@Override
public String[] getPortSuffixes() {
    int count = getPortCount();
    String[] names = new String[count];
    int next = 0;
    // HTTP ports are only counted here when no explicit http configuration provides them.
    int httpPorts = (getHttp() != null) ? 0 : numHttpServerPorts;
    for (int i = 0; i < httpPorts; i++)
        names[next++] = (i == 0) ? "http" : ("http/" + i);
    if (messageBusEnabled())
        names[next++] = "messaging";
    if (rpcServerEnabled())
        names[next++] = "rpc";
    // Fill any remaining slots with placeholder names.
    for ( ; next < count; next++)
        names[next] = "unused/" + next;
    assert (next == count);
    return names;
}
/**
 * Returns the actual search port.
 * Must not be called when an explicit http configuration supplies the ports.
 * TODO: Remove; use the relative port accessor (or the http configuration) instead.
 */
public int getSearchPort() {
if (getHttp() != null)
throw new AssertionError("getSearchPort must not be used when http section is present.");
return getRelativePort(0);
}
/** Returns the allocated rpc port, or 0 when the rpc server is disabled. */
private int getRpcPort() {
// The rpc port is laid out after the http ports and the (optional) message bus port.
return rpcServerEnabled() ? getRelativePort(numHttpServerPorts + numMessageBusPorts()) : 0;
}
private int numRpcPorts() { return rpcServerEnabled() ? 1 : 0; }
/** Returns the allocated message bus port, or 0 when message bus is disabled. */
private int getMessagingPort() {
// The message bus port immediately follows the http ports.
return messageBusEnabled() ? getRelativePort(numHttpServerPorts) : 0;
}
private int numMessageBusPorts() { return messageBusEnabled() ? 1 : 0; }
/**
 * Returns the port serving health checks, or -1 when no http server is available:
 * the first http port with an explicit http configuration, otherwise the search port.
 */
@Override
public int getHealthPort() {
    Http http = getHttp();
    if (http == null)
        return httpServerEnabled() ? getSearchPort() : -1;
    if (http.getHttpServer() == null)
        return -1;
    return getRelativePort(0);
}
/** Returns the shell command used to start this container daemon, including preload and jvm options. */
public String getStartupCommand() {
return "PRELOAD=" + getPreLoad() + " exec vespa-start-container-daemon " + getJvmOptions() + " ";
}
/** Fills the qr config with rpc settings, file distributor config, and a discriminator for this node. */
@Override
public void getConfig(QrConfig.Builder builder) {
    Rpc.Builder rpcBuilder = new Rpc.Builder()
            .enabled(rpcServerEnabled())
            .port(getRpcPort())
            .slobrokId(serviceSlobrokId());
    builder.rpc(rpcBuilder)
           .filedistributor(filedistributorConfig());
    // The discriminator is qualified by the cluster name when this node belongs to a named cluster.
    String discriminator = (clusterName != null) ? (clusterName + "." + name) : name;
    builder.discriminator(discriminator);
}
/** Returns the jvm args set explicitly for this node. */
public String getAssignedJvmOptions() { return super.getJvmOptions(); }
/** Returns the slobrok id under which this service registers, derived from its config id. */
private String serviceSlobrokId() {
return "vespa/service/" + getConfigId();
}
/**
 * Builds the file distributor config, pointing to the file distribution config producer
 * for this container's host when one exists.
 */
private Filedistributor.Builder filedistributorConfig() {
Filedistributor.Builder builder = new Filedistributor.Builder();
FileDistributionConfigProducer fileDistribution = getRoot().getFileDistributionConfigProducer();
if (fileDistribution != null) {
builder.configid(fileDistribution.getConfigProducer(getHost()).getConfigId());
}
return builder;
}
/** Adds config for every enabled component of this container. */
@Override
public void getConfig(ComponentsConfig.Builder builder) {
builder.components.addAll(ComponentsConfigGenerator.generate(allEnabledComponents()));
}
/** Returns an unmodifiable collection of all enabled components in this producer tree. */
private Collection<Component<?, ?>> allEnabledComponents() {
    Collection<Component<?, ?>> components = new ArrayList<>();
    addAllEnabledComponents(components, this);
    return Collections.unmodifiableCollection(components);
}
/** Recursively collects enabled components, skipping the entire http server subtree when it is disabled. */
private void addAllEnabledComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) {
    for (AbstractConfigProducer<?> child : current.getChildren().values()) {
        boolean disabledHttpServer = isHttpServer(child) && ! httpServerEnabled();
        if (disabledHttpServer) continue; // neither the server nor its descendants are collected
        if (child instanceof Component)
            allComponents.add((Component<?, ?>) child);
        addAllEnabledComponents(allComponents, child);
    }
}
/** Returns whether the given producer is a Jetty http server component. */
private boolean isHttpServer(AbstractConfigProducer<?> component) {
return component instanceof JettyHttpServer;
}
/** Adds binding config generated from all registered handlers. */
@Override
public final void getConfig(JdiscBindingsConfig.Builder builder) {
builder.handlers(DiscBindingsConfigGenerator.generate(handlers.getComponents()));
}
/** Sets the host-response-header key in the container http config when one is configured. */
@Override
public void getConfig(ContainerHttpConfig.Builder builder) {
    // Idiomatic Optional use instead of the isPresent()/get() pair.
    hostResponseHeaderKey.ifPresent(builder::hostResponseHeaderKey);
}
/** Sets message bus enablement and port in the container mbus config. */
@Override
public void getConfig(ContainerMbusConfig.Builder builder) {
builder.enabled(messageBusEnabled()).port(getMessagingPort());
}
/** Returns the default metric dimensions for this container: the cluster name, when one has been set. */
@Override
public HashMap<String,String> getDefaultMetricDimensions(){
HashMap<String, String> dimensions = new HashMap<>();
if (clusterName != null)
dimensions.put("clustername", clusterName);
return dimensions;
}
/** Returns whether message bus is enabled: true iff this container belongs to a cluster with message bus enabled. */
private boolean messageBusEnabled() {
    // map/orElse avoids the isPresent()/get() pair and the duplicated containerCluster() call.
    return containerCluster().map(ContainerCluster::messageBusEnabled).orElse(false);
}
/** Returns whether the http server is enabled: true iff this container belongs to a cluster with it enabled. */
private boolean httpServerEnabled() {
    // map/orElse avoids the isPresent()/get() pair and the duplicated containerCluster() call.
    return containerCluster().map(ContainerCluster::httpServerEnabled).orElse(false);
}
/** Returns whether the rpc server is enabled: true iff this container belongs to a cluster with it enabled. */
private boolean rpcServerEnabled() {
    // map/orElse avoids the isPresent()/get() pair and the duplicated containerCluster() call.
    return containerCluster().map(ContainerCluster::rpcServerEnabled).orElse(false);
}
/** Returns the parent cast to ContainerCluster, or empty when the parent is not a container cluster. */
private Optional<ContainerCluster> containerCluster() {
return (parent instanceof ContainerCluster) ? Optional.of((ContainerCluster) parent) : Optional.empty();
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.